Dataset schema:

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: dec9be5d7623bfb01463ca78be5f847f3b7dae8e | size: 1,494 | ext: py | lang: Python
repo_path: tests/transformations/state_fusion_node_merge_test.py | repo_name: targetsm/dace | head_hexsha: 297b12804a334df8cc6fad5250d5fb0cce20dc6e | licenses: ["BSD-3-Clause"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
N = dace.symbol('N')
@dace.program
def test1(A: dace.float64[N], B:dace.float64[N]):
for i in dace.map[0:N]:
with dace.tasklet:
input << A[i]
out >> A[i]
out = input + 42
B[:] = A[:]
for i in dace.map[0:N]:
with dace.tasklet:
input << A[i]
out >> A[i]
out = input + 43
@dace.program
def test2(C: dace.float32[1], E: dace.float32[1], F:dace.float32[1]):
with dace.tasklet:
ci << C[0]
co >> C[0]
co = ci + 1
with dace.tasklet:
c << C[0]
e >> E[0]
e = c
with dace.tasklet:
c << C[0]
f >> F[0]
f = c
def relative_error(val, ref):
return np.linalg.norm(val - ref) / np.linalg.norm(ref)
if __name__ == '__main__':
N.set(42)
A = np.random.rand(N.get()).astype(np.float64)
B = np.random.rand(N.get()).astype(np.float64)
A_ref = A + 42 + 43
B_ref = A + 42
test1(A, B)
assert(relative_error(A, A_ref) < 1e-12)
assert(relative_error(B, B_ref) < 1e-12)
C = np.random.rand(1).astype(np.float32)
E = np.random.rand(1).astype(np.float32)
F = np.random.rand(1).astype(np.float32)
C_ref = np.random.rand(1).astype(np.float32)
C_ref[:] = C[:] + 1
test2(C, E, F)
assert(C[0] == C_ref[0])
assert(E[0] == C_ref[0])
assert(F[0] == C_ref[0])
avg_line_length: 23.34375 | max_line_length: 75 | alphanum_fraction: 0.53012

hexsha: 2d3eca997ff33f0fc9408971a510324c3b811a23 | size: 594 | ext: py | lang: Python
repo_path: catalog/bindings/gmd/linear_cs.py | repo_name: NIVANorge/s-enda-playground | head_hexsha: 56ae0a8978f0ba8a5546330786c882c31e17757a | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
from dataclasses import dataclass
from bindings.gmd.linear_cstype import LinearCstype
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class LinearCs(LinearCstype):
"""gml:LinearCS is a one-dimensional coordinate system that consists of the
points that lie on the single axis described.
The associated coordinate is the distance – with or without offset –
from the specified datum to the point along the axis. A LinearCS
shall have one gml:axis property element.
"""
class Meta:
name = "LinearCS"
namespace = "http://www.opengis.net/gml"
avg_line_length: 29.7 | max_line_length: 79 | alphanum_fraction: 0.727273

hexsha: c2de6617e443011642cea3d79b7e54d2cde87b20 | size: 693 | ext: py | lang: Python
repo_path: init_db.py | repo_name: Samar-Bons/Flask_Blog | head_hexsha: f765cd7c124fc9249596c7606ed417a325aa5f53 | licenses: ["MIT"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
#Module to initialize a db named "database.db" which will store
#post IDs, time_created, Title, and Post content
import sqlite3
connection = sqlite3.connect('database.db')
# Executing schema.sql script to initialize our db
with open('schema.sql') as f:
connection.executescript(f.read())
# Prepopulating the database for developmental purposes
cur = connection.cursor()
cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)",
('First Post', 'Content for the first post')
)
cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)",
('Second Post', 'Content for the second post')
)
connection.commit()
connection.close()
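The script above assumes a schema.sql that creates the posts table it inserts into. As a quick sanity check, the seeded rows could be read back with a short snippet like the following; this is a hypothetical addition for illustration, and only the title and content columns are taken from the INSERT statements above.

```python
# Hypothetical verification snippet (not part of init_db.py): read back the
# rows seeded above. Assumes database.db exists and that schema.sql defined a
# 'posts' table containing at least the 'title' and 'content' columns.
import sqlite3

connection = sqlite3.connect('database.db')
connection.row_factory = sqlite3.Row

for row in connection.execute("SELECT title, content FROM posts"):
    print(f"{row['title']}: {row['content']}")

connection.close()
```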
avg_line_length: 27.72 | max_line_length: 64 | alphanum_fraction: 0.681097

hexsha: 6cbc39d8b2c0f6c2e561a77d724782f6f5f6e212 | size: 4,278 | ext: py | lang: Python
repo_path: relationships/templatetags/relationship_tags.py | repo_name: SalahAdDin/django-relationships | head_hexsha: a1f98e5b3d0f8e9b12b616d3c3d4153f7f7b06bc | licenses: ["MIT"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
from django import template
from django.urls import reverse
try:
from django.db.models.loading import get_model
except ImportError:
from django.apps import apps
get_model = apps.get_model
from django.template import TemplateSyntaxError
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from relationships.models import RelationshipStatus
from relationships.utils import (
positive_filter,
negative_filter
)
register = template.Library()
@register.simple_tag
def relationship(from_user, to_user, status):
"""
Determine if a certain type of relationship exists between two users.
The ``status`` parameter must be a slug matching either the from_slug,
to_slug or symmetrical_slug of a RelationshipStatus.
Example::
{% relationship from_user to_user "friends" as felas %}
{% relationship from_user to_user "blocking" as blocked %}
{% if felas %}
Here are pictures of me drinking alcohol
{% elif blocked %}
damn seo experts
{% else %}
Sorry coworkers
{% endif %}
"""
requested_status = status.replace('"', '') # strip quotes
if from_user.is_anonymous() or to_user.is_anonymous():
return False
try:
status = RelationshipStatus.objects.by_slug(requested_status)
except RelationshipStatus.DoesNotExist:
raise template.TemplateSyntaxError('RelationshipStatus not found')
if status.from_slug == requested_status:
val = from_user.relationships.exists(to_user, status)
elif status.to_slug == requested_status:
val = to_user.relationships.exists(from_user, status)
else:
val = from_user.relationships.exists(to_user, status, symmetrical=True)
if val:
return True
return False
@register.filter
def add_relationship_url(user, status):
"""
Generate a url for adding a relationship on a given user. ``user`` is a
User object, and ``status`` is either a relationship_status object or a
string denoting a RelationshipStatus
Usage::
href="{{ user|add_relationship_url:"following" }}"
"""
if isinstance(status, RelationshipStatus):
status = status.from_slug
return reverse('relationship_add', args=[user.username, status])
@register.filter
def remove_relationship_url(user, status):
"""
Generate a url for removing a relationship on a given user. ``user`` is a
User object, and ``status`` is either a relationship_status object or a
string denoting a RelationshipStatus
Usage::
href="{{ user|remove_relationship_url:"following" }}"
"""
if isinstance(status, RelationshipStatus):
status = status.from_slug
return reverse('relationship_remove', args=[user.username, status])
def positive_filter_decorator(func):
def inner(qs, user):
if isinstance(qs, basestring):
model = get_model(*qs.split('.'))
if not model:
return []
qs = model._default_manager.all()
if user.is_anonymous():
return qs.none()
return func(qs, user)
inner._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(inner)
def negative_filter_decorator(func):
def inner(qs, user):
if isinstance(qs, basestring):
model = get_model(*qs.split('.'))
if not model:
return []
qs = model._default_manager.all()
if user.is_anonymous():
return qs
return func(qs, user)
inner._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(inner)
@register.filter
@positive_filter_decorator
def friend_content(qs, user):
return positive_filter(qs, user.relationships.friends())
@register.filter
@positive_filter_decorator
def following_content(qs, user):
return positive_filter(qs, user.relationships.following())
@register.filter
@positive_filter_decorator
def followers_content(qs, user):
return positive_filter(qs, user.relationships.followers())
@register.filter
@negative_filter_decorator
def unblocked_content(qs, user):
return negative_filter(qs, user.relationships.blocking())
avg_line_length: 28.711409 | max_line_length: 79 | alphanum_fraction: 0.687705

hexsha: a0169a974f81264426a0688fb07602fbf79e8796 | size: 1,564 | ext: py | lang: Python
repo_path: ipylivebash/tests/test_runner.py | repo_name: benlau/ipylivebash | head_hexsha: 1d935efecf69ebc55983722dd9b811c5f80b1f88 | licenses: ["BSD-3-Clause-Clear", "BSD-3-Clause"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: 6 (2022-01-02T19:33:05.000Z to 2022-03-02T02:14:20.000Z) | max_issues_count: 1 (2022-01-17T18:08:25.000Z to 2022-01-29T22:35:10.000Z) | max_forks_count: null
from unittest.mock import MagicMock
import time
from ..runner import Runner, run_script, execute_script_in_thread
ECHO_SCRIPT = "echo 123"
def test_runner_parse_args():
runner = Runner("--help".split())
assert runner.args.print_help is True
assert runner.args.keep_cell_output is False
runner = Runner("--keep-cell-output".split())
assert runner.args.keep_cell_output is True
def test_runner_run():
runner = Runner("")
runner.log_view = MagicMock()
runner.run(ECHO_SCRIPT)
runner.log_view.write_message.assert_called_once_with("123\n")
def test_runner_run_confirm():
runner = Runner("--ask-confirm".split())
runner.run(ECHO_SCRIPT)
def test_runner_run_notify():
runner = Runner("--notify".split())
runner.run(ECHO_SCRIPT)
def test_run_script():
run_script(ECHO_SCRIPT)
def test_execute_script_in_thread():
proxy = execute_script_in_thread(ECHO_SCRIPT)
messages = []
while True:
proxy.acquire()
is_finished = proxy.is_finished
if len(proxy.messages) > 0:
messages = messages + proxy.messages
proxy.messages = []
proxy.release()
if is_finished:
break
time.sleep(0.1)
assert messages == ["123\n"]
def test_log():
runner = Runner("--save output.txt".split())
runner.log_file = MagicMock()
runner.run(ECHO_SCRIPT)
runner.log_file.open.assert_called_once_with()
runner.log_file.write_message.assert_called_once_with("123\n")
runner.log_file.close.assert_called_once_with()
avg_line_length: 25.225806 | max_line_length: 66 | alphanum_fraction: 0.688619

hexsha: 16a4c9190e5df021753712d2f42b13f6c31b0fed | size: 16,678 | ext: py | lang: Python
repo_path: facet/configuration.py | repo_name: edponce/FACET | head_hexsha: 0dca2d728813a4865e72b2e8fd6b114a0c63d5b0 | licenses: ["MIT"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: 2 (2020-10-16T19:27:21.000Z to 2021-11-04T15:06:54.000Z) | max_issues_count: 24 (2020-07-13T01:49:36.000Z to 2020-10-30T21:54:00.000Z) | max_forks_count: null
import os
import re
import copy
import yaml
import json
import pyparsing
import configparser
from typing import (
Any,
Dict,
Union,
Iterable,
)
__all__ = ['Configuration']
# Boolean and None states are case-insensitive
BOOLEAN_STATES = {
True: ('true', 'yes', 'on', 'accept', 'enable'),
False: ('false', 'no', 'off', 'reject', 'disable'),
}
NONE_STATES = ('none', 'null', 'nul', 'nil')
LEFT_BRACKETS = '([{<'
RIGHT_BRACKETS = ')]}>'
LPAREN, LBRACK, LBRACE, LANGLE = map(pyparsing.Suppress, LEFT_BRACKETS)
RPAREN, RBRACK, RBRACE, RANGLE = map(pyparsing.Suppress, RIGHT_BRACKETS)
BRACKETS = LEFT_BRACKETS + RIGHT_BRACKETS
QUOTES = '\'\"'
SGLQUOTE, DBLQUOTE = map(pyparsing.Suppress, QUOTES)
DELIMITERS = ':,='
COLON, COMMA, EQUAL = map(pyparsing.Suppress, DELIMITERS)
PUNCTUATIONS = BRACKETS + QUOTES + DELIMITERS
# Integral, real, and scientific numbers
numberToken = pyparsing.pyparsing_common.number
# Boolean values
boolToken = pyparsing.oneOf(
' '.join(BOOLEAN_STATES[True] + BOOLEAN_STATES[False]),
caseless=True,
asKeyword=True,
)
boolToken.setParseAction(lambda token: token[0] in BOOLEAN_STATES[True])
# None values
noneToken = pyparsing.oneOf(
' '.join(NONE_STATES),
caseless=True,
asKeyword=True,
)
noneToken.setParseAction(pyparsing.replaceWith(None))
# Quoted strings
quotedToken = pyparsing.quotedString(QUOTES)
quotedToken.setParseAction(pyparsing.removeQuotes)
# Unquoted strings
rawToken = pyparsing.Word(pyparsing.printables, excludeChars=PUNCTUATIONS)
# Key/value pairs
kvToken = pyparsing.Forward()
# Iterables: key/value, list, tuple, dict, set
kvIterToken = pyparsing.Forward()
listToken = pyparsing.Forward()
tupleToken = pyparsing.Forward()
dictToken = pyparsing.Forward()
setToken = pyparsing.Forward()
# Parsers: scalar and all
# Order matters based on the following rules:
# Key/value tokens are first literals
# Numbers before other scalar literals
# Iterables are last literals
# bool/none before raw/quoted to prioritize keyword comparison
# kvIterToken as first iterable to prioritize kvToken comparison
# dictToken before setToken to resolve '{}' as a dictionary
pyparser_scalars = (
numberToken
| boolToken
| noneToken
| quotedToken
| rawToken
)
pyparser = (
kvToken
| pyparser_scalars
| kvIterToken
| listToken
| tupleToken
| dictToken
| setToken
)
# Key/value pairs: '[key1=val1, key2=val2, ...]'
# Key can only be scalar literals
kvToken <<= pyparsing.Group(pyparser_scalars + EQUAL + pyparser)
kvToken.setParseAction(lambda token: dict(token.asList()))
kvIterToken <<= (
(
LPAREN
+ pyparsing.delimitedList(kvToken)
+ pyparsing.Optional(COMMA)
+ RPAREN
) ^ (
LBRACK
+ pyparsing.delimitedList(kvToken)
+ pyparsing.Optional(COMMA)
+ RBRACK
) ^ (
LBRACE
+ pyparsing.delimitedList(kvToken)
+ pyparsing.Optional(COMMA)
+ RBRACE
) ^ (
LANGLE
+ pyparsing.delimitedList(kvToken)
+ pyparsing.Optional(COMMA)
+ RANGLE
)
)
kvIterToken.setParseAction(lambda token: {
k: v
for d in token.asList()
for k, v in d.items()
})
listToken <<= (
LBRACK
+ pyparsing.Optional(pyparsing.delimitedList(pyparser))
+ pyparsing.Optional(COMMA)
+ RBRACK
)
listToken.setParseAction(lambda token: [token.asList()])
# Tuples: '(val1, ...)' or '<val1, ...>'
tupleToken <<= (
(
LPAREN
+ pyparsing.Optional(pyparsing.delimitedList(pyparser))
+ pyparsing.Optional(COMMA)
+ RPAREN
) ^ (
LANGLE
+ pyparsing.Optional(pyparsing.delimitedList(pyparser))
+ pyparsing.Optional(COMMA)
+ RANGLE
)
)
tupleToken.setParseAction(lambda token: tuple(token))
dictEntry = pyparsing.Group(pyparser + COLON + pyparser)
dictToken <<= (
LBRACE
+ pyparsing.Optional(pyparsing.delimitedList(dictEntry))
+ pyparsing.Optional(COMMA)
+ RBRACE
)
dictToken.setParseAction(lambda token: dict(token.asList()))
setToken <<= (
LBRACE
+ pyparsing.Optional(pyparsing.delimitedList(pyparser))
+ pyparsing.Optional(COMMA)
+ RBRACE
)
setToken.setParseAction(lambda token: set(token))
# Non-enclosed, non-quoted CSV
rawCSVToken = (
pyparsing.Optional(pyparser)
+ COMMA
+ pyparsing.Optional(pyparser)
)
class Configuration:
def __init__(self, regex=r'\${{([^}]+)}}'):
self.regex = regex
def expand_envvars(self, obj):
"""Recursively expand user and environment variables in common
data structures."""
# NOTE: To support values of the forms key:value and key=value pairs,
        # 'parse_string' does not recurse, so let the result be checked for
# instances of data structures that need subsequent parsing.
if isinstance(obj, str):
obj = os.path.expandvars(os.path.expanduser(obj))
if isinstance(obj, dict):
# NOTE: Keys are not modified
obj = {k: self.expand_envvars(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple, set)):
obj = type(obj)([self.expand_envvars(v) for v in obj])
return obj
def interpolate(self, obj: dict):
"""Recursively interpolate dictionary values based on a regular
expression.
Dictionary is traversed in a depth-first manner and supports
out-of-order chained interpolations.
Interpolation values are represented by keys enclosed in '${{...}}'.
An arbitrary number of consecutive keys can be specified by delimiting
them with a colon, ':'. Note that keys can be strings for dictionaries
or indices for indexable iterables.
Example:
${{key}} - substitute current value with dict[key]
${{key1:key2}} - substitute current value with dict[key1][key2]
${{key:2}} - substitute current value with dict[key][2]
"""
regex = re.compile(self.regex)
# Get handle to root of object, so that we can do a forward search
# for interpolation.
root = obj
def _interpolate(obj: dict):
if isinstance(obj, dict):
for k, v in obj.items():
obj[k] = _interpolate(v)
elif isinstance(obj, (list, tuple, set)):
obj = type(obj)([_interpolate(v) for v in obj])
elif isinstance(obj, str):
match = regex.fullmatch(obj)
if match:
# Do not modify handle to root
value = root
for key in match.group(1).split(':'):
if isinstance(value, dict):
value = value[key]
else:
# Assume value is an iterable and key the index
value = value[int(key)]
obj = copy.deepcopy(_interpolate(value))
return obj
return _interpolate(obj)
def parse_string(self, string, protect_numerics=True):
"""Parse a string based on a grammar and resolve environment variables.
Args:
protect_numerics (bool): If set, string-based numbers are not
parsed to numeric types.
"""
# Get copy of original string, to return it in case of invalid/error
orig_string = string
# Case: Protect string numerics (do not parse)
if protect_numerics:
try:
float(string)
except ValueError:
pass
else:
return string
# Case: Expand user and environment variables
# NOTE: Even though environment variables should have been resolved
# prior to these parsing actions, for values with single-line composite
# values (e.g., CSV of key=value pairs), we resolve those internal
# values as well.
string = self.expand_envvars(string)
# Case: Non-enclosed CSV --> Convert to a tuple
try:
rawCSVToken.parseString(string)
except pyparsing.ParseException:
pass
else:
string = '(' + string + ')'
try:
            # NOTE: Parser does not correctly parse a value that begins with
# numbers followed by other characters, e.g., value=2a.
return pyparser.parseString(string, parseAll=True)[0]
except pyparsing.ParseException:
return orig_string
def parse(self, obj, **kwargs):
"""Recursively parse data structures."""
# NOTE: Given that ConfigParser may be applied before this parsing and
# ConfigParser only allows a single level of key/value pairs, nested
        # structures are stringified, and 'parse_string' does not recurse, so
        # we let the resulting data be checked for instances of data structures
# that need subsequent parsing.
if isinstance(obj, str):
obj = self.parse_string(obj, **kwargs)
if isinstance(obj, dict):
# NOTE: Keys are not modified
obj = {k: self.parse(v, **kwargs) for k, v in obj.items()}
elif isinstance(obj, (list, tuple, set)):
obj = type(obj)([self.parse(v, **kwargs) for v in obj])
return obj
def fix_configparser(self, obj: Any, **kwargs):
"""Recursively parse/fix data structures previously parsed by
ConfigParser.
Given that ConfigParser only allows a single level of
key/value pairs, nested structures are stringified, so we apply regex
        transformations so that multi-line values conform to what 'parse_string()' expects.
"""
if isinstance(obj, str):
            # Fix the first nested element in multiline values
obj = re.sub(r'=\s*', '=', obj.strip())
# Change nested elements in same level to be a CSV string
obj = re.sub(r'\n', ',', obj.strip())
# Quote interpolation strings
obj = re.sub(fr'["\']?({self.regex})["\']?', r'"\1"', obj)
obj = self.parse_string(obj, **kwargs)
# Enable numeric protection for subsequent 'parse_string()'
kwargs['protect_numerics'] = True
if isinstance(obj, dict):
obj = {
k: self.fix_configparser(v, **kwargs)
for k, v in obj.items()
}
elif isinstance(obj, (list, tuple, set)):
obj = type(obj)([
self.fix_configparser(v, **kwargs)
for v in obj
])
return obj
def parse_with_configparser(self, config: Union[str, Dict[str, Any]]):
"""Parse with ConfigParser.
Args:
config (str, Dict[str, Any]): Configuration file/data to parse.
"""
parser = configparser.ConfigParser(
delimiters=('=',),
allow_no_value=True,
)
# Leave options (keys) unchanged, do not lowercase
parser.optionxform = lambda option: option
# Expand environment variables
config = self.expand_envvars(config)
# Parse data
if isinstance(config, str):
if os.path.isfile(config):
filename = config
with open(filename) as fd:
config = fd.read()
# NOTE: Expand environment variables from loaded data
config = self.expand_envvars(config)
parser.read_string(config)
elif isinstance(config, dict):
parser.read_dict(config)
else:
raise ValueError('invalid configuration data for ConfigParser')
# Convert ConfigParser to dict (include defaults, if available)
config = {
section: dict(parser[section])
for section in parser.sections()
}
if len(parser.defaults()) > 0:
config[parser.default_section] = dict(parser.defaults())
# Fix multi-line options
return self.fix_configparser(config, protect_numerics=False)
def load_from_string(self, data: str):
"""Parse a string into a dictionary.
Returns:
Dict[str:Any]: Configuration mapping
"""
data = data.strip()
try:
config = json.loads(data)
except json.decoder.JSONDecodeError:
try:
config = yaml.safe_load(data)
except yaml.parser.ParserError:
try:
config = self.parse_with_configparser(data)
except Exception:
raise ValueError('invalid configuration string')
return config
def load_from_file(self, filename: str, file_type: str = None):
"""Load configuration data from YAML/JSON/INI file.
Args:
file_type (str): Explicit file format for file, ignore extension.
Returns:
dict[str:Any]: Configuration mapping
None: If error/invalid occurs during parsing
"""
filename = self.expand_envvars(filename)
if not file_type:
_, ext = os.path.splitext(filename)
file_type = ext[1:]
file_type = file_type.lower()
if file_type in ('yaml', 'yml'):
with open(filename) as fd:
config = yaml.safe_load(fd)
elif file_type in ('json',):
with open(filename) as fd:
config = json.load(fd)
elif file_type in ('ini', 'cfg', 'conf'):
config = self.parse_with_configparser(filename)
else:
raise ValueError(f"invalid configuration file type, '{file_type}'")
return config
def load(
self,
config: Union[str, Dict[Any, Any]],
*,
keys: Union[str, Iterable[str]] = None,
file_type: str = None,
delimiter: str = ':',
):
"""Load/parse configuration from a file/string/mapping into a mapping.
Note:
* The transformations are idempotent with regards to the resulting
configuration mapping.
Args:
config (str, Dict[Any, Any]): Configuration file/data to parse.
File can embed keys via format: 'file.conf:key1:key2:...'
Embedded keys supersede parameter keys. String formats can be
dictionary-like or comma-delimited key=value pairs.
keys (str, Iterable[str]): Extract only data from configuration
corresponding to key. If multiple keys are provided, they
are used (in order) to update the resulting configuration.
file_type (str): Explicit file format for file, ignore extension.
delimiter (str): Delimiter symbol between file name and keys.
Returns:
dict[str:Any]: Configuration mapping
"""
if isinstance(config, str):
config = self.expand_envvars(config)
filename, *tmp_keys = config.split(delimiter)
if os.path.isfile(filename):
# Embedded keys supersede parameter keys
if len(tmp_keys) > 0:
keys = tmp_keys
config = self.load_from_file(filename, file_type=file_type)
# If only a single configuration block, then use it
if not keys and len(config) == 1:
keys = list(config.keys())
# NOTE: Consider a string with no data structure related symbols
# to be a filename. The symbols are based on the general parser.
elif filename and not any(x in filename for x in PUNCTUATIONS):
raise ValueError(
                    f"configuration file does not exist, '{filename}'"
)
else:
config = self.load_from_string(config)
elif isinstance(config, dict):
config = self.expand_envvars(config)
elif config is not None:
raise ValueError(
f"invalid configuration data type, '{type(config)}'"
)
if not config:
return {}
# Normalize strings and types based on a grammar
config = self.parse(config)
# Interpolate/substitute values
config = self.interpolate(config)
# Filter configuration based on keys
if keys:
if isinstance(keys, str):
keys = [keys]
filtered_config = {}
for key in keys:
filtered_config.update(config[key])
config = filtered_config
return config
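To see how the pieces of this Configuration class fit together, here is a minimal usage sketch with made-up section and key names; the behaviour described in the comments follows the parse_string(), parse(), and interpolate() implementations above.

```python
# Hypothetical usage of the Configuration class above; keys are invented.
cfg = Configuration()

raw = {
    "db": {"host": "localhost", "port": "5432"},
    "app": {"db_port": "${{db:port}}", "debug": "false"},
}

settings = cfg.load(raw)

# parse() keeps numeric-looking strings as strings by default
# (protect_numerics=True) and turns "false" into the boolean False;
# interpolate() then replaces "${{db:port}}" with the value of
# settings["db"]["port"].
assert settings["app"]["debug"] is False
assert settings["app"]["db_port"] == settings["db"]["port"]
```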
avg_line_length: 32.447471 | max_line_length: 79 | alphanum_fraction: 0.597254

hexsha: 66981e06e9c908c9cb8573a8e7c5d4a9b8632902 | size: 6,293 | ext: py | lang: Python
max_stars: docs/conf.py | timgates42/pyttsx | 1a84ee33971951b1ea18f2708061a5d19ef94018 | ["FSFAP"] | count: 160 (2016-10-04T22:45:36.000Z to 2022-02-10T06:41:56.000Z)
max_issues: docs/conf.py | simz089s/pyttsx | 4ad1e84fdefee4eed290fdc966573cb57d0b0079 | ["FSFAP"] | count: 27 (2016-10-04T02:45:18.000Z to 2022-03-09T15:15:54.000Z)
max_forks: docs/conf.py | simz089s/pyttsx | 4ad1e84fdefee4eed290fdc966573cb57d0b0079 | ["FSFAP"] | count: 58 (2016-10-06T16:53:43.000Z to 2021-10-21T22:17:35.000Z)
# -*- coding: utf-8 -*-
#
# pyttsx documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 1 09:40:19 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyttsx'
copyright = u'2009, 2013 Peter Parente'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyttsxdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyttsx.tex', u'pyttsx Documentation',
u'Peter Parente', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
avg_line_length: 32.271795 | max_line_length: 80 | alphanum_fraction: 0.722708

hexsha: dd09270e73d492b8977b90e570294b345e62791b | size: 2,500 | ext: py | lang: Python
max_stars: test/selenium/src/tests/test_unified_mapper.py | j0gurt/ggrc-core | 84662dc85aa8864c907eabe70b8efccf92298a1f | ["ECL-2.0", "Apache-2.0"] | count: 1 (2019-01-04T10:55:14.000Z to 2019-01-04T10:55:14.000Z)
max_issues: test/selenium/src/tests/test_unified_mapper.py | farcry4998/ggrc-core | c469039dabb55033c1b379850feb19e8dda2e2a1 | ["ECL-2.0", "Apache-2.0"] | count: null
max_forks: test/selenium/src/tests/test_unified_mapper.py | farcry4998/ggrc-core | c469039dabb55033c1b379850feb19e8dda2e2a1 | ["ECL-2.0", "Apache-2.0"] | count: null
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Unified mapper tests."""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
import pytest
from lib import base, users
from lib.entities import entities_factory
from lib.entities.entity import Representation
from lib.service import webui_service, rest_facade
class TestProgramPage(base.Test):
"""Tests of unified mapper."""
@pytest.mark.smoke_tests
def test_destructive_mapping_controls_to_program_via_unified_mapper(
self, new_program_rest, new_controls_rest, selenium
):
"""Check if Controls can be mapped to Program from Controls widget under
Program page via unified mapper.
Preconditions:
- Program, Controls created via REST API.
"""
expected_controls = [
expected_control.repr_ui() for expected_control in new_controls_rest]
controls_ui_service = webui_service.ControlsService(selenium)
controls_ui_service.map_objs_via_tree_view(
src_obj=new_program_rest, dest_objs=expected_controls)
actual_controls_tab_count = controls_ui_service.get_count_objs_from_tab(
src_obj=new_program_rest)
assert len(expected_controls) == actual_controls_tab_count
actual_controls = controls_ui_service.get_list_objs_from_tree_view(
src_obj=new_program_rest)
# 'actual_controls': created_at, updated_at, custom_attributes (None)
self.general_equal_assert(
sorted(expected_controls), sorted(actual_controls),
*Representation.tree_view_attrs_to_exclude)
def test_create_and_map_control(self, program, selenium):
"""Test that control can be created and mapped using Unified mapper."""
controls_service = webui_service.ControlsService(selenium)
controls_service.open_widget_of_mapped_objs(
program).tree_view.open_map().click_create_and_map_obj()
control = entities_factory.ControlsFactory().create()
controls_service.submit_obj_modal(control)
tree_view_control = controls_service.get_list_objs_from_tree_view(
program)[0]
actual_control = controls_service.get_obj_from_info_page(tree_view_control)
rest_control = rest_facade.get_obj(actual_control)
control.update_attrs(
created_at=rest_control.created_at,
updated_at=rest_control.updated_at,
modified_by=users.current_user(),
slug=rest_control.slug).repr_ui()
self.general_equal_assert(control, actual_control, "custom_attributes")
avg_line_length: 42.372881 | max_line_length: 79 | alphanum_fraction: 0.7744

hexsha: cb8b8168abd406ac3172b2ed5c46a4d6031fcaab | size: 2,559 | ext: py | lang: Python
repo_path: project/drupal.py | repo_name: thomasdiluccio/template-builder | head_hexsha: effb471d7644c7d589a2ed8f6c70be887bdae5d5 | licenses: ["MIT"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
from . import BaseProject
from .remote import RemoteProject
import json
from collections import OrderedDict
class Drupal7_vanilla(BaseProject):
version = '7.67'
@property
def update(self):
return super(Drupal7_vanilla, self).update + [
"wget https://ftp.drupal.org/files/projects/drupal-{0}.tar.gz && tar xzvf drupal-{0}.tar.gz -C {1}".format(self.version, self.builddir),
"rm drupal-{0}.tar.gz".format(self.version),
"rm -rf {0}public || true".format(self.builddir),
"mv {0}drupal-{1} {0}public".format(self.builddir, self.version),
]
class Drupal8(RemoteProject):
major_version = '8.9'
remote = 'https://github.com/drupal/recommended-project.git'
@property
def platformify(self):
return super(Drupal8, self).platformify + [
# 'cd {0} && composer update -W'.format(self.builddir) + self.composer_defaults()
'cd {0} && composer require platformsh/config-reader drush/drush drupal/console drupal/redis'.format(self.builddir) + self.composer_defaults(),
# 'cd {0} && composer update -W'.format(self.builddir) + self.composer_defaults()
]
class Drupal9(RemoteProject):
# This can have a common base with Drupal 8 eventually, once modules are updated.
major_version = "9.3"
remote = 'https://github.com/drupal/recommended-project.git'
@property
def platformify(self):
return super(Drupal9, self).platformify + [
'cd {0} && composer require platformsh/config-reader drush/drush drupal/redis'.format(self.builddir) + self.composer_defaults()
]
class Drupal8_multisite(Drupal8):
pass
class Drupal8_opigno(Drupal8):
major_version = '2'
remote = 'https://bitbucket.org/opigno/opigno-composer.git'
class Drupal8_govcms8(RemoteProject):
major_version = '1'
remote = 'https://github.com/govCMS/govCMS8-project.git'
@property
def platformify(self):
return super(Drupal8_govcms8, self).platformify + [
# GovCMS comes with a pre-made lock file that pins symfony/filesystem at v4, but
# drupal/console only works with the 3.x version, and therefore will fail.
# It should work to remove the lock file first, but for some reason that is still failing.
# For now, just skip installing console on GovCMS. I don't know if anyone uses it anyway.
'cd {0} && composer require platformsh/config-reader drush/drush drupal/redis'.format(self.builddir) + self.composer_defaults(),
]
avg_line_length: 40.619048 | max_line_length: 156 | alphanum_fraction: 0.669793

hexsha: 92235ee751f5bdc387e7ba6c286e1e30c5739a8b | size: 4,964 | ext: py | lang: Python
repo_path: adamw.py | repo_name: imics-lab/tts-gan | head_hexsha: 872213938a68ca63570f9f1df0ccc8e179d73ab2 | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: 14 (2022-01-25T09:57:59.000Z to 2022-03-28T04:27:12.000Z) | max_issues_count: 1 (2022-03-11T07:11:32.000Z to 2022-03-11T15:09:21.000Z) | max_forks_count: 3 (2022-02-11T15:22:41.000Z to 2022-03-27T11:10:58.000Z)
""" AdamW Optimizer
Impl copied from PyTorch master
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
                # Perform decoupled weight decay
p.data.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
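For context, a minimal usage sketch of the AdamW class above on a toy model; the model, data, and hyperparameters are made up, and the snippet is written against the older torch API this file targets.

```python
# Hypothetical usage of the AdamW optimizer defined above.
import torch
import torch.nn as nn
import torch.nn.functional as F

model = nn.Linear(10, 1)
optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)

x = torch.randn(32, 10)
y = torch.randn(32, 1)

optimizer.zero_grad()
loss = F.mse_loss(model(x), y)
loss.backward()
optimizer.step()  # weight decay is applied in a decoupled step inside step()
```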
avg_line_length: 42.42735 | max_line_length: 116 | alphanum_fraction: 0.577357

hexsha: 7e5f7ea4df406d8c403e706764e541c8a3f2cc3f | size: 4,129 | ext: py | lang: Python
repo_path: qiling/qiling/cc/__init__.py | repo_name: mrTavas/owasp-fstm-auto | head_hexsha: 6e9ff36e46d885701c7419db3eca15f12063a7f3 | licenses: ["CC0-1.0"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: 2 (2021-05-05T12:03:01.000Z to 2021-06-04T14:27:15.000Z) | max_issues_count: null | max_forks_count: 2 (2021-05-05T12:03:09.000Z to 2021-06-04T14:27:21.000Z)
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
from typing import Callable, Tuple
from qiling import Qiling
class QlCC:
"""Calling convention base class.
"""
def __init__(self, ql: Qiling) -> None:
"""Initialize a calling convention instance.
Args:
ql: qiling instance
"""
self.ql = ql
@staticmethod
def getNumSlots(argbits: int) -> int:
"""Get the number of slots allocated for an argument of width `argbits`.
"""
raise NotImplementedError
def getRawParam(self, slot: int, argbits: int = None) -> int:
"""Read a value of native size from the specified argument slot.
Note that argument slots and argument indexes are not the same. Though they often correlate
        to each other, some implementations might use more than one slot to represent a single argument.
Args:
slot: argument slot to access
argbits: argument size in bits (default: arch native size)
Returns: raw value
"""
raise NotImplementedError
def setRawParam(self, slot: int, value: int, argbits: int = None) -> None:
"""Replace the value in the specified argument slot.
Note that argument slots and argument indexes are not the same. Though they often correlate
        to each other, some implementations might use more than one slot to represent a single argument.
Args:
slot: argument slot to access
value: new raw value to write
argbits: argument size in bits (default: arch native size)
"""
raise NotImplementedError
def getReturnValue(self) -> int:
"""Get function return value.
"""
raise NotImplementedError
def setReturnValue(self, val: int) -> None:
"""Set function return value.
Args:
val: a value to set
"""
raise NotImplementedError
def unwind(self, nslots: int) -> int:
"""Unwind frame and return from function call.
Args:
nslots: number of arg slots used
Returns: return address
"""
raise NotImplementedError
class QlCommonBaseCC(QlCC):
"""Calling convention base class that implements parameters access through both
    registers and the stack. The extending class is responsible for implementing the rest
of the QlCC interface.
"""
_argregs = ()
_shadow = 0
def __init__(self, ql: Qiling, retreg: int):
super().__init__(ql)
# native address size in bytes
self._asize = self.ql.pointersize
# return value register
self._retreg = retreg
def __access_param(self, index: int, stack_access: Callable, reg_access: Callable) -> Tuple[Callable, int]:
"""[private] Generic accessor to function call parameters by their index.
This method will determine whether the parameter should be accessed on the stack or in a
register, and return the appropriate accessor along with the location to access (either a
register id or stack address)
Args:
index: parameter index to access
stack_access: stack accessor method (either read or write)
reg_access: regs accessor method (either read or write)
Returns: a tuple of the accessor method to use and the location to access
"""
if index >= len(self._argregs):
raise IndexError(f'tried to access arg {index}, but only {len(self._argregs) - 1} args are supported')
reg = self._argregs[index]
# should arg be read from a reg or the stack?
if reg is None:
# get matching stack item
si = index - self._argregs.index(None)
# skip return address and shadow space
return stack_access, (1 + self._shadow + si) * self._asize
else:
return reg_access, reg
def getRawParam(self, index: int, argbits: int = None) -> int:
read, loc = self.__access_param(index, self.ql.stack_read, self.ql.reg.read)
mask = (0 if argbits is None else (1 << argbits)) - 1
return read(loc) & mask
def setRawParam(self, index: int, value: int, argbits: int = None) -> None:
write, loc = self.__access_param(index, self.ql.stack_write, self.ql.reg.write)
mask = (0 if argbits is None else (1 << argbits)) - 1
write(loc, value & mask)
def getReturnValue(self) -> int:
return self.ql.reg.read(self._retreg)
def setReturnValue(self, value: int) -> None:
self.ql.reg.write(self._retreg, value)
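To make the register/stack slot split concrete, here is a hedged sketch of what a concrete subclass of QlCommonBaseCC could look like. The register constants, the shadow-space size, and the ql.stack_pop() call are assumptions made for illustration; this does not reproduce Qiling's actual per-architecture calling-convention classes.

```python
# Hypothetical calling-convention subclass; the register choice and slot
# layout are illustrative assumptions, not Qiling's real implementation.
from unicorn.x86_const import (UC_X86_REG_RCX, UC_X86_REG_RDX, UC_X86_REG_R8,
                               UC_X86_REG_R9, UC_X86_REG_RAX)

class HypotheticalWin64CC(QlCommonBaseCC):
    """First four arguments in RCX, RDX, R8, R9; remaining arguments on the
    stack after a 4-slot shadow space; return value in RAX."""

    # None entries mean "read this argument slot from the stack"
    _argregs = (UC_X86_REG_RCX, UC_X86_REG_RDX, UC_X86_REG_R8, UC_X86_REG_R9) + (None,) * 12
    _shadow = 4

    def __init__(self, ql):
        super().__init__(ql, UC_X86_REG_RAX)

    @staticmethod
    def getNumSlots(argbits: int) -> int:
        # assume every argument occupies exactly one native-size slot
        return 1

    def unwind(self, nslots: int) -> int:
        # assumed helper: pop the saved return address off the stack
        return self.ql.stack_pop()
```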
avg_line_length: 27.344371 | max_line_length: 108 | alphanum_fraction: 0.717365

hexsha: 0592bb492266682f5fb2d4e5e66ce2ac965b453e | size: 33,976 | ext: py | lang: Python
repo_path: couchjs/scons/scons-local-2.0.1/SCons/Builder.py | repo_name: Gussy/bigcouch | head_hexsha: 9e67d3f754186ce8368503509ae041a2847f2b7c | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues, and max_forks columns)
max_stars_count: 73 (2015-03-19T04:04:52.000Z to 2021-08-16T10:45:11.000Z) | max_issues_count: 5 (2016-04-26T13:19:25.000Z to 2017-03-11T14:11:22.000Z) | max_forks_count: 13 (2015-03-27T05:21:42.000Z to 2017-05-22T11:45:30.000Z)
"""SCons.Builder
Builder object subsystem.
A Builder object is a callable that encapsulates information about how
to execute actions to create a target Node (file) from source Nodes
(files), and how to create those dependencies for tracking.
The main entry point here is the Builder() factory method. This provides
a procedural interface that creates the right underlying Builder object
based on the keyword arguments supplied and the types of the arguments.
The goal is for this external interface to be simple enough that the
vast majority of users can create new Builders as necessary to support
building new types of files in their configurations, without having to
dive any deeper into this subsystem.
The base class here is BuilderBase. This is a concrete base class which
does, in fact, represent the Builder objects that we (or users) create.
There is also a proxy that looks like a Builder:
CompositeBuilder
This proxies for a Builder with an action that is actually a
dictionary that knows how to map file suffixes to a specific
action. This is so that we can invoke different actions
(compilers, compile options) for different flavors of source
files.
Builders and their proxies have the following public interface methods
used by other modules:
__call__()
THE public interface. Calling a Builder object (with the
use of internal helper methods) sets up the target and source
dependencies, appropriate mapping to a specific action, and the
environment manipulation necessary for overridden construction
variable. This also takes care of warning about possible mistakes
in keyword arguments.
add_emitter()
Adds an emitter for a specific file suffix, used by some Tool
modules to specify that (for example) a yacc invocation on a .y
can create a .h *and* a .c file.
add_action()
Adds an action for a specific file suffix, heavily used by
Tool modules to add their specific action(s) for turning
a source file into an object file to the global static
and shared object file Builders.
There are the following methods for internal use within this module:
_execute()
        The internal method that handles the heavy lifting when a
Builder is called. This is used so that the __call__() methods
can set up warning about possible mistakes in keyword-argument
overrides, and *then* execute all of the steps necessary so that
the warnings only occur once.
get_name()
Returns the Builder's name within a specific Environment,
primarily used to try to return helpful information in error
messages.
adjust_suffix()
get_prefix()
get_suffix()
get_src_suffix()
set_src_suffix()
Miscellaneous stuff for handling the prefix and suffix
manipulation we use in turning source file names into target
file names.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Builder.py 5134 2010/08/16 23:02:40 bdeegan"
import collections
import SCons.Action
from SCons.Debug import logInstanceCreation
from SCons.Errors import InternalError, UserError
import SCons.Executor
import SCons.Memoize
import SCons.Node
import SCons.Node.FS
import SCons.Util
import SCons.Warnings
class _Null(object):
pass
_null = _Null
def match_splitext(path, suffixes = []):
if suffixes:
matchsuf = [S for S in suffixes if path[-len(S):] == S]
if matchsuf:
suf = max([(len(_f),_f) for _f in matchsuf])[1]
return [path[:-len(suf)], path[-len(suf):]]
return SCons.Util.splitext(path)
class DictCmdGenerator(SCons.Util.Selector):
"""This is a callable class that can be used as a
command generator function. It holds on to a dictionary
mapping file suffixes to Actions. It uses that dictionary
to return the proper action based on the file suffix of
the source file."""
def __init__(self, dict=None, source_ext_match=1):
SCons.Util.Selector.__init__(self, dict)
self.source_ext_match = source_ext_match
def src_suffixes(self):
return list(self.keys())
def add_action(self, suffix, action):
"""Add a suffix-action pair to the mapping.
"""
self[suffix] = action
def __call__(self, target, source, env, for_signature):
if not source:
return []
if self.source_ext_match:
suffixes = self.src_suffixes()
ext = None
for src in map(str, source):
my_ext = match_splitext(src, suffixes)[1]
if ext and my_ext != ext:
raise UserError("While building `%s' from `%s': Cannot build multiple sources with different extensions: %s, %s"
% (repr(list(map(str, target))), src, ext, my_ext))
ext = my_ext
else:
ext = match_splitext(str(source[0]), self.src_suffixes())[1]
if not ext:
#return ext
raise UserError("While building `%s': "
"Cannot deduce file extension from source files: %s"
% (repr(list(map(str, target))), repr(list(map(str, source)))))
try:
ret = SCons.Util.Selector.__call__(self, env, source, ext)
except KeyError, e:
raise UserError("Ambiguous suffixes after environment substitution: %s == %s == %s" % (e.args[0], e.args[1], e.args[2]))
if ret is None:
raise UserError("While building `%s' from `%s': Don't know how to build from a source file with suffix `%s'. Expected a suffix in this list: %s." % \
(repr(list(map(str, target))), repr(list(map(str, source))), ext, repr(list(self.keys()))))
return ret
class CallableSelector(SCons.Util.Selector):
"""A callable dictionary that will, in turn, call the value it
finds if it can."""
def __call__(self, env, source):
value = SCons.Util.Selector.__call__(self, env, source)
if callable(value):
value = value(env, source)
return value
class DictEmitter(SCons.Util.Selector):
"""A callable dictionary that maps file suffixes to emitters.
When called, it finds the right emitter in its dictionary for the
suffix of the first source file, and calls that emitter to get the
right lists of targets and sources to return. If there's no emitter
for the suffix in its dictionary, the original target and source are
returned.
"""
def __call__(self, target, source, env):
emitter = SCons.Util.Selector.__call__(self, env, source)
if emitter:
target, source = emitter(target, source, env)
return (target, source)
class ListEmitter(collections.UserList):
"""A callable list of emitters that calls each in sequence,
returning the result.
"""
def __call__(self, target, source, env):
for e in self.data:
target, source = e(target, source, env)
return (target, source)
# These are a common errors when calling a Builder;
# they are similar to the 'target' and 'source' keyword args to builders,
# so we issue warnings when we see them. The warnings can, of course,
# be disabled.
misleading_keywords = {
'targets' : 'target',
'sources' : 'source',
}
class OverrideWarner(collections.UserDict):
"""A class for warning about keyword arguments that we use as
overrides in a Builder call.
This class exists to handle the fact that a single Builder call
can actually invoke multiple builders. This class only emits the
warnings once, no matter how many Builders are invoked.
"""
def __init__(self, dict):
collections.UserDict.__init__(self, dict)
if __debug__: logInstanceCreation(self, 'Builder.OverrideWarner')
self.already_warned = None
def warn(self):
if self.already_warned:
return
for k in self.keys():
if k in misleading_keywords:
alt = misleading_keywords[k]
msg = "Did you mean to use `%s' instead of `%s'?" % (alt, k)
SCons.Warnings.warn(SCons.Warnings.MisleadingKeywordsWarning, msg)
self.already_warned = 1
def Builder(**kw):
"""A factory for builder objects."""
composite = None
if 'generator' in kw:
if 'action' in kw:
raise UserError("You must not specify both an action and a generator.")
kw['action'] = SCons.Action.CommandGeneratorAction(kw['generator'], {})
del kw['generator']
elif 'action' in kw:
source_ext_match = kw.get('source_ext_match', 1)
if 'source_ext_match' in kw:
del kw['source_ext_match']
if SCons.Util.is_Dict(kw['action']):
composite = DictCmdGenerator(kw['action'], source_ext_match)
kw['action'] = SCons.Action.CommandGeneratorAction(composite, {})
kw['src_suffix'] = composite.src_suffixes()
else:
kw['action'] = SCons.Action.Action(kw['action'])
if 'emitter' in kw:
emitter = kw['emitter']
if SCons.Util.is_String(emitter):
# This allows users to pass in an Environment
# variable reference (like "$FOO") as an emitter.
# We will look in that Environment variable for
# a callable to use as the actual emitter.
var = SCons.Util.get_environment_var(emitter)
if not var:
raise UserError("Supplied emitter '%s' does not appear to refer to an Environment variable" % emitter)
kw['emitter'] = EmitterProxy(var)
elif SCons.Util.is_Dict(emitter):
kw['emitter'] = DictEmitter(emitter)
elif SCons.Util.is_List(emitter):
kw['emitter'] = ListEmitter(emitter)
result = BuilderBase(**kw)
if not composite is None:
result = CompositeBuilder(result, composite)
return result
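# --- Added illustrative sketch; not part of the original SCons source. ---
# Shows the argument shapes the Builder() factory above dispatches on: a plain
# action string, and a suffix -> action dict that gets wrapped in a
# DictCmdGenerator and returned as a CompositeBuilder. The command strings and
# suffixes are hypothetical examples, not SCons defaults.
def _example_builder_factory_usage():
    simple = Builder(action='cp $SOURCE $TARGET', suffix='.out', src_suffix='.in')
    composite = Builder(action={'.c': '$CC -c -o $TARGET $SOURCES',
                                '.f': '$FC -c -o $TARGET $SOURCES'},
                        suffix='.o')
    return simple, composite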
def _node_errors(builder, env, tlist, slist):
"""Validate that the lists of target and source nodes are
legal for this builder and environment. Raise errors or
issue warnings as appropriate.
"""
# First, figure out if there are any errors in the way the targets
# were specified.
for t in tlist:
if t.side_effect:
raise UserError("Multiple ways to build the same target were specified for: %s" % t)
if t.has_explicit_builder():
if not t.env is None and not t.env is env:
action = t.builder.action
t_contents = action.get_contents(tlist, slist, t.env)
contents = action.get_contents(tlist, slist, env)
if t_contents == contents:
msg = "Two different environments were specified for target %s,\n\tbut they appear to have the same action: %s" % (t, action.genstring(tlist, slist, t.env))
SCons.Warnings.warn(SCons.Warnings.DuplicateEnvironmentWarning, msg)
else:
msg = "Two environments with different actions were specified for the same target: %s" % t
raise UserError(msg)
if builder.multi:
if t.builder != builder:
msg = "Two different builders (%s and %s) were specified for the same target: %s" % (t.builder.get_name(env), builder.get_name(env), t)
raise UserError(msg)
# TODO(batch): list constructed each time!
if t.get_executor().get_all_targets() != tlist:
msg = "Two different target lists have a target in common: %s (from %s and from %s)" % (t, list(map(str, t.get_executor().get_all_targets())), list(map(str, tlist)))
raise UserError(msg)
elif t.sources != slist:
msg = "Multiple ways to build the same target were specified for: %s (from %s and from %s)" % (t, list(map(str, t.sources)), list(map(str, slist)))
raise UserError(msg)
if builder.single_source:
if len(slist) > 1:
raise UserError("More than one source given for single-source builder: targets=%s sources=%s" % (list(map(str,tlist)), list(map(str,slist))))
class EmitterProxy(object):
"""This is a callable class that can act as a
Builder emitter. It holds on to a string that
is a key into an Environment dictionary, and will
look there at actual build time to see if it holds
a callable. If so, we will call that as the actual
emitter."""
def __init__(self, var):
self.var = SCons.Util.to_String(var)
def __call__(self, target, source, env):
emitter = self.var
# Recursively substitute the variable.
# We can't use env.subst() because it deals only
# in strings. Maybe we should change that?
while SCons.Util.is_String(emitter) and emitter in env:
emitter = env[emitter]
if callable(emitter):
target, source = emitter(target, source, env)
elif SCons.Util.is_List(emitter):
for e in emitter:
target, source = e(target, source, env)
return (target, source)
def __cmp__(self, other):
return cmp(self.var, other.var)
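# --- Added illustrative sketch; not part of the original SCons source. ---
# EmitterProxy defers emitter lookup to build time: the Builder is created with
# an Environment-variable reference, and each construction environment may bind
# its own callable under that name. 'PROGEMITTER' and the manifest side effect
# below are hypothetical.
def _example_emitter_proxy_usage(env):
    def add_manifest(target, source, env):
        # emit an extra target alongside the first real target
        return target + [str(target[0]) + '.manifest'], source
    env['PROGEMITTER'] = add_manifest
    return Builder(action='$PROGCOM', emitter='$PROGEMITTER')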
class BuilderBase(object):
"""Base class for Builders, objects that create output
nodes (files) from input nodes (files).
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self, action = None,
prefix = '',
suffix = '',
src_suffix = '',
target_factory = None,
source_factory = None,
target_scanner = None,
source_scanner = None,
emitter = None,
multi = 0,
env = None,
single_source = 0,
name = None,
chdir = _null,
is_explicit = 1,
src_builder = None,
ensure_suffix = False,
**overrides):
if __debug__: logInstanceCreation(self, 'Builder.BuilderBase')
self._memo = {}
self.action = action
self.multi = multi
if SCons.Util.is_Dict(prefix):
prefix = CallableSelector(prefix)
self.prefix = prefix
if SCons.Util.is_Dict(suffix):
suffix = CallableSelector(suffix)
self.env = env
self.single_source = single_source
if 'overrides' in overrides:
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuilderKeywordsWarning,
"The \"overrides\" keyword to Builder() creation has been deprecated;\n" +\
"\tspecify the items as keyword arguments to the Builder() call instead.")
overrides.update(overrides['overrides'])
del overrides['overrides']
if 'scanner' in overrides:
SCons.Warnings.warn(SCons.Warnings.DeprecatedBuilderKeywordsWarning,
"The \"scanner\" keyword to Builder() creation has been deprecated;\n"
"\tuse: source_scanner or target_scanner as appropriate.")
del overrides['scanner']
self.overrides = overrides
self.set_suffix(suffix)
self.set_src_suffix(src_suffix)
self.ensure_suffix = ensure_suffix
self.target_factory = target_factory
self.source_factory = source_factory
self.target_scanner = target_scanner
self.source_scanner = source_scanner
self.emitter = emitter
# Optional Builder name should only be used for Builders
# that don't get attached to construction environments.
if name:
self.name = name
self.executor_kw = {}
if not chdir is _null:
self.executor_kw['chdir'] = chdir
self.is_explicit = is_explicit
if src_builder is None:
src_builder = []
elif not SCons.Util.is_List(src_builder):
src_builder = [ src_builder ]
self.src_builder = src_builder
def __nonzero__(self):
raise InternalError("Do not test for the Node.builder attribute directly; use Node.has_builder() instead")
def get_name(self, env):
"""Attempts to get the name of the Builder.
Look at the BUILDERS variable of env, expecting it to be a
dictionary containing this Builder, and return the key of the
dictionary. If there's no key, then return a directly-configured
name (if there is one) or the name of the class (by default)."""
try:
index = list(env['BUILDERS'].values()).index(self)
return list(env['BUILDERS'].keys())[index]
except (AttributeError, KeyError, TypeError, ValueError):
try:
return self.name
except AttributeError:
return str(self.__class__)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
def splitext(self, path, env=None):
if not env:
env = self.env
if env:
suffixes = self.src_suffixes(env)
else:
suffixes = []
return match_splitext(path, suffixes)
def _adjustixes(self, files, pre, suf, ensure_suffix=False):
if not files:
return []
result = []
if not SCons.Util.is_List(files):
files = [files]
for f in files:
if SCons.Util.is_String(f):
f = SCons.Util.adjustixes(f, pre, suf, ensure_suffix)
result.append(f)
return result
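# (Added note, not part of the original source.) _adjustixes only touches plain
# strings: with pre='lib' and suf='.a', a target given as 'hello' becomes
# 'libhello.a', while Node objects are passed through unchanged.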
def _create_nodes(self, env, target = None, source = None):
"""Create and return lists of target and source nodes.
"""
src_suf = self.get_src_suffix(env)
target_factory = env.get_factory(self.target_factory)
source_factory = env.get_factory(self.source_factory)
source = self._adjustixes(source, None, src_suf)
slist = env.arg2nodes(source, source_factory)
pre = self.get_prefix(env, slist)
suf = self.get_suffix(env, slist)
if target is None:
try:
t_from_s = slist[0].target_from_source
except AttributeError:
raise UserError("Do not know how to create a target from source `%s'" % slist[0])
except IndexError:
tlist = []
else:
splitext = lambda S: self.splitext(S,env)
tlist = [ t_from_s(pre, suf, splitext) ]
else:
target = self._adjustixes(target, pre, suf, self.ensure_suffix)
tlist = env.arg2nodes(target, target_factory, target=target, source=source)
if self.emitter:
# The emitter is going to do str(node), but because we're
# being called *from* a builder invocation, the new targets
# don't yet have a builder set on them and will look like
# source files. Fool the emitter's str() calls by setting
# up a temporary builder on the new targets.
new_targets = []
for t in tlist:
if not t.is_derived():
t.builder_set(self)
new_targets.append(t)
orig_tlist = tlist[:]
orig_slist = slist[:]
target, source = self.emitter(target=tlist, source=slist, env=env)
# Now delete the temporary builders that we attached to any
# new targets, so that _node_errors() doesn't do weird stuff
# to them because it thinks they already have builders.
for t in new_targets:
if t.builder is self:
# Only delete the temporary builder if the emitter
# didn't change it on us.
t.builder_set(None)
# Have to call arg2nodes yet again, since it is legal for
# emitters to spit out strings as well as Node instances.
tlist = env.arg2nodes(target, target_factory,
target=orig_tlist, source=orig_slist)
slist = env.arg2nodes(source, source_factory,
target=orig_tlist, source=orig_slist)
return tlist, slist
def _execute(self, env, target, source, overwarn={}, executor_kw={}):
# We now assume that target and source are lists or None.
if self.src_builder:
source = self.src_builder_sources(env, source, overwarn)
if self.single_source and len(source) > 1 and target is None:
result = []
if target is None: target = [None]*len(source)
for tgt, src in zip(target, source):
if not tgt is None: tgt = [tgt]
if not src is None: src = [src]
result.extend(self._execute(env, tgt, src, overwarn))
return SCons.Node.NodeList(result)
overwarn.warn()
tlist, slist = self._create_nodes(env, target, source)
# Check for errors with the specified target/source lists.
_node_errors(self, env, tlist, slist)
# The targets are fine, so find or make the appropriate Executor to
# build this particular list of targets from this particular list of
# sources.
executor = None
key = None
if self.multi:
try:
executor = tlist[0].get_executor(create = 0)
except (AttributeError, IndexError):
pass
else:
executor.add_sources(slist)
if executor is None:
if not self.action:
fmt = "Builder %s must have an action to build %s."
raise UserError(fmt % (self.get_name(env or self.env),
list(map(str,tlist))))
key = self.action.batch_key(env or self.env, tlist, slist)
if key:
try:
executor = SCons.Executor.GetBatchExecutor(key)
except KeyError:
pass
else:
executor.add_batch(tlist, slist)
if executor is None:
executor = SCons.Executor.Executor(self.action, env, [],
tlist, slist, executor_kw)
if key:
SCons.Executor.AddBatchExecutor(key, executor)
# Now set up the relevant information in the target Nodes themselves.
for t in tlist:
t.cwd = env.fs.getcwd()
t.builder_set(self)
t.env_set(env)
t.add_source(slist)
t.set_executor(executor)
t.set_explicit(self.is_explicit)
return SCons.Node.NodeList(tlist)
def __call__(self, env, target=None, source=None, chdir=_null, **kw):
# We now assume that target and source are lists or None.
# The caller (typically Environment.BuilderWrapper) is
# responsible for converting any scalar values to lists.
if chdir is _null:
ekw = self.executor_kw
else:
ekw = self.executor_kw.copy()
ekw['chdir'] = chdir
if kw:
if 'srcdir' in kw:
def prependDirIfRelative(f, srcdir=kw['srcdir']):
import os.path
if SCons.Util.is_String(f) and not os.path.isabs(f):
f = os.path.join(srcdir, f)
return f
if not SCons.Util.is_List(source):
source = [source]
source = list(map(prependDirIfRelative, source))
del kw['srcdir']
if self.overrides:
env_kw = self.overrides.copy()
env_kw.update(kw)
else:
env_kw = kw
else:
env_kw = self.overrides
env = env.Override(env_kw)
return self._execute(env, target, source, OverrideWarner(kw), ekw)
def adjust_suffix(self, suff):
if suff and not suff[0] in [ '.', '_', '$' ]:
return '.' + suff
return suff
def get_prefix(self, env, sources=[]):
prefix = self.prefix
if callable(prefix):
prefix = prefix(env, sources)
return env.subst(prefix)
def set_suffix(self, suffix):
if not callable(suffix):
suffix = self.adjust_suffix(suffix)
self.suffix = suffix
def get_suffix(self, env, sources=[]):
suffix = self.suffix
if callable(suffix):
suffix = suffix(env, sources)
return env.subst(suffix)
def set_src_suffix(self, src_suffix):
if not src_suffix:
src_suffix = []
elif not SCons.Util.is_List(src_suffix):
src_suffix = [ src_suffix ]
self.src_suffix = [callable(suf) and suf or self.adjust_suffix(suf) for suf in src_suffix]
def get_src_suffix(self, env):
"""Get the first src_suffix in the list of src_suffixes."""
ret = self.src_suffixes(env)
if not ret:
return ''
return ret[0]
def add_emitter(self, suffix, emitter):
"""Add a suffix-emitter mapping to this Builder.
This assumes that emitter has been initialized with an
appropriate dictionary type, and will throw a TypeError if
not, so the caller is responsible for knowing that this is an
appropriate method to call for the Builder in question.
"""
self.emitter[suffix] = emitter
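# (Added usage sketch, not part of the original source.) add_emitter() only
# makes sense when the Builder was created with a dict emitter, e.g.:
#     bld = Builder(action='$CCCOM', emitter={})
#     bld.add_emitter('.c', my_c_emitter)
# where my_c_emitter is a hypothetical (target, source, env) callable.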
def add_src_builder(self, builder):
"""
Add a new Builder to the list of src_builders.
This requires wiping out cached values so that the computed
lists of source suffixes get re-calculated.
"""
self._memo = {}
self.src_builder.append(builder)
def _get_sdict(self, env):
"""
Returns a dictionary mapping all of the source suffixes of all
src_builders of this Builder to the underlying Builder that
should be called first.
This dictionary is used for each target specified, so we save a
lot of extra computation by memoizing it for each construction
environment.
Note that this is re-computed each time, not cached, because there
might be changes to one of our source Builders (or one of their
source Builders, and so on, and so on...) that we can't "see."
The underlying methods we call cache their computed values,
though, so we hope repeatedly aggregating them into a dictionary
like this won't be too big a hit. We may need to look for a
better way to do this if performance data show this has turned
into a significant bottleneck.
"""
sdict = {}
for bld in self.get_src_builders(env):
for suf in bld.src_suffixes(env):
sdict[suf] = bld
return sdict
def src_builder_sources(self, env, source, overwarn={}):
sdict = self._get_sdict(env)
src_suffixes = self.src_suffixes(env)
lengths = list(set(map(len, src_suffixes)))
def match_src_suffix(name, src_suffixes=src_suffixes, lengths=lengths):
node_suffixes = [name[-l:] for l in lengths]
for suf in src_suffixes:
if suf in node_suffixes:
return suf
return None
result = []
for s in SCons.Util.flatten(source):
if SCons.Util.is_String(s):
match_suffix = match_src_suffix(env.subst(s))
if not match_suffix and not '.' in s:
src_suf = self.get_src_suffix(env)
s = self._adjustixes(s, None, src_suf)[0]
else:
match_suffix = match_src_suffix(s.name)
if match_suffix:
try:
bld = sdict[match_suffix]
except KeyError:
result.append(s)
else:
tlist = bld._execute(env, None, [s], overwarn)
# If the subsidiary Builder returned more than one
# target, then filter out any sources that this
# Builder isn't capable of building.
if len(tlist) > 1:
tlist = [t for t in tlist if match_src_suffix(t.name)]
result.extend(tlist)
else:
result.append(s)
source_factory = env.get_factory(self.source_factory)
return env.arg2nodes(result, source_factory)
def _get_src_builders_key(self, env):
return id(env)
memoizer_counters.append(SCons.Memoize.CountDict('get_src_builders', _get_src_builders_key))
def get_src_builders(self, env):
"""
Returns the list of source Builders for this Builder.
This exists mainly to look up Builders referenced as
strings in the 'BUILDERS' variable of the construction
environment and cache the result.
"""
memo_key = id(env)
try:
memo_dict = self._memo['get_src_builders']
except KeyError:
memo_dict = {}
self._memo['get_src_builders'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
builders = []
for bld in self.src_builder:
if SCons.Util.is_String(bld):
try:
bld = env['BUILDERS'][bld]
except KeyError:
continue
builders.append(bld)
memo_dict[memo_key] = builders
return builders
def _subst_src_suffixes_key(self, env):
return id(env)
memoizer_counters.append(SCons.Memoize.CountDict('subst_src_suffixes', _subst_src_suffixes_key))
def subst_src_suffixes(self, env):
"""
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
"""
memo_key = id(env)
try:
memo_dict = self._memo['subst_src_suffixes']
except KeyError:
memo_dict = {}
self._memo['subst_src_suffixes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
suffixes = [env.subst(x) for x in self.src_suffix]
memo_dict[memo_key] = suffixes
return suffixes
def src_suffixes(self, env):
"""
Returns the list of source suffixes for all src_builders of this
Builder.
This is essentially a recursive descent of the src_builder "tree."
(This value isn't cached because there may be changes in a
src_builder many levels deep that we can't see.)
"""
sdict = {}
suffixes = self.subst_src_suffixes(env)
for s in suffixes:
sdict[s] = 1
for builder in self.get_src_builders(env):
for s in builder.src_suffixes(env):
if s not in sdict:
sdict[s] = 1
suffixes.append(s)
return suffixes
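# (Added note, not part of the original source.) For a chained setup such as a
# program Builder whose src_builder is an object-file Builder, this returns
# both the program Builder's own '.o'-style suffixes and the '.c'-style
# suffixes contributed by the source Builder, hence the recursive descent.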
class CompositeBuilder(SCons.Util.Proxy):
"""A Builder Proxy whose main purpose is to always have
a DictCmdGenerator as its action, and to provide access
to the DictCmdGenerator's add_action() method.
"""
def __init__(self, builder, cmdgen):
if __debug__: logInstanceCreation(self, 'Builder.CompositeBuilder')
SCons.Util.Proxy.__init__(self, builder)
# cmdgen should always be an instance of DictCmdGenerator.
self.cmdgen = cmdgen
self.builder = builder
__call__ = SCons.Util.Delegate('__call__')
def add_action(self, suffix, action):
self.cmdgen.add_action(suffix, action)
self.set_src_suffix(self.cmdgen.src_suffixes())
def is_a_Builder(obj):
""""Returns True iff the specified obj is one of our Builder classes.
The test is complicated a bit by the fact that CompositeBuilder
is a proxy, not a subclass of BuilderBase.
"""
return (isinstance(obj, BuilderBase)
or isinstance(obj, CompositeBuilder)
or callable(obj))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 38.697039
| 186
| 0.606222
|
763f7db739ce9fd6b00a655edf9a4fefe2c7e095
| 6,793
|
py
|
Python
|
panel/widgets/base.py
|
govinda18/panel
|
d2b70d9a0a4433d427c627e70328d0bc8621d78b
|
[
"BSD-3-Clause"
] | 2
|
2018-08-23T16:50:40.000Z
|
2018-08-23T20:01:45.000Z
|
panel/widgets/base.py
|
pyviz/pyviz_panels
|
120019e4318ac51bc2b9d0a1b2eb2239c8a0c9ad
|
[
"BSD-3-Clause"
] | null | null | null |
panel/widgets/base.py
|
pyviz/pyviz_panels
|
120019e4318ac51bc2b9d0a1b2eb2239c8a0c9ad
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Defines the Widget base class which provides bi-directional
communication between the rendered dashboard and the Widget
parameters.
"""
from __future__ import annotations
import math
from typing import (
TYPE_CHECKING, Any, Callable, List, Mapping, Optional, Tuple,
)
import param # type: ignore
from ..layout import Row
from ..reactive import Reactive
from ..viewable import Layoutable, Viewable
if TYPE_CHECKING:
from bokeh.document import Document
from bokeh.model import Model
from pyviz_comms import Comm
from ..layout import Panel
class Widget(Reactive):
"""
Widgets allow syncing changes in bokeh widget models with the
parameters on the Widget instance.
"""
disabled = param.Boolean(default=False, doc="""
Whether the widget is disabled.""")
name = param.String(default='')
height = param.Integer(default=None, bounds=(0, None))
width = param.Integer(default=None, bounds=(0, None))
margin = param.Parameter(default=(5, 10), doc="""
Allows creating additional space around the component. May
be specified as a two-tuple of the form (vertical, horizontal)
or a four-tuple (top, right, bottom, left).""")
_rename: Mapping[str, str | None] = {'name': 'title'}
# Whether the widget supports embedding
_supports_embed: bool = False
# Declares the Bokeh model type of the widget
_widget_type: 'Model' = None
__abstract = True
def __init__(self, **params):
if 'name' not in params:
params['name'] = ''
if '_supports_embed' in params:
self._supports_embed = params.pop('_supports_embed')
if '_param_pane' in params:
self._param_pane = params.pop('_param_pane')
else:
self._param_pane = None
super().__init__(**params)
@classmethod
def from_param(cls, parameter: param.Parameter, **params) -> Viewable:
"""
Construct a widget from a Parameter and link the two
bi-directionally.
Parameters
----------
parameter: param.Parameter
A parameter to create the widget from.
params: dict
Keyword arguments to be passed to the widget constructor
Returns
-------
Widget instance linked to the supplied parameter
"""
from ..param import Param
layout = Param(
parameter, widgets={parameter.name: dict(type=cls, **params)},
display_threshold=-math.inf
)
return layout[0]
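# (Added usage sketch, not part of the original source.) Assuming a
# param.Parameterized instance ``obj`` with a numeric parameter ``value``,
# something like ``FloatSlider.from_param(obj.param.value, start=0, end=10)``
# would return a FloatSlider kept in sync with ``obj.value`` in both
# directions; FloatSlider and obj are assumptions, not names from this module.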
def _get_model(
self, doc: 'Document', root: Optional['Model'] = None,
parent: Optional['Model'] = None, comm: Optional['Comm'] = None
) -> 'Model':
model = self._widget_type(**self._process_param_change(self._init_params()))
if root is None:
root = model
# Link parameters and bokeh model
values = self.param.values()
properties = self._filter_properties(list(self._process_param_change(values)))
self._models[root.ref['id']] = (model, parent)
self._link_props(model, properties, doc, root, comm)
return model
def _filter_properties(self, properties: List[str]) -> List[str]:
ignored = list(Layoutable.param)+['loading']
return [p for p in properties if p not in ignored]
def _get_embed_state(
self, root: 'Model', values: Optional[List[Any]] = None, max_opts: int = 3
) -> Tuple['Widget', 'Model', List[Any], Callable[['Model'], Any], str, str]:
"""
Returns the bokeh model and a discrete set of value states
for the widget.
Arguments
---------
root: bokeh.model.Model
The root model of the widget
values: list (optional)
An explicit list of value states to embed
max_opts: int
The maximum number of states the widget should return
Returns
-------
widget: panel.widget.Widget
The Panel widget instance to modify to effect state changes
model: bokeh.model.Model
The bokeh model to record the current value state on
values: list
A list of value states to explore.
getter: callable
A function that returns the state value given the model
on_change: string
The name of the widget property to attach a callback on
js_getter: string
JS snippet that returns the state value given the model
"""
class CompositeWidget(Widget):
"""
A base class for widgets which are made up of two or more other
widgets.
"""
_composite_type: 'Panel' = Row
__abstract = True
def __init__(self, **params):
super().__init__(**params)
layout_params = [p for p in Layoutable.param if p != 'name']
layout = {p: getattr(self, p) for p in layout_params
if getattr(self, p) is not None}
if layout.get('width', self.width) is None and not 'sizing_mode' in layout:
layout['sizing_mode'] = 'stretch_width'
self._composite = self._composite_type(**layout)
self._models = self._composite._models
self.param.watch(self._update_layout_params, layout_params)
def _update_layout_params(self, *events: param.parameterized.Event) -> None:
updates = {event.name: event.new for event in events}
self._composite.param.update(**updates)
def select(
self, selector: Optional[type | Callable[['Viewable'], bool]] = None
) -> List[Viewable]:
"""
Iterates over the Viewable and any potential children,
applying the Selector.
Arguments
---------
selector: type or callable or None
The selector allows selecting a subset of Viewables by
declaring a type or callable function to filter by.
Returns
-------
viewables: list(Viewable)
"""
objects = super().select(selector)
for obj in self._composite.objects:
objects += obj.select(selector)
return objects
def _cleanup(self, root: 'Model') -> None:
self._composite._cleanup(root)
super()._cleanup(root)
def _get_model(
self, doc: 'Document', root: Optional['Model'] = None,
parent: Optional['Model'] = None, comm: Optional['Comm'] = None
) -> 'Model':
model = self._composite._get_model(doc, root, parent, comm)
if root is None:
root = parent = model
self._models[root.ref['id']] = (model, parent)
return model
def __contains__(self, object: Any) -> bool:
return object in self._composite.objects
@property
def _synced_params(self) -> List[str]:
return []
| 32.194313
| 86
| 0.620639
|
4acdf127cec504c1e290947f0b6f88de0c63f82c
| 9,788
|
py
|
Python
|
tests/testflows/rbac/tests/syntax/revoke_role.py
|
taleh007/ClickHouse
|
c94ee3151d698a77c8d32f4b59b2b2678f0a9246
|
[
"Apache-2.0"
] | null | null | null |
tests/testflows/rbac/tests/syntax/revoke_role.py
|
taleh007/ClickHouse
|
c94ee3151d698a77c8d32f4b59b2b2678f0a9246
|
[
"Apache-2.0"
] | null | null | null |
tests/testflows/rbac/tests/syntax/revoke_role.py
|
taleh007/ClickHouse
|
c94ee3151d698a77c8d32f4b59b2b2678f0a9246
|
[
"Apache-2.0"
] | 1
|
2021-04-22T15:27:46.000Z
|
2021-04-22T15:27:46.000Z
|
from contextlib import contextmanager
from testflows.core import *
import rbac.helper.errors as errors
from rbac.requirements import *
@TestFeature
@Name("revoke role")
@Args(format_description=False)
def feature(self, node="clickhouse1"):
"""Check revoke query syntax.
```sql
REVOKE [ON CLUSTER cluster_name] [ADMIN OPTION FOR]
role [,...] FROM {user | role | CURRENT_USER} [,...]
| ALL | ALL EXCEPT {user_name | role_name | CURRENT_USER} [,...]
```
"""
node = self.context.cluster.node(node)
@contextmanager
def setup(users=2,roles=2):
try:
with Given("I have some users"):
for i in range(users):
node.query(f"CREATE USER OR REPLACE user{i}")
with And("I have some roles"):
for i in range(roles):
node.query(f"CREATE ROLE OR REPLACE role{i}")
yield
finally:
with Finally("I drop the users"):
for i in range(users):
node.query(f"DROP USER IF EXISTS user{i}")
with And("I drop the roles"):
for i in range(roles):
node.query(f"DROP ROLE IF EXISTS role{i}")
with Scenario("I revoke a role from a user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role"):
node.query("REVOKE role0 FROM user0")
with Scenario("I revoke a nonexistent role from user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(1,0):
with When("I revoke nonexistent role from a user"):
exitcode, message = errors.role_not_found_in_disk(name="role0")
node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)
# with nonexistent object name, REVOKE assumes type role (treats user0 as role)
with Scenario("I revoke a role from a nonexistent user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(0,1):
with When("I revoke role from a nonexistent user"):
exitcode, message = errors.role_not_found_in_disk(name="user0")
node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)
# with nonexistent object name, REVOKE assumes type role (treats user0 as role)
with Scenario("I revoke a role from ALL EXCEPT nonexistent user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(0,1):
with When("I revoke role from a nonexistent user"):
exitcode, message = errors.role_not_found_in_disk(name="user0")
node.query("REVOKE role0 FROM ALL EXCEPT user0", exitcode=exitcode, message=message)
with Scenario("I revoke a nonexistent role from a nonexistent user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup(0,0):
with When("I revoke nonexistent role from a nonexistent user"):
exitcode, message = errors.role_not_found_in_disk(name="role0")
node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)
with Scenario("I revoke a role from multiple users", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role from multiple users"):
node.query("REVOKE role0 FROM user0, user1")
with Scenario("I revoke multiple roles from multiple users", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
node.query("REVOKE role0, role1 FROM user0, user1")
#user is default, expect exception
with Scenario("I revoke a role from default user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from default user"):
exitcode, message = errors.cannot_update_default()
node.query("REVOKE role0 FROM CURRENT_USER", exitcode=exitcode, message=message)
#user is user0
with Scenario("I revoke a role from current user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from current user"):
node.query("REVOKE role0 FROM CURRENT_USER", settings = [("user","user0")])
#user is default, expect exception
with Scenario("I revoke a role from all", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from all"):
exitcode, message = errors.cannot_update_default()
node.query("REVOKE role0 FROM ALL", exitcode=exitcode, message=message)
#user is default, expect exception
with Scenario("I revoke multiple roles from all", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke multiple roles from all"):
exitcode, message = errors.cannot_update_default()
node.query("REVOKE role0, role1 FROM ALL", exitcode=exitcode, message=message)
with Scenario("I revoke a role from all but current user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from all except current"):
node.query("REVOKE role0 FROM ALL EXCEPT CURRENT_USER")
with Scenario("I revoke a role from all but default user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke a role from all except default"):
node.query("REVOKE role0 FROM ALL EXCEPT default",
settings = [("user","user0")])
with Scenario("I revoke multiple roles from all but default user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Keywords("1.0")]):
with setup():
with When("I revoke multiple roles from all except default"):
node.query("REVOKE role0, role1 FROM ALL EXCEPT default", settings = [("user","user0")])
with Scenario("I revoke a role from a role", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role from a role"):
node.query("REVOKE role0 FROM role1")
with Scenario("I revoke a role from a role and a user", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
with setup():
with When("I revoke a role from multiple roles"):
node.query("REVOKE role0 FROM role1, user0")
with Scenario("I revoke a role from a user on cluster", requirements=[
RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
with Given("I have a role and a user on a cluster"):
node.query("CREATE USER OR REPLACE user0 ON CLUSTER sharded_cluster")
node.query("CREATE ROLE OR REPLACE role0 ON CLUSTER sharded_cluster")
with When("I revoke a role from user on a cluster"):
node.query("REVOKE ON CLUSTER sharded_cluster role0 FROM user0")
with Finally("I drop the user and role"):
node.query("DROP USER IF EXISTS user0 ON CLUSTER sharded_cluster")
node.query("DROP ROLE IF EXISTS role0 ON CLUSTER sharded_cluster")
with Scenario("I revoke a role on fake cluster, throws exception", requirements=[
RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
with When("I revoke a role from user on a cluster"):
exitcode, message = errors.cluster_not_found("fake_cluster")
node.query("REVOKE ON CLUSTER fake_cluster role0 FROM user0", exitcode=exitcode, message=message)
with Scenario("I revoke multiple roles from multiple users on cluster", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_Role_Cluster("1.0")]):
with Given("I have multiple roles and multiple users on a cluster"):
for i in range(2):
node.query(f"CREATE USER OR REPLACE user{i} ON CLUSTER sharded_cluster")
node.query(f"CREATE ROLE OR REPLACE role{i} ON CLUSTER sharded_cluster")
with When("I revoke multiple roles from multiple users on cluster"):
node.query("REVOKE ON CLUSTER sharded_cluster role0, role1 FROM user0, user1")
with Finally("I drop the roles and users"):
for i in range(2):
node.query(f"DROP USER IF EXISTS user{i} ON CLUSTER sharded_cluster")
node.query(f"DROP ROLE IF EXISTS role{i} ON CLUSTER sharded_cluster")
with Scenario("I revoke admin option for role from a user", requirements=[
RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
with setup():
with When("I revoke admin option for role from a user"):
node.query("REVOKE ADMIN OPTION FOR role0 FROM user0")
with Scenario("I revoke admin option for multiple roles from multiple users", requirements=[
RQ_SRS_006_RBAC_Revoke_Role("1.0"),
RQ_SRS_006_RBAC_Revoke_AdminOption("1.0")]):
with setup():
with When("I revoke admin option for multiple roles from multiple users"):
node.query("REVOKE ADMIN OPTION FOR role0, role1 FROM user0, user1")
| 49.434343
| 109
| 0.627401
|
ba921d0b758fb1b369523509665d96049df0fa86
| 1,640
|
py
|
Python
|
test/run/test_ostree.py
|
jmikovic/osbuild
|
1476b88dcf1efa0221101b7b412bbe7934c6d2c6
|
[
"Apache-2.0"
] | null | null | null |
test/run/test_ostree.py
|
jmikovic/osbuild
|
1476b88dcf1efa0221101b7b412bbe7934c6d2c6
|
[
"Apache-2.0"
] | null | null | null |
test/run/test_ostree.py
|
jmikovic/osbuild
|
1476b88dcf1efa0221101b7b412bbe7934c6d2c6
|
[
"Apache-2.0"
] | null | null | null |
#
# Runtime / Integration Tests for ostree pipelines
#
import os
import tempfile
import unittest
from .. import test
@unittest.skipUnless(test.TestBase.have_test_data(), "no test-data access")
@unittest.skipUnless(test.TestBase.can_bind_mount(), "root-only")
class TestOSTree(test.TestBase):
def setUp(self):
self.osbuild = test.OSBuild(self)
def test_ostree(self):
with self.osbuild as osb:
with tempfile.TemporaryDirectory(dir="/var/tmp") as temp_dir:
# Build a container
manifest = os.path.join(self.locate_test_data(),
"manifests/fedora-ostree-container.json")
osb.compile_file(manifest,
output_dir=temp_dir,
checkpoints=["build", "ostree-tree", "ostree-commit"],
exports=["container"])
oci_archive = os.path.join(temp_dir, "container", "fedora-container.tar")
self.assertTrue(os.path.exists(oci_archive))
# build a bootable ISO
manifest = os.path.join(self.locate_test_data(),
"manifests/fedora-ostree-bootiso.json")
osb.compile_file(manifest,
output_dir=temp_dir,
checkpoints=["build", "ostree-tree", "ostree-commit"],
exports=["bootiso"])
bootiso = os.path.join(temp_dir, "bootiso", "fedora-ostree-boot.iso")
self.assertTrue(os.path.exists(bootiso))
| 38.139535
| 89
| 0.545122
|
53659adb0c4013151a1dd85bc73afcf46db07b10
| 5,178
|
py
|
Python
|
swagger_client/models/dimension.py
|
BruceNL/pdf-stamp---1.0
|
d89a5f3bfddb77661588311188fe4ff310b781ee
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/models/dimension.py
|
BruceNL/pdf-stamp---1.0
|
d89a5f3bfddb77661588311188fe4ff310b781ee
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/models/dimension.py
|
BruceNL/pdf-stamp---1.0
|
d89a5f3bfddb77661588311188fe4ff310b781ee
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
PDF stamper
The PDF Stamper API enables the possibility to add both static and dynamic stamps on existing PDFs.
The stamps can consist of one or more barcode, hyperlink, image, line or text elements.
The flow is generally as follows:
1. Make a configuration containing the stamp information
2. Create a job specifying the desired configuration
3. Add one or more PDF files to the job
4. Start the job for processing
5. Retrieve the processed files
Full API Documentation: https://docs.sphereon.com/api/pdf-stamper/1.0
Interactive testing: A web based test console is available in the Sphereon API Store at https://store.sphereon.com
OpenAPI spec version: 1.0
Contact: dev@sphereon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class Dimension(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, unit=None, width=None, height=None):
"""
Dimension - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'unit': 'str',
'width': 'float',
'height': 'float'
}
self.attribute_map = {
'unit': 'unit',
'width': 'width',
'height': 'height'
}
self._unit = unit
self._width = width
self._height = height
@property
def unit(self):
"""
Gets the unit of this Dimension.
:return: The unit of this Dimension.
:rtype: str
"""
return self._unit
@unit.setter
def unit(self, unit):
"""
Sets the unit of this Dimension.
:param unit: The unit of this Dimension.
:type: str
"""
allowed_values = ["pt"]
if unit not in allowed_values:
raise ValueError(
"Invalid value for `unit` ({0}), must be one of {1}"
.format(unit, allowed_values)
)
self._unit = unit
@property
def width(self):
"""
Gets the width of this Dimension.
:return: The width of this Dimension.
:rtype: float
"""
return self._width
@width.setter
def width(self, width):
"""
Sets the width of this Dimension.
:param width: The width of this Dimension.
:type: float
"""
self._width = width
@property
def height(self):
"""
Gets the height of this Dimension.
:return: The height of this Dimension.
:rtype: float
"""
return self._height
@height.setter
def height(self, height):
"""
Sets the height of this Dimension.
:param height: The height of this Dimension.
:type: float
"""
self._height = height
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
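# --- Added illustrative sketch; not part of the generated client code. ---
# Demonstrates the round trip the model supports: construct, then serialise
# with to_dict(). The width/height values are hypothetical A4 page dimensions
# in points; note the unit setter only accepts "pt".
def _example_dimension_usage():
    dim = Dimension(unit="pt", width=595.0, height=842.0)
    assert dim.to_dict() == {"unit": "pt", "width": 595.0, "height": 842.0}
    return dim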
| 28.141304
| 636
| 0.566628
|
3ea11de396cd2e58caab31ecb3c3b0e263414dc6
| 5,034
|
py
|
Python
|
apps/src/tax_credit.py
|
xgerrmann/geld-en-zo
|
d041c9812164b8c693d4ee86c1f897db05973df0
|
[
"MIT"
] | null | null | null |
apps/src/tax_credit.py
|
xgerrmann/geld-en-zo
|
d041c9812164b8c693d4ee86c1f897db05973df0
|
[
"MIT"
] | null | null | null |
apps/src/tax_credit.py
|
xgerrmann/geld-en-zo
|
d041c9812164b8c693d4ee86c1f897db05973df0
|
[
"MIT"
] | null | null | null |
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from pfinsim.taxes import Taxes
import pfinsim
from common import app, default_salary
input_salary = default_salary
def plot_tax_credits(taxes, selected_year):
fig = go.Figure()
gross_incomes = range(0, 120000, 10)
discount_work = []
discount_general = []
discount_total = []
for gross_income in gross_incomes:
work_discount = taxes.calc_work_tax_discount(gross_income)
general_discount = taxes.calc_general_tax_discount(gross_income)
total_discount = work_discount + general_discount
discount_work.append(work_discount)
discount_general.append(general_discount)
discount_total.append(total_discount)
fig.add_trace(go.Scatter(
x=list(gross_incomes),
y=discount_work,
mode='lines',
name='Arbeidskorting'
))
fig.add_trace(go.Scatter(
x=list(gross_incomes),
y=discount_general,
mode='lines',
name='Algemene heffingskorting'
))
fig.add_trace(go.Scatter(
x=list(gross_incomes),
y=discount_total,
mode='lines',
name='Totaal'
))
fig.update_layout(legend=dict(
orientation="v",
yanchor="top",
y=-0.2,
xanchor="right",
x=1,
bgcolor='rgba(0,0,0,0)'
), plot_bgcolor='rgba(0,0,0,0)',
title={
'text': f"Heffingskortingen vs inkomen {selected_year}",
'y': 0.99,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
margin=dict(l=0, r=0, t=30, b=0),
xaxis={
'linecolor': '#BCCCDC',
'showgrid': False,
'fixedrange': True,
'showspikes': True,
'spikethickness': 2,
'spikedash': "dot",
'spikecolor': "#999999",
'spikemode': "across"
},
yaxis={
'linecolor': '#BCCCDC',
'showgrid': False,
'fixedrange': True,
'range': [0, 7000]
},
font=dict(
size=16,
)
)
fig.update_yaxes(title="Hoogte korting [€]")
fig.update_xaxes(title="Inkomen [€]")
return dcc.Graph(figure=fig, id='tax_credit_graph',
config={'displayModeBar': False})
def tax_credit_app(pathname):
available_years = list(pfinsim.common.load_settings()['taxes'].keys())
available_years.sort(reverse=True)
selected_year = available_years[0]
tax_settings = pfinsim.common.load_settings()['taxes'][selected_year]
taxes = Taxes(tax_settings)
return html.Div(children=[
html.Div(children=[plot_tax_credits(taxes, selected_year)], id='tax_credit_plot_div'),
dcc.Dropdown(
id='tax_credit_year_selection',
options=[{'label': year, 'value': year} for year in available_years],
value=selected_year
),
html.Div(
[html.H1('Bereken eigen situatie'),
html.Div([
html.Label(children=['Bruto jaarinkomen'], className='input_label'),
dcc.Input(id="salary_input",
type="number",
value=default_salary,
min=0,
max=10000000,
placeholder=default_salary)
], id="input_div"),
html.Div(id='output')],
id="input_form"
),
])
@app.callback(Output(component_id='tax_credit_plot_div', component_property='children'),
Input(component_id='tax_credit_year_selection', component_property='value'))
def update_tax_credit_plot(selected_year):
taxes = Taxes(pfinsim.common.load_settings()['taxes'][selected_year])
return plot_tax_credits(taxes, selected_year)
@app.callback(
Output(component_id='output', component_property='children'),
[Input(component_id='salary_input', component_property='value'),
Input(component_id='tax_credit_year_selection', component_property='value')],
)
def determine_taxable_income(salary, selected_year):
taxes = Taxes(pfinsim.common.load_settings()['taxes'][selected_year])
if salary is None:
salary = 0
global input_salary
if input_salary != salary:
input_salary = salary
work_tax_credit = taxes.calc_work_tax_discount(input_salary)
general_tax_credit = taxes.calc_general_tax_discount(input_salary)
total_tax_credit = work_tax_credit + general_tax_credit
return (
html.Table(
[
html.Tbody([
html.Tr(children=[html.Td('Arbeidskorting'),
html.Td(f'{work_tax_credit:.2f} €', className="align_right")]),
html.Tr(children=[html.Td('Algemene heffingskorting', className='border_bottom'),
html.Td(f'{general_tax_credit:.2f} €', className="align_right border_bottom")]),
html.Tr(children=[html.Td('Totaal heffingskortingen'),
html.Td(f'{total_tax_credit:.2f} €', className="align_right bottom_row")]),
])
]
)
)
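# --- Added illustrative sketch; not part of the original module. ---
# tax_credit_app(pathname) returns the page layout, so in the surrounding
# multi-page app it would typically be mounted from a URL-routing callback,
# roughly like the following (the 'url' and 'page-content' component ids are
# assumptions, not names defined in this module):
#
#   @app.callback(Output('page-content', 'children'), Input('url', 'pathname'))
#   def display_page(pathname):
#       if pathname == '/heffingskortingen':
#           return tax_credit_app(pathname)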
| 31.4625
| 118
| 0.623758
|
7716a37ee4386ea5a419ddc40b21d32a7de36ea7
| 18,105
|
py
|
Python
|
tests/unit/lms/services/canvas_api/client_test.py
|
hypothesis/lms
|
722dac444dc1e73298eea5193f871f3ddefe46fd
|
[
"BSD-2-Clause"
] | 38
|
2017-12-30T23:49:53.000Z
|
2022-02-15T21:07:49.000Z
|
tests/unit/lms/services/canvas_api/client_test.py
|
hypothesis/lms
|
722dac444dc1e73298eea5193f871f3ddefe46fd
|
[
"BSD-2-Clause"
] | 1,733
|
2017-11-09T18:46:05.000Z
|
2022-03-31T11:05:50.000Z
|
tests/unit/lms/services/canvas_api/client_test.py
|
hypothesis/lms
|
722dac444dc1e73298eea5193f871f3ddefe46fd
|
[
"BSD-2-Clause"
] | 10
|
2018-07-11T17:12:46.000Z
|
2022-01-07T20:00:23.000Z
|
from unittest.mock import create_autospec, sentinel
import pytest
from h_matchers import Any
from pyramid.registry import Registry
from lms.events import FilesDiscoveredEvent
from lms.services import CanvasAPIError, CanvasAPIServerError, OAuth2TokenError
from lms.services.canvas_api.client import CanvasAPIClient
from tests import factories
class TestCanvasAPIClientGetToken:
# This is the only test where we fake out the underlying class, because
# this _one_ call is just a pass through.
def test_get_token(self, canvas_api_client, authenticated_client):
token = canvas_api_client.get_token(sentinel.authorization_code)
authenticated_client.get_token.assert_called_once_with(
sentinel.authorization_code
)
assert token == authenticated_client.get_token.return_value
@pytest.fixture
def authenticated_client(self, patch):
return patch("lms.services.canvas_api._authenticated.AuthenticatedClient")
@pytest.mark.usefixtures("http_session", "oauth_token")
class TestCanvasAPIClient:
def test_authenticated_users_sections(self, canvas_api_client, http_session):
sections = [{"id": 1, "name": "name_1"}, {"id": 2, "name": "name_2"}]
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data={"sections": sections}
)
response = canvas_api_client.authenticated_users_sections("COURSE_ID")
assert response == sections
http_session.send.assert_called_once_with(
Any.request(
"GET",
url=Any.url.with_path("api/v1/courses/COURSE_ID").with_query(
{"include[]": "sections"}
),
),
timeout=Any(),
)
def test_authenticated_users_sections_deduplicates_sections(
self, canvas_api_client, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200,
json_data={
"sections": [{"id": 1, "name": "name"}, {"id": 1, "name": "name"}]
},
)
sections = canvas_api_client.authenticated_users_sections("course_id")
assert sections == [{"id": 1, "name": "name"}]
def test_authenticated_users_sections_raises_CanvasAPIError_with_conflicting_duplicates(
self, canvas_api_client, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200,
json_data={
"sections": [{"id": 1, "name": "name"}, {"id": 1, "name": "DIFFERENT"}]
},
)
with pytest.raises(CanvasAPIError):
canvas_api_client.authenticated_users_sections("course_id")
def test_course_sections(self, canvas_api_client, http_session):
sections = [
{"id": 101, "name": "name_1"},
{"id": 102, "name": "name_2"},
]
sections_with_noise = [
dict(section, unexpected="ignored") for section in sections
]
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data=sections_with_noise
)
response = canvas_api_client.course_sections("COURSE_ID")
assert response == sections
http_session.send.assert_called_once_with(
Any.request(
"GET", url=Any.url.with_path("api/v1/courses/COURSE_ID/sections")
),
timeout=Any(),
)
def test_course_sections_deduplicates_sections(
self, canvas_api_client, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200,
json_data=[{"id": 1, "name": "name"}, {"id": 1, "name": "name"}],
)
sections = canvas_api_client.course_sections("course_id")
assert sections == [{"id": 1, "name": "name"}]
def test_course_sections_raises_CanvasAPIError_with_conflicting_duplicates(
self, canvas_api_client, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200,
json_data=[{"id": 1, "name": "name"}, {"id": 1, "name": "DIFFERENT"}],
)
with pytest.raises(CanvasAPIError):
canvas_api_client.course_sections("course_id")
def test_course_sections_raises_CanvasAPIError_with_too_few_returned(
self, canvas_api_client, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data=[]
)
with pytest.raises(CanvasAPIError):
canvas_api_client.course_sections("dummy")
def test_group_categories_list(self, canvas_api_client, http_session):
group_categories = [
{"id": 1, "name": "Group category 1"},
{"id": 2, "name": "Group category 2"},
]
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data=group_categories
)
response = canvas_api_client.course_group_categories("COURSE_ID")
assert response == group_categories
http_session.send.assert_called_once_with(
Any.request(
"GET",
url=Any.url.with_path(
"api/v1/courses/COURSE_ID/group_categories"
).with_query({"per_page": Any.string()}),
),
timeout=Any(),
)
@pytest.mark.parametrize(
"only_own_groups,include_users", [(True, False), (False, True)]
)
def test_course_groups(
self, only_own_groups, include_users, canvas_api_client, http_session
):
groups = [
{
"id": 1,
"name": "Group 1",
"description": "Group 1",
"group_category_id": 1,
},
{
"id": 2,
"name": "Group 2",
"description": "Group 2",
"group_category_id": 1,
},
]
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data=groups
)
response = canvas_api_client.course_groups(
"COURSE_ID", only_own_groups=only_own_groups, include_users=include_users
)
assert response == groups
expected_params = {
"per_page": Any.string(),
"only_own_groups": str(only_own_groups),
}
expected_timeout = Any()
if include_users:
expected_params["include[]"] = "users"
expected_timeout = (31, 31) # pylint:disable=redefined-variable-type
http_session.send.assert_called_once_with(
Any.request(
"GET",
url=Any.url.with_path("api/v1/courses/COURSE_ID/groups").with_query(
expected_params
),
),
timeout=expected_timeout,
)
@pytest.mark.usefixtures("list_groups_response")
def test_current_user_groups(self, canvas_api_client):
course_id = 1
group_category_id = 1
response = canvas_api_client.current_user_groups(course_id, group_category_id)
assert len(response) == 1
assert response[0]["group_category_id"] == group_category_id
@pytest.mark.usefixtures("list_groups_response")
def test_current_user_groups_no_group_category(self, canvas_api_client):
course_id = 1
response = canvas_api_client.current_user_groups(
course_id, group_category_id=None
)
assert len(response) == 2
@pytest.mark.usefixtures("list_groups_response")
def test_current_user_groups_empty(self, canvas_api_client):
course_id = 1
group_category_id = 10000
response = canvas_api_client.current_user_groups(course_id, group_category_id)
assert not response
@pytest.mark.usefixtures("list_groups_with_users_response")
def test_user_groups_none_match_group_category_id(self, canvas_api_client):
course_id = 1
user_id = 10000
group_category_id = 10000
response = canvas_api_client.user_groups(course_id, user_id, group_category_id)
assert not response
@pytest.mark.usefixtures("list_groups_with_users_response")
def test_user_groups_none_match_user_id(self, canvas_api_client):
course_id = 1
user_id = 10000
group_category_id = 2
response = canvas_api_client.user_groups(course_id, user_id, group_category_id)
assert not response
@pytest.mark.usefixtures("list_groups_with_users_response")
def test_user_groups_no_group_category(self, canvas_api_client):
course_id = 1
user_id = 1
response = canvas_api_client.user_groups(course_id, user_id)
assert len(response) == 1
assert user_id in [u["id"] for u in response[0]["users"]]
@pytest.mark.usefixtures("list_groups_with_users_response")
def test_user_groups(self, canvas_api_client):
course_id = 1
user_id = 1
group_category_id = 2
response = canvas_api_client.user_groups(course_id, user_id, group_category_id)
assert len(response) == 1
assert user_id in [u["id"] for u in response[0]["users"]]
assert response[0]["group_category_id"] == group_category_id
@pytest.mark.usefixtures("list_groups_response")
def test_group_category_groups(self, canvas_api_client, http_session):
response = canvas_api_client.group_category_groups("GROUP_CATEGORY")
assert len(response) == 2
http_session.send.assert_called_once_with(
Any.request(
"GET",
url=Any.url.with_path(
"api/v1/group_categories/GROUP_CATEGORY/groups"
).with_query({"per_page": Any.string()}),
),
timeout=Any(),
)
def test_users_sections(self, canvas_api_client, http_session):
http_session.send.return_value = factories.requests.Response(
status_code=200,
json_data={
"enrollments": [
{"course_section_id": 101, "unexpected": "ignored"},
{"course_section_id": 102, "unexpected": "ignored"},
]
},
)
response = canvas_api_client.users_sections("USER_ID", "COURSE_ID")
assert response == [{"id": 101}, {"id": 102}]
http_session.send.assert_called_once_with(
Any.request(
"GET",
url=Any.url.with_path(
"api/v1/courses/COURSE_ID/users/USER_ID"
).with_query({"include[]": "enrollments"}),
),
timeout=Any(),
)
def test_users_sections_deduplicates_sections(
self, canvas_api_client, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200,
json_data={
"enrollments": [{"course_section_id": 1}, {"course_section_id": 1}]
},
)
sections = canvas_api_client.users_sections("user_id", "course_id")
assert sections == [{"id": 1}]
def test_list_files(self, canvas_api_client, http_session):
files = [
{
"display_name": "display_name_1",
"id": 1,
"updated_at": "updated_at_1",
"size": 12345,
},
{
"display_name": "display_name_1",
"id": 2,
"updated_at": "updated_at_1",
"size": 12345,
},
]
files_with_noise = [dict(file, unexpected="ignored") for file in files]
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data=files_with_noise
)
response = canvas_api_client.list_files("COURSE_ID")
assert response == files
http_session.send.assert_called_once_with(
Any.request(
"GET",
url=Any.url.with_path("api/v1/courses/COURSE_ID/files").with_query(
{
"content_types[]": "application/pdf",
"per_page": Any.string(),
"sort": "position",
}
),
),
timeout=Any(),
)
def test_list_duplicate_files(self, canvas_api_client, http_session):
files = [
{
"display_name": "display_name_1",
"id": 1,
"updated_at": "updated_at_1",
"size": 12345,
},
{
"display_name": "display_name_1",
"id": 1,
"updated_at": "updated_at_1",
"size": 12345,
},
]
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data=files
)
response = canvas_api_client.list_files("COURSE_ID")
assert response == [files[0]]
def test_list_files_emits_event(self, canvas_api_client, http_session, registry):
# pylint: disable=protected-access
canvas_api_client._request.registry = registry
files = [
{
"id": i,
"display_name": "display_name_{i}",
"updated_at": "updated_at_{i}",
"size": 1000 + i,
}
for i in range(2)
]
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data=files
)
canvas_api_client.list_files("COURSE_ID")
canvas_api_client._request.registry.notify.assert_called_once_with(
FilesDiscoveredEvent(
request=canvas_api_client._request,
values=[
{
"type": "canvas_file",
"course_id": "COURSE_ID",
"lms_id": file["id"],
"name": file["display_name"],
"size": file["size"],
}
for file in files
],
)
)
def test_public_url(self, canvas_api_client, http_session):
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data={"public_url": "public_url_value"}
)
response = canvas_api_client.public_url("FILE_ID")
assert response == "public_url_value"
http_session.send.assert_called_once_with(
Any.request(
"GET", url=Any.url.with_path("api/v1/files/FILE_ID/public_url")
),
timeout=Any(),
)
@pytest.fixture
def registry(self):
return create_autospec(Registry, instance=True, spec_set=True)
@pytest.fixture
def list_groups_response(self, http_session):
http_session.send.return_value = factories.requests.Response(
json_data=[
{
"id": 1,
"name": "Group 1",
"description": "Group 1",
"group_category_id": 1,
},
{
"id": 2,
"name": "Group 2",
"description": "Group 2",
"group_category_id": 2,
},
],
status_code=200,
)
@pytest.fixture
def list_groups_with_users_response(self, http_session):
http_session.send.return_value = factories.requests.Response(
json_data=[
{
"id": 1,
"name": "Group 1",
"description": "Group 1",
"group_category_id": 1,
"users": [],
},
{
"id": 2,
"name": "Group 2",
"description": "Group 2",
"group_category_id": 2,
"users": [{"id": 1}],
},
],
status_code=200,
)
class TestMetaBehavior:
def test_methods_require_access_token(self, data_method, oauth2_token_service):
oauth2_token_service.get.side_effect = OAuth2TokenError(
"We don't have a Canvas API access token for this user"
)
with pytest.raises(OAuth2TokenError):
data_method()
@pytest.mark.usefixtures("oauth_token")
def test_methods_raise_CanvasAPIServerError_if_the_response_json_has_the_wrong_format(
self, data_method, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200, json_data={}
)
with pytest.raises(CanvasAPIServerError):
data_method()
@pytest.mark.usefixtures("oauth_token")
def test_methods_raise_CanvasAPIServerError_if_the_response_is_invalid_json(
self, data_method, http_session
):
http_session.send.return_value = factories.requests.Response(
status_code=200, raw="[broken json"
)
with pytest.raises(CanvasAPIServerError):
data_method()
methods = {
"authenticated_users_sections": ["course_id"],
"course_sections": ["course_id"],
"course_group_categories": ["course_id"],
"users_sections": ["user_id", "course_id"],
"list_files": ["course_id"],
"public_url": ["file_id"],
}
@pytest.fixture(params=tuple(methods.items()), ids=tuple(methods.keys()))
def data_method(self, request, canvas_api_client):
method, args = request.param
return lambda: getattr(canvas_api_client, method)(*args)
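    # (Added note, not part of the original tests.) The methods dict plus the
    # parametrized fixture above make every test in this class run once per API
    # method; each run receives a zero-argument callable that invokes that
    # method with the placeholder arguments listed in the dict.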
@pytest.fixture
def canvas_api_client(authenticated_client, pyramid_request):
return CanvasAPIClient(authenticated_client, pyramid_request)
| 33.715084
| 92
| 0.582381
|
aeb113e20f94b2fe958b6d8514394f09972b0412
| 3,230
|
py
|
Python
|
flower_dictionary.py
|
larry-x/flower-dictionary
|
def60dee0de5b4cfdc7c199f864563dcec88071c
|
[
"MIT"
] | null | null | null |
flower_dictionary.py
|
larry-x/flower-dictionary
|
def60dee0de5b4cfdc7c199f864563dcec88071c
|
[
"MIT"
] | null | null | null |
flower_dictionary.py
|
larry-x/flower-dictionary
|
def60dee0de5b4cfdc7c199f864563dcec88071c
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# Flower_Dictionary: originally a program that allows you to catalog new flower names on an alien planet and save them to a txt file.
# There are just two fields: the first field is limited to 20 characters and the second field is limited to 80 characters.
# It can be used to keep reminders or short diary entries. It can also be used as a phone book.
# However, as this is my first Python project, the original code to catalog flowers on an alien planet will be kept.
import re, sys, os, logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')
logging.info('Welcome to the program.')
book = {}
flower_path = None
planet_name = None
def entry_prompt():
global flower_path
global planet_name
while True:
print('What planet are you updating the flower log for? (File name please)')
dr_name = os.getcwd()
planet_name = input()
flower_path = os.path.join(dr_name, planet_name + '.txt')
if planet_name and os.path.exists(flower_path):
validity()
else:
new_file()
def new_file():
while True:
print('The file for planet "%s" does not exist. Create new file?' % planet_name)
answer = input()
if planet_name and answer.lower() == 'yes':
            log_file = open(flower_path, 'w')
            log_file.write(('This is a flower log for planet %s.' % planet_name).center(150) + '\n\n')
            log_file.close()
validity()
elif answer.lower() == 'no':
entry_prompt()
else:
print('Please type "yes" or "no".')
continue
def validity():
while True:
print('Please name this new flower. You can use letters and numbers, but cannot start with a number.')
trythis = input()
trythis_reg = re.compile(r'^[a-zA-Z][a-zA-Z0-9]+$')
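        # Illustrative examples (not part of the original program): this pattern accepts
        # names such as "BlueOrchid" or "Rosetta9" and rejects "9rose" or "blue orchid".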
trythis_match = trythis_reg.search(trythis)
        if trythis_match is None:
print('Sorry, no special characters or spaces allowed, and start with a letter. \n')
continue
elif len(trythis) < 3 or len(trythis) > 15:
print('Sorry, the name must be at least 3 characters, but no more than 15 characters.')
continue
else:
print('Lovely. Now let\'s have a quick description of this flower. Please be brief.')
trythat = input()
while len(trythat) == 0 or len(trythat) > 80:
print('Your description must not exceed 80 characters but must be at least one character.')
trythat = input()
book.update({trythis : trythat})
addanother()
def addanother():
print('Do you have more entries to make? Answer "yes" or "no".')
answer = input()
if answer.lower() == 'yes':
validity()
elif answer.lower() == 'no':
edit_file()
else:
addanother()
def edit_file():
global flower_path
c_file = open(flower_path, 'a')
for n, d in book.items():
c_file.write('{:30s} :: {} \n'.format(n, d))
c_file.close()
content = open(flower_path, 'r')
print(content.read())
content.close()
sys.exit()
entry_prompt()
| 35.108696
| 133
| 0.608978
|
dfa21df37fd35efc4af2eb4a946c07b5e931850d
| 10,119
|
py
|
Python
|
website/apps/monster/migrations/0002_auto__add_monsterinvite.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 61
|
2015-11-10T17:13:46.000Z
|
2021-08-06T17:58:30.000Z
|
website/apps/monster/migrations/0002_auto__add_monsterinvite.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 13
|
2015-11-11T07:49:41.000Z
|
2021-06-09T03:45:31.000Z
|
website/apps/monster/migrations/0002_auto__add_monsterinvite.py
|
bopopescu/drawquest-web
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
[
"BSD-3-Clause"
] | 18
|
2015-11-11T04:50:04.000Z
|
2021-08-20T00:57:11.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MonsterInvite'
db.create_table('monster_monsterinvite', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('monster_part', self.gf('django.db.models.fields.related.ForeignKey')(related_name='invites', to=orm['monster.MonsterPart'])),
('timestamp', self.gf('canvas.util.UnixTimestampField')(default=None, null=True)),
('used_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
))
db.send_create_signal('monster', ['MonsterInvite'])
def backwards(self, orm):
# Deleting model 'MonsterInvite'
db.delete_table('monster_monsterinvite')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'monster.monsterinvite': {
'Meta': {'object_name': 'MonsterInvite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monster_part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invites'", 'to': "orm['monster.MonsterPart']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': 'None', 'null': 'True'}),
'used_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'monster.monsterpart': {
'Meta': {'object_name': 'MonsterPart'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'monster_parts'", 'to': "orm['canvas.Comment']"}),
'hint_slice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'top': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'bottoms'", 'null': 'True', 'blank': 'True', 'to': "orm['monster.MonsterPart']"})
}
}
complete_apps = ['monster']
| 77.244275
| 195
| 0.567349
|
53d402920ea27e179e57794f99b0be93555a7369
| 733
|
py
|
Python
|
var/spack/repos/builtin/packages/fastjson/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/fastjson/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/fastjson/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Fastjson(MavenPackage):
"""Fastjson is a Java library that can be used to convert Java Objects
into their JSON representation. It can also be used to convert a JSON
string to an equivalent Java object. """
homepage = "https://github.com/alibaba/fastjson/wiki"
url = "https://github.com/alibaba/fastjson/archive/1.2.68.tar.gz"
version('1.2.68', sha256='0b3f5308830e5e5abacf9dc8e4115c20153c1cdabec228c3eca48a48c9d5f4d7')
depends_on('java@8', type=('build', 'run'))
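    # Illustrative usage (not part of the original recipe): `spack install fastjson@1.2.68`
    # builds this package against the JDK 8 dependency declared above.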
| 36.65
| 96
| 0.735334
|
bd97323b99c2df3d394de6a3931a32a0fe43d57b
| 32,500
|
py
|
Python
|
example/sockeye/source/sockeye/encoder.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 22
|
2019-02-20T12:42:20.000Z
|
2021-12-25T06:09:46.000Z
|
example/sockeye/source/sockeye/encoder.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 4
|
2019-04-01T07:36:04.000Z
|
2022-03-24T03:11:26.000Z
|
example/sockeye/source/sockeye/encoder.py
|
rah9eu/p3
|
530628be7b7a8dd3e6199c3bebebdbf104005e5f
|
[
"Apache-2.0"
] | 7
|
2019-03-20T16:04:37.000Z
|
2021-04-28T18:40:11.000Z
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Encoders for sequence-to-sequence models.
"""
import logging
from math import ceil, floor
from abc import ABC, abstractmethod
from typing import Callable, List, Optional, Tuple
import mxnet as mx
from sockeye.config import Config
from . import constants as C
from . import rnn
from . import transformer
from . import utils
logger = logging.getLogger(__name__)
def get_encoder(config: Config, fused: bool, embed_weight: Optional[mx.sym.Symbol] = None):
if isinstance(config, RecurrentEncoderConfig):
return get_recurrent_encoder(config, fused, embed_weight)
elif isinstance(config, transformer.TransformerConfig):
return get_transformer_encoder(config, embed_weight)
else:
raise ValueError("Unsupported encoder configuration")
class RecurrentEncoderConfig(Config):
"""
Recurrent encoder configuration.
:param vocab_size: Source vocabulary size.
:param num_embed: Size of embedding layer.
:param embed_dropout: Dropout probability on embedding layer.
:param rnn_config: RNN configuration.
:param conv_config: Optional configuration for convolutional embedding.
:param reverse_input: Reverse embedding sequence before feeding into RNN.
"""
def __init__(self,
vocab_size: int,
num_embed: int,
embed_dropout: float,
rnn_config: rnn.RNNConfig,
conv_config: Optional['ConvolutionalEmbeddingConfig'] = None,
reverse_input: bool = False) -> None:
super().__init__()
self.vocab_size = vocab_size
self.num_embed = num_embed
self.embed_dropout = embed_dropout
self.rnn_config = rnn_config
self.conv_config = conv_config
self.reverse_input = reverse_input
def get_recurrent_encoder(config: RecurrentEncoderConfig, fused: bool,
embed_weight: Optional[mx.sym.Symbol] = None) -> 'Encoder':
"""
Returns a recurrent encoder with embedding, batch2time-major conversion, and bidirectional RNN.
If num_layers > 1, adds additional uni-directional RNNs.
:param config: Configuration for recurrent encoder.
:param fused: Whether to use FusedRNNCell (CuDNN). Only works with GPU context.
:param embed_weight: Optionally use an existing embedding matrix instead of creating a new one.
:return: Encoder instance.
"""
# TODO give more control on encoder architecture
encoders = list() # type: List[Encoder]
encoders.append(Embedding(num_embed=config.num_embed,
vocab_size=config.vocab_size,
prefix=C.SOURCE_EMBEDDING_PREFIX,
dropout=config.embed_dropout,
embed_weight=embed_weight))
if config.conv_config is not None:
encoders.append(ConvolutionalEmbeddingEncoder(config.conv_config))
encoders.append(BatchMajor2TimeMajor())
if config.reverse_input:
encoders.append(ReverseSequence())
if config.rnn_config.residual:
utils.check_condition(config.rnn_config.first_residual_layer >= 2,
"Residual connections on the first encoder layer are not supported")
encoder_class = FusedRecurrentEncoder if fused else RecurrentEncoder
# One layer bi-directional RNN:
encoders.append(BiDirectionalRNNEncoder(rnn_config=config.rnn_config.copy(num_layers=1),
prefix=C.BIDIRECTIONALRNN_PREFIX,
layout=C.TIME_MAJOR))
if config.rnn_config.num_layers > 1:
# Stacked uni-directional RNN:
# Because we already have a one layer bi-rnn we reduce the num_layers as well as the first_residual_layer.
remaining_rnn_config = config.rnn_config.copy(num_layers=config.rnn_config.num_layers - 1,
first_residual_layer=config.rnn_config.first_residual_layer - 1)
encoders.append(encoder_class(rnn_config=remaining_rnn_config,
prefix=C.STACKEDRNN_PREFIX,
layout=C.TIME_MAJOR))
return EncoderSequence(encoders)
def get_transformer_encoder(config: transformer.TransformerConfig,
embed_weight: Optional[mx.sym.Symbol] = None) -> 'Encoder':
"""
Returns a Transformer encoder, consisting of an embedding layer with
positional encodings and a TransformerEncoder instance.
:param config: Configuration for transformer encoder.
:param embed_weight: Optionally use an existing embedding matrix instead of creating a new one.
:return: Encoder instance.
"""
encoders = list() # type: List[Encoder]
encoders.append(Embedding(num_embed=config.model_size,
vocab_size=config.vocab_size,
prefix=C.SOURCE_EMBEDDING_PREFIX,
dropout=config.dropout_residual,
embed_weight=embed_weight,
add_positional_encoding=config.positional_encodings))
if config.conv_config is not None:
encoders.append(ConvolutionalEmbeddingEncoder(config.conv_config))
encoders.append(TransformerEncoder(config))
encoders.append(BatchMajor2TimeMajor())
return EncoderSequence(encoders)
class Encoder(ABC):
"""
Generic encoder interface.
"""
@abstractmethod
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
pass
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
raise NotImplementedError()
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this encoder.
"""
return []
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
Returns the size of the encoded sequence.
"""
return seq_len
class BatchMajor2TimeMajor(Encoder):
"""
Converts batch major data to time major.
"""
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
with mx.AttrScope(__layout__=C.TIME_MAJOR):
return mx.sym.swapaxes(data=data, dim1=0, dim2=1), data_length, seq_len
class ReverseSequence(Encoder):
"""
Reverses the input sequence. Requires time-major layout.
"""
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
data = mx.sym.SequenceReverse(data=data, sequence_length=data_length, use_sequence_length=True)
return data, data_length, seq_len
class Embedding(Encoder):
"""
Thin wrapper around MXNet's Embedding symbol. Works with both time- and batch-major data layouts.
:param num_embed: Embedding size.
:param vocab_size: Source vocabulary size.
:param prefix: Name prefix for symbols of this encoder.
:param dropout: Dropout probability.
:param embed_weight: Optionally use an existing embedding matrix instead of creating a new one.
:param add_positional_encoding: If true, adds positional encodings to embedding.
"""
def __init__(self,
num_embed: int,
vocab_size: int,
prefix: str,
dropout: float,
embed_weight: Optional[mx.sym.Symbol] = None,
add_positional_encoding: bool = False) -> None:
self.num_embed = num_embed
self.vocab_size = vocab_size
self.prefix = prefix
self.dropout = dropout
if embed_weight is not None:
self.embed_weight = embed_weight
else:
self.embed_weight = mx.sym.Variable(prefix + "weight")
self.add_positional_encoding = add_positional_encoding
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
embedding = mx.sym.Embedding(data=data,
input_dim=self.vocab_size,
weight=self.embed_weight,
output_dim=self.num_embed,
name=self.prefix + "embed")
if self.add_positional_encoding:
embedding = mx.sym.broadcast_add(embedding,
self.get_positional_encoding(length=seq_len,
depth=self.num_embed,
name="%spositional_encodings" % self.prefix),
name='%sadd_positional_encodings' % self.prefix)
if self.dropout > 0:
embedding = mx.sym.Dropout(data=embedding, p=self.dropout, name="source_embed_dropout")
return embedding, data_length, seq_len
@staticmethod
def get_positional_encoding(length: int, depth: int, name: str) -> mx.sym.Symbol:
"""
Returns symbol initialized with positional encodings as in Vaswani et al.
:param length: Maximum sequence length
:param depth: Depth.
:param name: Symbol name.
:return: Symbol(1, length, depth)
"""
return mx.sym.BlockGrad(mx.symbol.Custom(length=length, depth=depth, name=name,
op_type='positional_encodings'))
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.num_embed
class EncoderSequence(Encoder):
"""
A sequence of encoders is itself an encoder.
:param encoders: List of encoders.
"""
def __init__(self, encoders: List[Encoder]) -> None:
self.encoders = encoders
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
for encoder in self.encoders:
data, data_length, seq_len = encoder.encode(data, data_length, seq_len)
return data, data_length, seq_len
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
if isinstance(self.encoders[-1], BatchMajor2TimeMajor):
utils.check_condition(len(self.encoders) > 1,
"Cannot return num_hidden from a BatchMajor2TimeMajor encoder only")
return self.encoders[-2].get_num_hidden()
else:
return self.encoders[-1].get_num_hidden()
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this encoder.
"""
cells = []
for encoder in self.encoders:
for cell in encoder.get_rnn_cells():
cells.append(cell)
return cells
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
Returns the size of the encoded sequence.
"""
for encoder in self.encoders:
seq_len = encoder.get_encoded_seq_len(seq_len)
return seq_len
class RecurrentEncoder(Encoder):
"""
Uni-directional (multi-layered) recurrent encoder.
:param rnn_config: RNN configuration.
:param prefix: Prefix.
:param layout: Data layout.
"""
def __init__(self,
rnn_config: rnn.RNNConfig,
prefix: str = C.STACKEDRNN_PREFIX,
layout: str = C.TIME_MAJOR) -> None:
self.rnn_config = rnn_config
self.layout = layout
self.rnn = rnn.get_stacked_rnn(rnn_config, prefix)
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
outputs, _ = self.rnn.unroll(seq_len, inputs=data, merge_outputs=True, layout=self.layout)
return outputs, data_length, seq_len
def get_rnn_cells(self):
"""
Returns RNNCells used in this encoder.
"""
return [self.rnn]
def get_num_hidden(self):
"""
Return the representation size of this encoder.
"""
return self.rnn_config.num_hidden
class FusedRecurrentEncoder(RecurrentEncoder):
"""
Uni-directional (multi-layered) recurrent encoder.
:param rnn_config: RNN configuration.
:param prefix: Prefix.
:param layout: Data layout.
"""
def __init__(self,
rnn_config: rnn.RNNConfig,
prefix: str = C.STACKEDRNN_PREFIX,
layout: str = C.TIME_MAJOR) -> None:
super().__init__(rnn_config, prefix, layout)
logger.warning("%s: FusedRNNCell uses standard MXNet Orthogonal initializer w/ rand_type=uniform", prefix)
self.rnn = mx.rnn.FusedRNNCell(self.rnn_config.num_hidden,
num_layers=self.rnn_config.num_layers,
mode=self.rnn_config.cell_type,
bidirectional=False,
dropout=self.rnn_config.dropout,
forget_bias=self.rnn_config.forget_bias,
prefix=prefix)
class BiDirectionalRNNEncoder(Encoder):
"""
An encoder that runs a forward and a reverse RNN over input data.
States from both RNNs are concatenated together.
:param rnn_config: RNN configuration.
:param prefix: Prefix.
:param layout: Data layout.
:param encoder_class: Recurrent encoder class to use.
"""
def __init__(self,
rnn_config: rnn.RNNConfig,
prefix=C.BIDIRECTIONALRNN_PREFIX,
layout=C.TIME_MAJOR,
encoder_class: Callable = RecurrentEncoder) -> None:
utils.check_condition(rnn_config.num_hidden % 2 == 0,
"num_hidden must be a multiple of 2 for BiDirectionalRNNEncoders.")
self.rnn_config = rnn_config
self.internal_rnn_config = rnn_config.copy(num_hidden=rnn_config.num_hidden // 2)
if layout[0] == 'N':
logger.warning("Batch-major layout for encoder input. Consider using time-major layout for faster speed")
# time-major layout as _encode needs to swap layout for SequenceReverse
self.forward_rnn = encoder_class(rnn_config=self.internal_rnn_config,
prefix=prefix + C.FORWARD_PREFIX,
layout=C.TIME_MAJOR)
self.reverse_rnn = encoder_class(rnn_config=self.internal_rnn_config,
prefix=prefix + C.REVERSE_PREFIX,
layout=C.TIME_MAJOR)
self.layout = layout
self.prefix = prefix
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data (data, data_length, seq_len).
"""
if self.layout[0] == 'N':
data = mx.sym.swapaxes(data=data, dim1=0, dim2=1)
data = self._encode(data, data_length, seq_len)
if self.layout[0] == 'N':
data = mx.sym.swapaxes(data=data, dim1=0, dim2=1)
return data, data_length, seq_len
def _encode(self, data: mx.sym.Symbol, data_length: mx.sym.Symbol, seq_len: int) -> mx.sym.Symbol:
"""
Bidirectionally encodes time-major data.
"""
# (seq_len, batch_size, num_embed)
data_reverse = mx.sym.SequenceReverse(data=data, sequence_length=data_length,
use_sequence_length=True)
# (seq_length, batch, cell_num_hidden)
hidden_forward, _, _ = self.forward_rnn.encode(data, data_length, seq_len)
# (seq_length, batch, cell_num_hidden)
hidden_reverse, _, _ = self.reverse_rnn.encode(data_reverse, data_length, seq_len)
# (seq_length, batch, cell_num_hidden)
hidden_reverse = mx.sym.SequenceReverse(data=hidden_reverse, sequence_length=data_length,
use_sequence_length=True)
# (seq_length, batch, 2 * cell_num_hidden)
hidden_concat = mx.sym.concat(hidden_forward, hidden_reverse, dim=2, name="%s_rnn" % self.prefix)
return hidden_concat
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.rnn_config.num_hidden
def get_rnn_cells(self) -> List[mx.rnn.BaseRNNCell]:
"""
Returns a list of RNNCells used by this encoder.
"""
return self.forward_rnn.get_rnn_cells() + self.reverse_rnn.get_rnn_cells()
class TransformerEncoder(Encoder):
"""
Non-recurrent encoder based on the transformer architecture in:
Attention Is All You Need, Figure 1 (left)
Vaswani et al. (https://arxiv.org/pdf/1706.03762.pdf).
:param config: Configuration for transformer encoder.
:param prefix: Name prefix for operations in this encoder.
"""
def __init__(self,
config: transformer.TransformerConfig,
prefix: str = C.TRANSFORMER_ENCODER_PREFIX) -> None:
self.config = config
self.prefix = prefix
self.layers = [transformer.TransformerEncoderBlock(
config, prefix="%s%d_" % (prefix, i)) for i in range(config.num_layers)]
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data data, data_length, seq_len.
"""
for i, layer in enumerate(self.layers):
# (batch_size, seq_len, config.model_size)
data = layer(data, data_length, seq_len)
return data, data_length, seq_len
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.config.model_size
class ConvolutionalEmbeddingConfig(Config):
"""
Convolutional embedding encoder configuration.
:param num_embed: Input embedding size.
:param output_dim: Output segment embedding size.
:param max_filter_width: Maximum filter width for convolutions.
:param num_filters: Number of filters of each width.
:param pool_stride: Stride for pooling layer after convolutions.
:param num_highway_layers: Number of highway layers for segment embeddings.
:param dropout: Dropout probability.
"""
def __init__(self,
num_embed: int,
output_dim: int = None,
max_filter_width: int = 8,
num_filters: Tuple[int, ...] = (200, 200, 250, 250, 300, 300, 300, 300),
pool_stride: int = 5,
num_highway_layers: int = 4,
dropout: float = 0.0,
add_positional_encoding: bool = False) -> None:
super().__init__()
self.num_embed = num_embed
self.output_dim = output_dim
self.max_filter_width = max_filter_width
self.num_filters = num_filters
self.pool_stride = pool_stride
self.num_highway_layers = num_highway_layers
self.dropout = dropout
self.add_positional_encoding = add_positional_encoding
if self.output_dim is None:
self.output_dim = sum(self.num_filters)
class ConvolutionalEmbeddingEncoder(Encoder):
"""
An encoder developed to map a sequence of character embeddings to a shorter sequence of segment
embeddings using convolutional, pooling, and highway layers. More generally, it maps a sequence
of input embeddings to a sequence of span embeddings.
* "Fully Character-Level Neural Machine Translation without Explicit Segmentation"
Jason Lee; Kyunghyun Cho; Thomas Hofmann (https://arxiv.org/pdf/1610.03017.pdf)
:param config: Convolutional embedding config.
:param prefix: Name prefix for symbols of this encoder.
"""
def __init__(self,
config: ConvolutionalEmbeddingConfig,
prefix: str = C.CHAR_SEQ_ENCODER_PREFIX) -> None:
utils.check_condition(len(config.num_filters) == config.max_filter_width,
"num_filters must have max_filter_width elements.")
self.num_embed = config.num_embed
self.output_dim = config.output_dim
self.max_filter_width = config.max_filter_width
self.num_filters = config.num_filters[:]
self.pool_stride = config.pool_stride
self.num_highway_layers = config.num_highway_layers
self.prefix = prefix
self.dropout = config.dropout
self.add_positional_encoding = config.add_positional_encoding
self.conv_weight = {filter_width: mx.sym.Variable("%s%s%d%s" % (self.prefix, "conv_", filter_width, "_weight"))
for filter_width in range(1, self.max_filter_width + 1)}
self.conv_bias = {filter_width: mx.sym.Variable("%s%s%d%s" % (self.prefix, "conv_", filter_width, "_bias"))
for filter_width in range(1, self.max_filter_width + 1)}
self.project_weight = mx.sym.Variable(self.prefix + "project_weight")
self.project_bias = mx.sym.Variable(self.prefix + "project_bias")
self.gate_weight = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "gate_", i, "_weight"))
for i in range(self.num_highway_layers)]
self.gate_bias = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "gate_", i, "_bias"))
for i in range(self.num_highway_layers)]
self.transform_weight = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "transform_", i, "_weight"))
for i in range(self.num_highway_layers)]
self.transform_bias = [mx.sym.Variable("%s%s%d%s" % (self.prefix, "transform_", i, "_bias"))
for i in range(self.num_highway_layers)]
def encode(self,
data: mx.sym.Symbol,
data_length: mx.sym.Symbol,
seq_len: int) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, int]:
"""
Encodes data given sequence lengths of individual examples and maximum sequence length.
:param data: Input data.
:param data_length: Vector with sequence lengths.
:param seq_len: Maximum sequence length.
:return: Encoded versions of input data data, data_length, seq_len.
"""
total_num_filters = sum(self.num_filters)
encoded_seq_len = self.get_encoded_seq_len(seq_len)
# (batch_size, channel=1, seq_len, num_embed)
data = mx.sym.Reshape(data=data, shape=(-1, 1, seq_len, self.num_embed))
# Convolution filters of width 1..N
conv_outputs = []
for filter_width, num_filter in enumerate(self.num_filters, 1):
# "half" padding: output length == input length
pad_before = ceil((filter_width - 1) / 2)
pad_after = floor((filter_width - 1) / 2)
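            # e.g. filter_width=3 pads (1, 1) and filter_width=4 pads (2, 1), so the
            # convolution output length stays equal to seq_len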
# (batch_size, channel=1, seq_len + (filter_width - 1), num_embed)
padded = mx.sym.pad(data=data,
mode="constant",
constant_value=0,
pad_width=(0, 0, 0, 0, pad_before, pad_after, 0, 0))
# (batch_size, num_filter, seq_len, num_scores=1)
conv = mx.sym.Convolution(data=padded,
# cudnn_tune="off",
kernel=(filter_width, self.num_embed),
num_filter=num_filter,
weight=self.conv_weight[filter_width],
bias=self.conv_bias[filter_width])
conv = mx.sym.Activation(data=conv, act_type="relu")
conv_outputs.append(conv)
# (batch_size, total_num_filters, seq_len, num_scores=1)
conv_concat = mx.sym.concat(*conv_outputs, dim=1)
# Max pooling with stride
uncovered = seq_len % self.pool_stride
if uncovered > 0:
pad_after = self.pool_stride - uncovered
# (batch_size, total_num_filters, seq_len + pad_to_final_stride, num_scores=1)
conv_concat = mx.sym.pad(data=conv_concat,
mode="constant",
constant_value=0,
pad_width=(0, 0, 0, 0, 0, pad_after, 0, 0))
# (batch_size, total_num_filters, seq_len/stride, num_scores=1)
pool = mx.sym.Pooling(data=conv_concat,
pool_type="max",
kernel=(self.pool_stride, 1),
stride=(self.pool_stride, 1))
# (batch_size, total_num_filters, seq_len/stride)
pool = mx.sym.reshape(data=pool,
shape=(-1, total_num_filters, encoded_seq_len))
# (batch_size, seq_len/stride, total_num_filters)
pool = mx.sym.swapaxes(data=pool, dim1=1, dim2=2)
if self.dropout > 0:
pool = mx.sym.Dropout(data=pool, p=self.dropout)
# Raw segment embeddings reshaped for highway network
# (batch_size * seq_len/stride, total_num_filters)
seg_embedding = mx.sym.Reshape(data=pool, shape=(-3, total_num_filters))
# Projection layer if requested output dimension is different from total number of filters
# (TransformerEncoder compatibility, not in original paper)
if self.output_dim != total_num_filters:
            # (batch_size * seq_len/stride, output_dim)
seg_embedding = mx.sym.FullyConnected(data=seg_embedding,
num_hidden=self.output_dim,
weight=self.project_weight,
bias=self.project_bias)
seg_embedding = mx.sym.Activation(data=seg_embedding, act_type="relu")
if self.dropout > 0:
seg_embedding = mx.sym.Dropout(data=seg_embedding, p=self.dropout)
# Highway network
for i in range(self.num_highway_layers):
# Gate
gate = mx.sym.FullyConnected(data=seg_embedding,
num_hidden=self.output_dim,
weight=self.gate_weight[i],
bias=self.gate_bias[i])
gate = mx.sym.Activation(data=gate, act_type="sigmoid")
if self.dropout > 0:
gate = mx.sym.Dropout(data=gate, p=self.dropout)
# Transform
transform = mx.sym.FullyConnected(data=seg_embedding,
num_hidden=self.output_dim,
weight=self.transform_weight[i],
bias=self.transform_bias[i])
transform = mx.sym.Activation(data=transform, act_type="relu")
if self.dropout > 0:
transform = mx.sym.Dropout(data=transform, p=self.dropout)
# Connection
seg_embedding = gate * transform + (1 - gate) * seg_embedding
        # (batch_size, seq_len/stride, output_dim) aka
        # (batch_size, encoded_seq_len, num_segment_embed)
seg_embedding = mx.sym.Reshape(data=seg_embedding,
shape=(-1, encoded_seq_len, self.output_dim))
# If specified, add positional encodings to segment embeddings
# (TransformerEncoder compatibility, not in original paper)
if self.add_positional_encoding:
seg_embedding = mx.sym.broadcast_add(seg_embedding,
Embedding.get_positional_encoding(
length=encoded_seq_len,
depth=self.output_dim,
name="%spositional_encodings" % self.prefix),
name='%sadd_positional_encodings' % self.prefix)
# Dropout on final segment embeddings
if self.dropout > 0:
seg_embedding = mx.sym.Dropout(data=seg_embedding, p=self.dropout)
# Ceiling function isn't differentiable so this will throw errors if we
# attempt to compute gradients. Fortunately we aren't updating inputs
# so we can just block the backward pass here.
encoded_data_length = mx.sym.BlockGrad(mx.sym.ceil(data_length / self.pool_stride))
return seg_embedding, encoded_data_length, encoded_seq_len
def get_num_hidden(self) -> int:
"""
Return the representation size of this encoder.
"""
return self.output_dim
def get_encoded_seq_len(self, seq_len: int) -> int:
"""
Returns the size of the encoded sequence.
"""
return int(ceil(seq_len / self.pool_stride))
| 42.539267
| 119
| 0.604185
|
319e38e1776b41e5c512ef361a49a7d8bed04377
| 997
|
py
|
Python
|
boot-hid.py
|
dhylands/upy-examples
|
90cca32f0c6c65c33967da9ac1a998e731c60d91
|
[
"MIT"
] | 78
|
2015-01-15T23:24:21.000Z
|
2022-02-25T09:24:58.000Z
|
boot-hid.py
|
dhylands/upy-examples
|
90cca32f0c6c65c33967da9ac1a998e731c60d91
|
[
"MIT"
] | 1
|
2015-02-04T00:51:52.000Z
|
2015-02-04T00:51:52.000Z
|
boot-hid.py
|
dhylands/upy-examples
|
90cca32f0c6c65c33967da9ac1a998e731c60d91
|
[
"MIT"
] | 26
|
2015-02-03T21:26:33.000Z
|
2022-02-21T02:57:46.000Z
|
# boot.py -- run on boot-up
#
# This is some common initialization that I like to keep around.
import pyb
import micropython
import sys
#pyb.main('main.py') # main script to run after this one
#pyb.usb_mode('CDC') # act as a serial only
#pyb.usb_mode('CDC+MSC') # act as a serial and a storage device
pyb.usb_mode('CDC+HID') # act as a serial device and a mouse
def bl():
pyb.bootloader()
def pins():
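    # Print every board pin name alongside its current configuration.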
for pin_name in dir(pyb.Pin.board):
pin = pyb.Pin(pin_name)
print('{:10s} {:s}'.format(pin_name, str(pin)))
def af():
for pin_name in dir(pyb.Pin.board):
pin = pyb.Pin(pin_name)
print('{:10s} {:s}'.format(pin_name, str(pin.af_list())))
def init():
if True:
uart = pyb.UART(6,115200)
pyb.repl_uart(uart)
print("REPL is also on UART 6 (Y1=Tx Y2=Rx)")
if True:
bufsize = 100
print("Setting alloc_emergency_exception_buf to", bufsize)
micropython.alloc_emergency_exception_buf(bufsize)
init()
| 26.945946
| 67
| 0.641926
|
834d4c93301bbd137e67de86cba380ce2969050a
| 2,796
|
py
|
Python
|
data/cirq_new/cirq_program/startCirq_pragma773.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma773.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_pragma773.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=19
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
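            # Note (editorial): the two rounds of X gates below cancel each other out, so the
            # rewrite effectively replaces the CZPowGate with a plain CZ.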
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.Y.on(input_qubit[2])) # number=13
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=11
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.H.on(input_qubit[1])) # number=16
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.Y.on(input_qubit[1])) # number=14
c.append(cirq.Y.on(input_qubit[1])) # number=15
c.append(cirq.Y.on(input_qubit[3])) # number=17
c.append(cirq.Y.on(input_qubit[3])) # number=18
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =4000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma773.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 32.894118
| 92
| 0.643777
|
fe1d9841a03e601b108132654f1df7937f543a90
| 533
|
py
|
Python
|
scripts/rpc/pmem.py
|
ykirichok/spdk
|
db7f82baf819740025da0ba271745e89ba682f47
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/rpc/pmem.py
|
ykirichok/spdk
|
db7f82baf819740025da0ba271745e89ba682f47
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/rpc/pmem.py
|
ykirichok/spdk
|
db7f82baf819740025da0ba271745e89ba682f47
|
[
"BSD-3-Clause"
] | 2
|
2019-01-30T16:18:59.000Z
|
2020-05-27T15:41:37.000Z
|
def create_pmem_pool(args):
    # Integer division keeps num_blocks an int for the JSON-RPC call
    num_blocks = (args.total_size * 1024 * 1024) // args.block_size
params = {'pmem_file': args.pmem_file,
'num_blocks': num_blocks,
'block_size': args.block_size}
return args.client.call('create_pmem_pool', params)
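# Illustrative example (not part of the original script): with total_size=32 (MB) and
# block_size=512, num_blocks works out to 32 * 1024 * 1024 // 512 = 65536.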
def pmem_pool_info(args):
params = {'pmem_file': args.pmem_file}
return args.client.call('pmem_pool_info', params)
def delete_pmem_pool(args):
params = {'pmem_file': args.pmem_file}
return args.client.call('delete_pmem_pool', params)
| 31.352941
| 66
| 0.684803
|
a854c012226cff8495d0aea0f51b22dcf71c1bea
| 4,929
|
py
|
Python
|
pandas/core/config_init.py
|
seberg/pandas
|
223a1fdd8286f77f1d5c3b3c589816af627d295b
|
[
"BSD-2-Clause"
] | null | null | null |
pandas/core/config_init.py
|
seberg/pandas
|
223a1fdd8286f77f1d5c3b3c589816af627d295b
|
[
"BSD-2-Clause"
] | null | null | null |
pandas/core/config_init.py
|
seberg/pandas
|
223a1fdd8286f77f1d5c3b3c589816af627d295b
|
[
"BSD-2-Clause"
] | null | null | null |
import pandas.core.config as cf
from pandas.core.config import is_int,is_bool,is_text,is_float
from pandas.core.format import detect_console_encoding
"""
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. If register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.
"""
###########################################
# options from the "print" namespace
pc_precision_doc="""
: int
Floating point output precision (number of significant digits). This is
only a suggestion
"""
pc_colspace_doc="""
: int
Default space for DataFrame columns, defaults to 12
"""
pc_max_rows_doc="""
: int
This sets the maximum number of rows pandas should output when printing
out various output. For example, this value determines whether the repr()
    for a DataFrame prints out fully or just a summary repr.
"""
pc_max_cols_doc="""
: int
max_rows and max_columns are used in __repr__() methods to decide if
to_string() or info() is used to render an object to a string.
Either one, or both can be set to 0 (experimental). Pandas will figure
    out how big the terminal is and will not display more rows and/or
    columns than can fit on it.
"""
pc_nb_repr_h_doc="""
: boolean
When True (default), IPython notebook will use html representation for
pandas objects (if it is available).
"""
pc_date_dayfirst_doc="""
: boolean
When True, prints and parses dates with the day first, eg 20/01/2005
"""
pc_date_yearfirst_doc="""
: boolean
When True, prints and parses dates with the year first, eg 2005/01/20
"""
pc_pprint_nest_depth="""
: int
Defaults to 3.
Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc="""
: boolean
Default True, "sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
pc_encoding_doc="""
: str/unicode
Defaults to the detected encoding of the console.
Specifies the encoding to be used for strings returned by to_string,
these are generally strings meant to be displayed on the console.
"""
float_format_doc="""
: callable
The callable should accept a floating point number and return
a string with the desired format of the number. This is used
in some places like SeriesFormatter.
See core.format.EngFormatter for an example.
"""
max_colwidth_doc="""
: int
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
placeholder is embedded in the output.
"""
colheader_justify_doc="""
: 'left'/'right'
    Controls the justification of column headers. Used by DataFrameFormatter.
"""
pc_expand_repr_doc="""
: boolean
Default False
Whether to print out the full DataFrame repr for wide DataFrames
across multiple lines.
If False, the summary representation is shown.
"""
pc_line_width_doc="""
: int
Default 80
When printing wide DataFrames, this is the width of each line.
"""
with cf.config_prefix('print'):
cf.register_option('precision', 7, pc_precision_doc, validator=is_int)
cf.register_option('float_format', None, float_format_doc)
cf.register_option('column_space', 12, validator=is_int)
cf.register_option('max_rows', 100, pc_max_rows_doc, validator=is_int)
cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_int)
cf.register_option('max_columns', 20, pc_max_cols_doc, validator=is_int)
cf.register_option('colheader_justify', 'right', colheader_justify_doc,
validator=is_text)
cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc,
validator=is_bool)
cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc,
validator=is_bool)
cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc,
validator=is_bool)
cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth,
validator=is_int)
cf.register_option('multi_sparse', True, pc_multi_sparse_doc,
validator=is_bool)
cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,
validator=is_text)
cf.register_option('expand_frame_repr', True, pc_expand_repr_doc)
cf.register_option('line_width', 80, pc_line_width_doc)
tc_interactive_doc="""
: boolean
Default False
Whether to simulate interactive mode for purposes of testing
"""
with cf.config_prefix('test'):
cf.register_option('interactive', False, tc_interactive_doc)
| 33.080537
| 78
| 0.722256
|
b82e5e935117f34bc652a92eb79f7d65ba2ec48b
| 49,033
|
py
|
Python
|
gui/qt/__init__.py
|
stevehromov/Electron-Cash
|
1d4910d03d547b28c71070672df7ce2ebb67c5dd
|
[
"MIT"
] | 1
|
2020-06-09T04:01:43.000Z
|
2020-06-09T04:01:43.000Z
|
gui/qt/__init__.py
|
baby636/Electron-Cash
|
2deab3a3b15c9194949199bac662401803c10227
|
[
"MIT"
] | 18
|
2019-11-14T03:55:32.000Z
|
2020-10-25T10:53:35.000Z
|
gui/qt/__init__.py
|
stevehromov/Electron-Cash
|
1d4910d03d547b28c71070672df7ce2ebb67c5dd
|
[
"MIT"
] | 3
|
2019-02-21T07:57:57.000Z
|
2020-01-09T15:03:17.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gc, os, platform, shutil, signal, sys, traceback
try:
import PyQt5
except Exception:
if sys.platform.startswith('win'):
msg = ("\n\nError: Could not import PyQt5.\n"
"If you are running the release .exe, this is a bug (please"
" contact the developers in that case).\n"
"If you are running from source, then you may try this from the command-line:\n\n"
" python -m pip install pyqt5\n\n")
elif sys.platform.startswith('darw'):
msg = ("\n\nError: Could not import PyQt5.\n"
"If you are running the release .app, this is a bug (please"
" contact the developers in that case).\n"
"If you are running from source, then you may try this from the command-line:\n\n"
" python3 -m pip install --user -I pyqt5\n\n")
else:
msg = ("\n\nError: Could not import PyQt5.\n"
"You may try:\n\n"
" python3 -m pip install --user -I pyqt5\n\n"
"Or, if on Linux Ubuntu, Debian, etc:\n\n"
" sudo apt-get install python3-pyqt5\n\n")
sys.exit(msg)
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from electroncash.i18n import _, set_language
from electroncash.plugins import run_hook
from electroncash import WalletStorage
from electroncash.util import (UserCancelled, PrintError, print_error,
standardize_path, finalization_print_error, Weak,
get_new_wallet_name, Handlers)
from electroncash import version
from electroncash.address import Address
from .installwizard import InstallWizard, GoBack
from . import icons # This needs to be imported once app-wide then the :icons/ namespace becomes available for Qt icon filenames.
from .util import * # * needed for plugins
from .main_window import ElectrumWindow
from .network_dialog import NetworkDialog
from .exception_window import Exception_Hook
from .update_checker import UpdateChecker
class ElectrumGui(QObject, PrintError):
new_window_signal = pyqtSignal(str, object)
update_available_signal = pyqtSignal(bool)
cashaddr_toggled_signal = pyqtSignal() # app-wide signal for when cashaddr format is toggled. This used to live in each ElectrumWindow instance but it was recently refactored to here.
cashaddr_status_button_hidden_signal = pyqtSignal(bool) # app-wide signal for when cashaddr toggle button is hidden from the status bar
shutdown_signal = pyqtSignal() # signal for requesting an app-wide full shutdown
do_in_main_thread_signal = pyqtSignal(object, object, object)
instance = None
def __init__(self, config, daemon, plugins):
super(__class__, self).__init__() # QObject init
assert __class__.instance is None, "ElectrumGui is a singleton, yet an instance appears to already exist! FIXME!"
__class__.instance = self
set_language(config.get('language'))
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self._setup_do_in_main_thread_handler()
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#if daemon.network:
# from electroncash.util import DebugMem
# from electroncash.wallet import Abstract_Wallet
# from electroncash.verifier import SPV
# from electroncash.synchronizer import Synchronizer
# daemon.network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
call_after_app = self._pre_and_post_app_setup()
try:
self.app = QApplication(sys.argv)
finally:
call_after_app()
self._load_fonts() # this needs to be done very early, before the font engine loads fonts.. out of paranoia
self._exit_if_required_pyqt_is_missing() # This may immediately exit the app if missing required PyQt5 modules, so it should also be done early.
self.new_version_available = None
self._set_icon()
self.app.installEventFilter(self)
self.timer = QTimer(self); self.timer.setSingleShot(False); self.timer.setInterval(500) #msec
self.gc_timer = QTimer(self); self.gc_timer.setSingleShot(True); self.gc_timer.timeout.connect(ElectrumGui.gc); self.gc_timer.setInterval(500) #msec
self.nd = None
self._last_active_window = None # we remember the last activated ElectrumWindow as a Weak.ref
Address.show_cashaddr(self.is_cashaddr())
# Dark Theme -- ideally set this before any widgets are created.
self.set_dark_theme_if_needed()
# /
# Wallet Password Cache
# wallet -> (password, QTimer) map for some plugins (like CashShuffle)
# that need wallet passwords to operate, and we don't want to prompt
# for pw twice right after the InstallWizard runs (see #106).
# Entries in this map are deleted after 10 seconds by the QTimer (which
# also deletes itself)
self._wallet_password_cache = Weak.KeyDictionary()
# /
self.update_checker = UpdateChecker()
self.update_checker_timer = QTimer(self); self.update_checker_timer.timeout.connect(self.on_auto_update_timeout); self.update_checker_timer.setSingleShot(False)
self.update_checker.got_new_version.connect(self.on_new_version)
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), self)
self.tray.setToolTip('Electron Cash')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.new_window_signal.connect(self.start_new_window)
if self.has_auto_update_check():
self._start_auto_update_timer(first_run = True)
self.app.focusChanged.connect(self.on_focus_change) # track last window the user interacted with
self.shutdown_signal.connect(self.close, Qt.QueuedConnection)
run_hook('init_qt', self)
# We did this once already in the set_dark_theme call, but we do this
# again here just in case some plugin modified the color scheme.
ColorScheme.update_from_widget(QWidget())
self._check_and_warn_qt_version()
def __del__(self):
stale = True
if __class__.instance is self:
stale = False
__class__.instance = None
print_error("[{}] finalized{}".format(__class__.__name__, ' (stale instance)' if stale else ''))
if hasattr(super(), '__del__'):
super().__del__()
def _setup_do_in_main_thread_handler(self):
''' Sets up "do_in_main_thread" handler mechanism for Qt GUI. '''
self.do_in_main_thread_signal.connect(self._do_in_main_thread_handler_slot)
orig_handler = Handlers.do_in_main_thread
weakSelf = Weak.ref(self)
def my_do_in_main_thread_handler(func, *args, **kwargs):
strongSelf = weakSelf()
if strongSelf:
# We are still alive, emit the signal which will be handled
# in the main thread.
strongSelf.do_in_main_thread_signal.emit(func, args, kwargs)
else:
# We died. Uninstall this handler, invoke original handler.
Handlers.do_in_main_thread = orig_handler
orig_handler(func, *args, **kwargs)
Handlers.do_in_main_thread = my_do_in_main_thread_handler
def _do_in_main_thread_handler_slot(self, func, args, kwargs):
''' Hooked in to util.Handlers.do_in_main_thread via the
do_in_main_thread_signal. This ensures that there is an app-wide
mechanism for posting invocations to the main thread. Currently
CashFusion uses this mechanism, but other code may as well. '''
func(*args, **kwargs)
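    # Hedged usage sketch (editor's illustration, not part of Electron Cash): code
    # running on a network or plugin thread can marshal a GUI update onto the Qt
    # main thread through the handler installed above. The helper names below are
    # hypothetical; only Handlers.do_in_main_thread comes from the code above.
    #
    #   def on_history_updated(wallet):              # hypothetical plugin callback
    #       def refresh():
    #           window = find_window_for(wallet)     # hypothetical lookup
    #           window.update_status()
    #       Handlers.do_in_main_thread(refresh)      # delivered via the Qt signal, runs in the GUI thread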
def _pre_and_post_app_setup(self):
''' Call this before instantiating the QApplication object. It sets up
some platform-specific miscellany that need to happen before the
QApplication is constructed.
A function is returned. This function *must* be called after the
QApplication is constructed. '''
callables = []
def call_callables():
for func in callables:
func()
ret = call_callables
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electron-cash.desktop')
if self.windows_qt_use_freetype:
# Use FreeType for font rendering on Windows. This fixes rendering
# of the Schnorr sigil and allows us to load the Noto Color Emoji
# font if needed.
os.environ['QT_QPA_PLATFORM'] = 'windows:fontengine=freetype'
QCoreApplication.setAttribute(Qt.AA_X11InitThreads)
if hasattr(Qt, "AA_ShareOpenGLContexts"):
QCoreApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
if sys.platform not in ('darwin',) and hasattr(Qt, "AA_EnableHighDpiScaling"):
# The below only applies to non-macOS. On macOS this setting is
# never used (because it is implicitly auto-negotiated by the OS
            # in a different way).
#
# qt_disable_highdpi will be set to None by default, or True if
# specified on command-line. The command-line override is intended
            # to suppress high-dpi mode just for this run, for testing.
#
# The more permanent setting is qt_enable_highdpi which is the GUI
# preferences option, so we don't enable highdpi if it's explicitly
# set to False in the GUI.
#
# The default on Linux, Windows, etc is to enable high dpi
disable_scaling = self.config.get('qt_disable_highdpi', False)
enable_scaling = self.config.get('qt_enable_highdpi', True)
if not disable_scaling and enable_scaling:
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
if hasattr(Qt, "AA_UseHighDpiPixmaps"):
QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)
# macOS Mojave "font rendering looks terrible on PyQt5.11" workaround.
# See: https://old.reddit.com/r/apple/comments/9leavs/fix_mojave_font_rendering_issues_on_a_perapp_basis/
# This affects PyQt 5.11 (which is what we ship in the macOS El Capitan
# .dmg). We apply the workaround and also warn the user to not use
# the El Capitan compatibility .dmg.
if sys.platform in ('darwin',) and self.qt_version() < (5, 12):
# macOS hacks. On Mojave with PyQt <5.12 the font rendering is terrible.
# As a workaround we need to temporarily set this 'defaults' keys
# which we immediately disable after the QApplication is started.
try:
ver = tuple(int(a) for a in platform.mac_ver()[0].split('.'))
except (TypeError, ValueError):
self.print_error("WARNING: Cannot parse platform.mac_ver", f"'{platform.mac_ver()[0]}'")
ver = None
if ver and ver >= (10, 14):
from electroncash.utils import macos
self.print_error("Mojave+ with PyQt<5.12 detected; applying CGFontRenderingFontSmoothingDisabled workaround...")
bundle = macos.get_bundle_identifier()
os.system(f'defaults write {bundle} CGFontRenderingFontSmoothingDisabled -bool NO')
def undo_hack():
os.system(f'defaults delete {bundle} CGFontRenderingFontSmoothingDisabled')
self.print_error("Mojave+ font rendering workaround applied.")
#msg = _("Mojave or newer system detected, however you are running the "
# "El Capitan compatibility release of Electron Cash. "
# "Font and graphics rendering may be affected."
# "\n\nPlease obtain the latest non-compatibility version "
# "of Electron Cash for MacOS.")
#QMessageBox.warning(None, _("Warning"), msg) # this works even if app is not exec_() yet.
callables.append(undo_hack)
return ret
def _exit_if_required_pyqt_is_missing(self):
''' Will check if required PyQt5 modules are present and if not,
display an error message box to the user and immediately quit the app.
This is because some Linux systems break up PyQt5 into multiple
subpackages, and for instance PyQt5 QtSvg is its own package, and it
may be missing.
'''
try:
from PyQt5 import QtSvg
except ImportError:
# Closes #1436 -- Some "Run from source" Linux users lack QtSvg
# (partial PyQt5 install)
msg = _("A required Qt module, QtSvg was not found. Please fully install all of PyQt5 5.12 or above to resolve this issue.")
if sys.platform == 'linux':
msg += "\n\n" + _("On Linux, you may try:\n\n python3 -m pip install --user -I pyqt5")
if shutil.which('apt'):
msg += "\n\n" + _("On Debian-based distros, you can run:\n\n sudo apt install python3-pyqt5.qtsvg")
QMessageBox.critical(None, _("QtSvg Missing"), msg) # this works even if app is not exec_() yet.
self.app.exit(1)
sys.exit(msg)
def is_dark_theme_available(self):
if sys.platform in ('darwin',):
# On OSX, qdarkstyle is kind of broken. We instead rely on Mojave
# dark mode if (built in to the OS) for this facility, which the
# user can set outside of this application.
return False
try:
import qdarkstyle
except:
return False
return True
def set_dark_theme_if_needed(self):
if sys.platform in ('darwin',):
# On OSX, qdarkstyle is kind of broken. We instead rely on Mojave
# dark mode if (built in to the OS) for this facility, which the
# user can set outside of this application.
use_dark_theme = False
else:
use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
darkstyle_ver = None
if use_dark_theme:
try:
import qdarkstyle
self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
try:
darkstyle_ver = version.normalize_version(qdarkstyle.__version__)
except (ValueError, IndexError, TypeError, NameError, AttributeError) as e:
self.print_error("Warning: Could not determine qdarkstyle version:", repr(e))
except BaseException as e:
use_dark_theme = False
self.print_error('Error setting dark theme: {}'.format(repr(e)))
# Apply any necessary stylesheet patches. For now this only does anything
# if the version is < 2.6.8.
# 2.6.8+ seems to have fixed all the issues (for now!)
from . import style_patcher
style_patcher.patch(dark=use_dark_theme, darkstyle_ver=darkstyle_ver)
# Even if we ourselves don't set the dark theme,
# the OS/window manager/etc might set *a dark theme*.
# Hence, try to choose colors accordingly:
ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)
def get_cached_password(self, wallet):
''' Passwords in the cache only live for a very short while (10 seconds)
after wallet window creation, and only if it's a new window. This
mechanism is a convenience for plugins that need access to the wallet
password and it would make for poor UX for the user to enter their
password twice when opening a new window '''
entry = self._wallet_password_cache.get(wallet)
if entry:
return entry[0]
def _expire_cached_password(self, weakWallet):
''' Timer callback, called after 10 seconds. '''
wallet = weakWallet() if isinstance(weakWallet, Weak.ref) else weakWallet
if wallet:
entry = self._wallet_password_cache.pop(wallet, None)
if entry:
timer = entry[1]
timer.stop(); timer.deleteLater()
def _cache_password(self, wallet, password):
self._expire_cached_password(wallet)
if password is None:
return
timer = QTimer() # NB a top-level parentless QObject will get delete by Python when its Python refct goes to 0, which is what we want here. Future programmers: Do not give this timer a parent!
self._wallet_password_cache[wallet] = (password, timer)
weakWallet = Weak.ref(wallet)
weakSelf = Weak.ref(self)
def timeout():
slf = weakSelf()
slf and slf._expire_cached_password(weakWallet)
timer.setSingleShot(True); timer.timeout.connect(timeout); timer.start(10000) # 10 sec
def cache_password(self, wallet, password):
self._cache_password(wallet, password)
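    # Hedged usage sketch (editor's illustration): a plugin that needs the wallet
    # password right after the InstallWizard finishes can read it back within the
    # ~10 second cache window instead of prompting the user again. The consumer
    # functions named below are hypothetical.
    #
    #   pw = gui.get_cached_password(wallet)         # gui is the ElectrumGui instance
    #   if pw is None:
    #       pw = prompt_user_for_password(wallet)    # hypothetical fallback prompt
    #   start_plugin_session(wallet, pw)             # hypothetical consumer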
def _set_icon(self):
icon = None
if sys.platform == 'darwin':
# on macOS, in "running from source" mode, we want to set the app
# icon, otherwise we get the generic Python icon.
# In non-running-from-source mode, macOS will get the icon from
# the .app bundle Info.plist spec (which ends up being
# electron.icns). However, in .app mode, Qt will not know about
# this icon and won't be able to use it for e.g. the About dialog.
# In the latter case the branch below will tell Qt to use
# electron-cash.svg as the "window icon".
icon = QIcon("electron.icns") if os.path.exists("electron.icns") else None
if not icon:
# Set this on all other platforms (and macOS built .app) as it can
# only help and never harm, and is always available.
icon = QIcon(":icons/electron-cash.svg")
if icon:
self.app.setWindowIcon(icon)
@staticmethod
def qt_version() -> tuple:
''' Returns a 3-tuple of the form (major, minor, revision) eg
(5, 12, 4) for the current Qt version derived from the QT_VERSION
global provided by Qt. '''
return ( (QT_VERSION >> 16) & 0xff, (QT_VERSION >> 8) & 0xff, QT_VERSION & 0xff )
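    # Worked example (editor's note): QT_VERSION packs the version as 0xMMNNPP
    # (major, minor, patch), so Qt 5.12.4 is 0x050C04 and the expression above yields:
    #   (0x050C04 >> 16) & 0xff == 5
    #   (0x050C04 >> 8)  & 0xff == 12
    #    0x050C04        & 0xff == 4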
def _load_fonts(self):
''' All apologies for the contorted nature of this platform code.
Fonts on Windows & Linux are .. a sensitive situation. :) '''
# Only load the emoji font on Linux and Windows
if sys.platform not in ('linux', 'win32', 'cygwin'):
return
# TODO: Check if we already have the needed emojis
# TODO: Allow the user to download a full color emoji set
linux_font_config_file = os.path.join(os.path.dirname(__file__), 'data', 'fonts.xml')
emojis_ttf_name = 'ecsupplemental_lnx.ttf'
emojis_ttf_path = os.path.join(os.path.dirname(__file__), 'data', emojis_ttf_name)
did_set_custom_fontconfig = False
if (sys.platform == 'linux'
and self.linux_qt_use_custom_fontconfig # method-backed property, checks config settings
and not os.environ.get('FONTCONFIG_FILE')
and os.path.exists('/etc/fonts/fonts.conf')
and os.path.exists(linux_font_config_file)
and os.path.exists(emojis_ttf_path)
and self.qt_version() >= (5, 12)): # doing this on Qt < 5.12 causes harm and makes the whole app render fonts badly
# On Linux, we override some fontconfig rules by loading our own
# font config XML file. This makes it so that our custom emojis and
# other needed glyphs are guaranteed to get picked up first,
# regardless of user font config. Without this some Linux systems
# had black and white or missing emoji glyphs. We only do this if
# the user doesn't have their own fontconfig file in env and
# also as a sanity check, if they have the system
# /etc/fonts/fonts.conf file in the right place.
os.environ['FONTCONFIG_FILE'] = linux_font_config_file
did_set_custom_fontconfig = True
if sys.platform in ('win32', 'cygwin'):
env_var = os.environ.get('QT_QPA_PLATFORM')
if not env_var or 'windows:fontengine=freetype' not in env_var.lower():
# not set up to use freetype, so loading the .ttf would fail.
# abort early.
return
del env_var
# use a different .ttf file on Windows
emojis_ttf_name = 'ecsupplemental_win.ttf'
emojis_ttf_path = os.path.join(os.path.dirname(__file__), 'data', emojis_ttf_name)
if QFontDatabase.addApplicationFont(emojis_ttf_path) < 0:
self.print_error('Failed to add unicode emoji font to application fonts:', emojis_ttf_path)
if did_set_custom_fontconfig:
self.print_error('Deleting custom (fonts.xml) FONTCONFIG_FILE env. var')
del os.environ['FONTCONFIG_FILE']
def _check_and_warn_qt_version(self):
if sys.platform == 'linux' and self.qt_version() < (5, 12):
msg = _("Electron Cash on Linux requires PyQt5 5.12+.\n\n"
"You have version {version_string} installed.\n\n"
"Please upgrade otherwise you may experience "
"font rendering issues with emojis and other unicode "
"characters used by Electron Cash.").format(version_string=QT_VERSION_STR)
QMessageBox.warning(None, _("PyQt5 Upgrade Needed"), msg) # this works even if app is not exec_() yet.
def eventFilter(self, obj, event):
''' This event filter allows us to open bitcoincash: URIs on macOS '''
if event.type() == QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toString())
return True
return False
def build_tray_menu(self):
''' Rebuild the tray menu by tearing it down and building it new again '''
m_old = self.tray.contextMenu()
if m_old is not None:
# Tray does NOT take ownership of menu, so we are tasked with
# deleting the old one. Note that we must delete the old one rather
# than just clearing it because otherwise the old sub-menus stick
# around in Qt. You can try calling qApp.topLevelWidgets() to
            # convince yourself of this. Doing it this way actually cleans up
# the menus and they do not leak.
m_old.clear()
m_old.deleteLater() # C++ object and its children will be deleted later when we return to the event loop
m = QMenu()
m.setObjectName("SysTray.QMenu")
self.tray.setContextMenu(m)
destroyed_print_error(m)
for window in self.windows:
submenu = m.addMenu(window.wallet.basename())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("&Check for updates..."), lambda: self.show_update_checker(None))
m.addSeparator()
m.addAction(_("Exit Electron Cash"), self.close)
self.tray.setContextMenu(m)
def tray_icon(self):
if self.dark_icon:
return QIcon(':icons/electron_dark_icon.svg')
else:
return QIcon(':icons/electron_light_icon.svg')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in list(self.windows):
window.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.new_window_signal.emit(path, uri)
def show_network_dialog(self, parent, *, jumpto : str = ''):
if self.warn_if_no_network(parent):
return
if self.nd:
self.nd.on_update()
run_hook("on_network_dialog", self.nd)
self.nd.show()
self.nd.raise_()
if jumpto: self.nd.jumpto(jumpto)
return
self.nd = NetworkDialog(self.daemon.network, self.config)
run_hook("on_network_dialog", self.nd)
self.nd.show()
if jumpto: self.nd.jumpto(jumpto)
def create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
finalization_print_error(w, "[{}] finalized".format(w.diagnostic_name()))
self.build_tray_menu()
run_hook('on_new_window', w)
return w
def get_wallet_folder(self):
''' may raise FileNotFoundError '''
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def get_new_wallet_path(self):
''' may raise FileNotFoundError '''
wallet_folder = self.get_wallet_folder()
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
return full_path
def on_focus_change(self, ignored, new_focus_widget):
''' Remember the last wallet window that was activated because
start_new_window uses this information. We store the ElectrumWindow
in a weak reference so that we don't interfere with its gc when it is
closed.'''
if not new_focus_widget:
return
if isinstance(new_focus_widget, QWidget):
window = QWidget.window(new_focus_widget) # call base class because some widgets may actually override 'window' with Python attributes.
if isinstance(window, ElectrumWindow):
self._last_active_window = Weak.ref(window)
def start_new_window(self, path, uri):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it.
`path=None` is a special usage which will raise the last activated
window or open the 'last wallet' if no windows are open.'''
if not path:
if not self.windows:
# This branch is taken if nothing is currently open but
# path == None, in which case set path=last wallet
self.config.open_last_wallet()
path = self.config.get_wallet_path()
elif self._last_active_window:
# This branch is taken if we have windows open and we have
# _last_active_window defined, in which case we specify
# that this window should be activated by setting path
# so that the for loop below will trigger on this window.
w = self._last_active_window() # weak ref -> strong ref
if w and w in self.windows: # check ref still alive
# this will cause the last active window to be used in the
# for loop below
path = w.wallet.storage.path
# NB: path may still be None here if it came in as None from args and
# if the above logic couldn't select a window to use -- in which case
# we'll end up picking self.windows[0]
path = path and standardize_path(path) # just make sure some plugin didn't give us a symlink
for w in self.windows:
if not path or w.wallet.storage.path == path:
path = w.wallet.storage.path # remember path in case it was None
w.bring_to_top()
break
else:
try:
if not self.windows:
self.warn_if_no_secp(relaxed=True)
try:
wallet = self.daemon.load_wallet(path, None)
except BaseException as e:
self.print_error(repr(e))
if self.windows:
# *Not* starting up. Propagate exception out to present
# error message box to user.
raise e
# We're just starting up, so we are tolerant of bad wallets
# and just want to proceed to the InstallWizard so the user
# can either specify a different wallet or create a new one.
# (See issue #1189 where before they would get stuck)
path = self.get_new_wallet_path() # give up on this unknown wallet and try a new name.. note if things get really bad this will raise FileNotFoundError and the app aborts here.
wallet = None # fall thru to wizard
if not wallet:
storage = WalletStorage(path, manual_upgrades=True)
wizard = InstallWizard(self.config, self.app, self.plugins, storage)
try:
wallet, password = wizard.run_and_get_wallet() or (None, None)
except UserCancelled:
pass
except GoBack as e:
self.print_error('[start_new_window] Exception caught (GoBack)', e)
finally:
wizard.terminate()
del wizard
gc.collect() # wizard sticks around in memory sometimes, otherwise :/
if not wallet:
return
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self._cache_password(wallet, password)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
if '2fa' in str(e):
self.warning(title=_('Error'), message = '2FA wallets for Bitcoin Cash are currently unsupported by <a href="https://api.trustedcoin.com/#/">TrustedCoin</a>. Follow <a href="https://github.com/Electron-Cash/Electron-Cash/issues/41#issuecomment-357468208">this guide</a> in order to recover your funds.')
else:
self.warning(title=_('Error'), message = 'Cannot load wallet:\n' + str(e), icon=QMessageBox.Critical)
return
w = self.create_window_for_wallet(wallet)
if uri:
w.pay_to_URI(uri)
w.bring_to_top()
w.setWindowState(w.windowState() & ~Qt.WindowMinimized | Qt.WindowActive)
# this will activate the window
w.activateWindow()
return w
def close_window(self, window):
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
run_hook('on_close_window', window)
# GC on ElectrumWindows takes forever to actually happen due to the
# circular reference zoo they create around them (they end up stuck in
# generation 2 for a long time before being collected). The below
# schedules a more comprehensive GC to happen in the very near future.
# This mechanism takes on the order of 40-100ms to execute (depending
# on hardware) but frees megabytes of memory after closing a window
        # (which itself is a relatively infrequent UI event, so it's
# an acceptable tradeoff).
self.gc_schedule()
if not self.windows:
self.config.save_last_wallet(window.wallet)
# NB: We see if we should quit the app after the last wallet
# window is closed, even if a network dialog or some other window is
# open. It was bizarre behavior to keep the app open when
# things like a transaction dialog or the network dialog were still
# up.
self._quit_after_last_window() # central point that checks if we should quit.
#window.deleteLater() # <--- This has the potential to cause bugs (esp. with misbehaving plugins), so commented-out. The object gets deleted anyway when Python GC kicks in. Forcing a delete may risk python to have a dangling reference to a deleted C++ object.
def gc_schedule(self):
''' Schedule garbage collection to happen in the near future.
Note that rapid-fire calls to this re-start the timer each time, thus
only the last call takes effect (it's rate-limited). '''
self.gc_timer.start() # start/re-start the timer to fire exactly once in timeInterval() msecs
@staticmethod
def gc():
''' self.gc_timer timeout() slot '''
gc.collect()
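    # Editor's note on the rate limiting: gc_timer is a single-shot 500 ms timer
    # and QTimer.start() restarts a timer that is already running, so a burst of
    # gc_schedule() calls (e.g. several windows closed quickly) produces exactly
    # one gc.collect(), roughly 500 ms after the *last* call, not one per call.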
def init_network(self):
# Show network dialog if config does not exist
if self.daemon.network:
if self.config.get('auto_connect') is None:
wizard = InstallWizard(self.config, self.app, self.plugins, None)
wizard.init_network(self.daemon.network)
wizard.terminate()
def on_new_version(self, newver):
''' Called by the auto update check mechanism to notify
that a new version is available. We propagate the signal out
using our own update_available_signal as well as post a message
to the system tray. '''
self.new_version_available = newver
self.update_available_signal.emit(True)
self.notify(_("A new version of Electron Cash is available: {}").format(newver))
def show_update_checker(self, parent, *, skip_check = False):
if self.warn_if_no_network(parent):
return
self.update_checker.show()
self.update_checker.raise_()
if not skip_check:
self.update_checker.do_check()
def on_auto_update_timeout(self):
if not self.daemon.network:
# auto-update-checking never is done in offline mode
self.print_error("Offline mode; update check skipped")
elif not self.update_checker.did_check_recently(): # make sure auto-check doesn't happen right after a manual check.
self.update_checker.do_check()
if self.update_checker_timer.first_run:
self._start_auto_update_timer(first_run = False)
def _start_auto_update_timer(self, *, first_run = False):
self.update_checker_timer.first_run = bool(first_run)
if first_run:
interval = 10.0*1e3 # do it very soon (in 10 seconds)
else:
interval = 4.0*3600.0*1e3 # once every 4 hours (in ms)
self.update_checker_timer.start(interval)
self.print_error("Auto update check: interval set to {} seconds".format(interval//1e3))
def _stop_auto_update_timer(self):
self.update_checker_timer.stop()
self.print_error("Auto update check: disabled")
def warn_if_cant_import_qrreader(self, parent, show_warning=True):
        ''' Checks if QR reading from camera is possible. It can fail on a
system lacking QtMultimedia. This can be removed in the future when
we are unlikely to encounter Qt5 installations that are missing
QtMultimedia '''
try:
from .qrreader import QrReaderCameraDialog
except ImportError as e:
if show_warning:
self.warning(parent=parent,
title=_("QR Reader Error"),
message=_("QR reader failed to load. This may "
"happen if you are using an older version "
"of PyQt5.<br><br>Detailed error: ") + str(e),
rich_text=True)
return True
return False
def warn_if_no_network(self, parent):
if not self.daemon.network:
self.warning(message=_('You are using Electron Cash in offline mode; restart Electron Cash if you want to get connected'), title=_('Offline'), parent=parent, rich_text=True)
return True
return False
def warn_if_no_secp(self, parent=None, message=None, icon=QMessageBox.Warning, relaxed=False):
''' Returns True if it DID warn: ie if there's no secp and ecc operations
are slow, otherwise returns False if we have secp.
Pass message (rich text) to provide a custom message.
Note that the URL link to the HOWTO will always be appended to the custom message.'''
from electroncash import ecc_fast
has_secp = ecc_fast.is_using_fast_ecc()
if has_secp:
return False
# When relaxwarn is set return True without showing the warning
from electroncash import get_config
if relaxed and get_config().cmdline_options["relaxwarn"]:
return True
# else..
howto_url='https://github.com/Electron-Cash/Electron-Cash/blob/master/contrib/secp_HOWTO.md#libsecp256k1-0-for-electron-cash'
template = '''
<html><body>
<p>
{message}
<p>
{url_blurb}
</p>
<p><a href="{url}">Electron Cash Secp Mini-HOWTO</a></p>
</body></html>
'''
msg = template.format(
message = message or _("Electron Cash was unable to find the secp256k1 library on this system. Elliptic curve cryptography operations will be performed in slow Python-only mode."),
url=howto_url,
url_blurb = _("Please visit this page for instructions on how to correct the situation:")
)
self.warning(parent=parent, title=_("Missing libsecp256k1"),
message=msg, rich_text=True)
return True
def warning(self, title, message, icon = QMessageBox.Warning, parent = None, rich_text=False):
if not isinstance(icon, QMessageBox.Icon):
icon = QMessageBox.Warning
if isinstance(parent, MessageBoxMixin):
parent.msg_box(title=title, text=message, icon=icon, parent=None, rich_text=rich_text)
else:
parent = parent if isinstance(parent, QWidget) else None
d = QMessageBoxMixin(icon, title, message, QMessageBox.Ok, parent)
if not rich_text:
d.setTextFormat(Qt.PlainText)
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
else:
d.setTextFormat(Qt.AutoText)
d.setTextInteractionFlags(Qt.TextSelectableByMouse|Qt.LinksAccessibleByMouse)
d.setWindowModality(Qt.WindowModal if parent else Qt.ApplicationModal)
d.exec_()
d.setParent(None)
def lin_win_maybe_show_highdpi_caveat_msg(self, parent):
''' Called from main_window.py -- tells user once and only once about
the high DPI mode and its caveats on Linux only. Is a no-op otherwise. '''
is_win = sys.platform[:3] in ('win', 'cyg')
is_lin = sys.platform in ('linux',)
if not is_win and not is_lin:
return
if (hasattr(Qt, "AA_EnableHighDpiScaling")
and self.app.testAttribute(Qt.AA_EnableHighDpiScaling)
# first run check:
and self.config.get('qt_enable_highdpi', None) is None
and (is_lin # we can't check pixel ratio on linux as apparently it's unreliable, so always show this message on linux
# on some windows systems running in highdpi causes
# glitches to the QMessageBox windows, so we need
# to also warn Windows users that they can turn this off,
# but only if they actually are using a high dpi display
or (is_win and hasattr(QScreen, 'devicePixelRatio')
and any(s.devicePixelRatio() > 1.0 # do they have any screens that are high dpi?
for s in self.app.screens()) ))):
# write to the config key to immediately suppress this warning in
# the future -- it only appears on first-run if key was None
self.config.set_key('qt_enable_highdpi', True)
if is_lin:
msg = (_("Automatic high DPI scaling has been enabled for Electron Cash, which should result in improved graphics quality.")
+ "\n\n" + _("However, on some esoteric Linux systems, this mode may cause disproportionately large status bar icons.")
+ "\n\n" + _("If that is the case for you, then you may disable automatic DPI scaling in the preferences, under 'General'."))
else: # is_win
msg = (_("Automatic high DPI scaling has been enabled for Electron Cash, which should result in improved graphics quality.")
+ "\n\n" + _("However, on some Windows systems, bugs in Qt may result in minor graphics glitches in system 'message box' dialogs.")
+ "\n\n" + _("If that is the case for you, then you may disable automatic DPI scaling in the preferences, under 'General'."))
parent.show_message( title = _('Automatic High DPI'), msg = msg)
def has_auto_update_check(self):
return bool(self.config.get('auto_update_check', True))
def set_auto_update_check(self, b):
was, b = self.has_auto_update_check(), bool(b)
if was != b:
self.config.set_key('auto_update_check', b, save=True)
if b:
self._start_auto_update_timer()
else:
self._stop_auto_update_timer()
def _quit_after_last_window(self):
if any(1 for w in self.windows
if isinstance(w, ElectrumWindow) and not w.cleaned_up):
# We can get here if we have some top-level ElectrumWindows that
# are "minimized to tray" (hidden). "lastWindowClosed "is emitted
# if there are no *visible* windows. If we actually have hidden
# app windows (because the user hid them), then we want to *not*
# quit the app. https://doc.qt.io/qt-5/qguiapplication.html#lastWindowClosed
# This check and early return fixes issue #1727.
return
qApp.quit()
def notify(self, message):
''' Display a message in the system tray popup notification. On macOS
this is the GROWL thing. On Windows it's a balloon popup from the system
        tray. On Linux it's usually a banner at the top of the screen.'''
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electron Cash", message, QIcon(":icons/electron-cash.svg"), 20000)
except TypeError:
self.tray.showMessage("Electron Cash", message, QSystemTrayIcon.Information, 20000)
def is_cashaddr(self):
return bool(self.config.get('show_cashaddr', True))
def toggle_cashaddr(self, on = None):
was = self.is_cashaddr()
if on is None:
on = not was
else:
on = bool(on)
self.config.set_key('show_cashaddr', on)
Address.show_cashaddr(on)
if was != on:
self.cashaddr_toggled_signal.emit()
def is_cashaddr_status_button_hidden(self):
return bool(self.config.get('hide_cashaddr_button', False))
def set_cashaddr_status_button_hidden(self, b):
b = bool(b)
was = self.is_cashaddr_status_button_hidden()
if was != b:
self.config.set_key('hide_cashaddr_button', bool(b))
self.cashaddr_status_button_hidden_signal.emit(b)
@property
def windows_qt_use_freetype(self):
''' Returns True iff we are windows and we are set to use freetype as
the font engine. This will always return false on platforms where the
question doesn't apply. This config setting defaults to True for
Windows < Win10 and False otherwise. It is only relevant when
using the Qt GUI, however. '''
if sys.platform not in ('win32', 'cygwin'):
return False
try:
winver = float(platform.win32_ver()[0]) # '7', '8', '8.1', '10', etc
except (AttributeError, ValueError, IndexError):
# We can get here if cygwin, which has an empty win32_ver tuple
# in some cases.
# In that case "assume windows 10" and just proceed. Cygwin users
# can always manually override this setting from GUI prefs.
winver = 10
# setting defaults to on for Windows < Win10
return bool(self.config.get('windows_qt_use_freetype', winver < 10))
@windows_qt_use_freetype.setter
def windows_qt_use_freetype(self, b):
if self.config.is_modifiable('windows_qt_use_freetype') and sys.platform in ('win32', 'cygwin'):
self.config.set_key('windows_qt_use_freetype', bool(b))
@property
def linux_qt_use_custom_fontconfig(self):
''' Returns True iff we are Linux and we are set to use the fonts.xml
fontconfig override, False otherwise. This config setting defaults to
True for all Linux, but only is relevant to Qt GUI. '''
return bool(sys.platform in ('linux',) and self.config.get('linux_qt_use_custom_fontconfig', True))
@linux_qt_use_custom_fontconfig.setter
def linux_qt_use_custom_fontconfig(self, b):
if self.config.is_modifiable('linux_qt_use_custom_fontconfig') and sys.platform in ('linux',):
self.config.set_key('linux_qt_use_custom_fontconfig', bool(b))
def main(self):
try:
self.init_network()
except UserCancelled:
return
except GoBack:
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
return
self.timer.start()
self.config.open_last_wallet()
path = self.config.get_wallet_path()
if not self.start_new_window(path, self.config.get('url')):
return
signal.signal(signal.SIGINT, lambda signum, frame: self.shutdown_signal.emit())
self.app.setQuitOnLastWindowClosed(False) # we want to control this in our slot (since we support non-visible, backgrounded windows via the systray show/hide facility)
self.app.lastWindowClosed.connect(self._quit_after_last_window)
def clean_up():
# Just in case we get an exception as we exit, uninstall the Exception_Hook
Exception_Hook.uninstall()
# Shut down the timer cleanly
self.timer.stop()
self.gc_timer.stop()
self._stop_auto_update_timer()
# clipboard persistence. see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
event = QEvent(QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
self.app.aboutToQuit.connect(clean_up)
Exception_Hook(self.config) # This wouldn't work anyway unless the app event loop is active, so we must install it once here and no earlier.
# main loop
self.app.exec_()
# on some platforms the exec_ call may not return, so use clean_up()
| 49.880977
| 323
| 0.629923
|
23a527da58027b1912d6d6d4d8a3e5cf2cdf81f8
| 942
|
py
|
Python
|
2015/day_23.py
|
nabiirah/advent-of-code
|
9c7e7cae437c024aa05d9cb7f9211fd47f5226a2
|
[
"MIT"
] | 24
|
2020-12-08T20:07:52.000Z
|
2022-01-18T20:08:06.000Z
|
2015/day_23.py
|
nestorhf/advent-of-code
|
1bb827e9ea85e03e0720e339d10b3ed8c44d8f27
|
[
"MIT"
] | null | null | null |
2015/day_23.py
|
nestorhf/advent-of-code
|
1bb827e9ea85e03e0720e339d10b3ed8c44d8f27
|
[
"MIT"
] | 10
|
2020-12-04T10:04:15.000Z
|
2022-02-21T22:22:26.000Z
|
"""Advent of Code Day 23 - Opening the Turing Lock"""
with open('inputs/day_23.txt') as f:
instructions = [line.strip() for line in f]
registers = {'a': 1, 'b': 0} # Change a to 0 for Part One
pos = 0
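# Instruction semantics, as assumed from the 2015 Day 23 puzzle statement:
#   hlf r       -> halve register r
#   tpl r       -> triple register r
#   inc r       -> increment register r
#   jmp off     -> jump by off
#   jie r, off  -> jump by off if r is even
#   jio r, off  -> jump by off if r is ONE (not "odd")
# Example: "jio a, +2" splits into ['jio', 'a,', '+2'], hence the strip(',') below.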
while pos < len(instructions):
parsed = instructions[pos].split(' ')
if len(parsed) == 3:
if parsed[0] == 'jio':
if registers[parsed[1].strip(',')] == 1:
pos += int(parsed[2])
continue
elif parsed[0] == 'jie':
if registers[parsed[1].strip(',')] % 2 == 0:
pos += int(parsed[2])
continue
elif parsed[0] == 'hlf':
        registers[parsed[1]] //= 2  # hlf halves the register; floor division keeps it an integer
elif parsed[0] == 'tpl':
registers[parsed[1]] *= 3
elif parsed[0] == 'inc':
registers[parsed[1]] += 1
elif parsed[0] == 'jmp':
pos += int(parsed[1])
continue
pos += 1
print("Value of Register b =", registers['b'])
| 24.789474
| 61
| 0.501062
|
d0cf6c9f1c42fd1aa8a337b2c6b20cc176e70401
| 28,375
|
py
|
Python
|
train_R3D_CatNet.py
|
villawang/CatNet
|
6f4c3bce7106e55588eecfa9c1ed04bc52bba222
|
[
"MIT"
] | 12
|
2020-04-21T21:49:43.000Z
|
2022-03-16T05:38:52.000Z
|
train_R3D_CatNet.py
|
villawang/CatNet
|
6f4c3bce7106e55588eecfa9c1ed04bc52bba222
|
[
"MIT"
] | 3
|
2020-10-06T23:39:04.000Z
|
2021-12-09T05:37:44.000Z
|
train_R3D_CatNet.py
|
villawang/CatNet
|
6f4c3bce7106e55588eecfa9c1ed04bc52bba222
|
[
"MIT"
] | 2
|
2020-04-21T14:59:30.000Z
|
2020-08-11T03:34:31.000Z
|
import numpy as np
import pickle
import os
from PIL import Image
import time
from tqdm import tqdm, trange
import shutil
from random import randint
import argparse
import glob
import pdb
import random
import math
import time
import argparse
import matplotlib.pyplot as plt
from copy import deepcopy
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import sys
sys.path.append(os.path.join(os.getcwd(), 'dataset'))
import torchvision.transforms as transforms
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
import torch
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader, RandomSampler, ConcatDataset
from torchsummary import summary
from models import resnext
from model import generate_model
from opts import parse_opts
import utils
from spatial_transforms import *
from temporal_transforms import *
import dataset_class
import warnings
import os
# os.environ['CUDA_VISIBLE_DEVICES']='3'
warnings.filterwarnings("ignore")
args = parse_opts()
annot_dir = 'dataset'
save_dir = 'output_CatNet/{}-{}'.format(args.arch, args.n_frames_per_clip)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_dir = 'models/task0_model'
os.environ['CUDA_VISIBLE_DEVICES']=args.cuda_id
device = 'cuda:0'
if isinstance(eval(args.cuda_id), int):
device_ids = [eval(args.cuda_id)]
else:
device_ids = [i for i in eval(args.cuda_id)]
class iCaRL(object):
def __init__(self, net, K):
self.net = net
self.K = K # number of cached exemplar sets for all observed classes
self.exemplar_sets = [] # list contains exemplar sets for different classes
self.exemplar_labels = []
self.criterion = nn.CrossEntropyLoss()
def increment_classes(self, num_AddedClasses):
"""Add n classes in the final fc layer"""
in_features = self.net.module.fc.in_features
out_features = self.net.module.fc.out_features
weight = self.net.module.fc.weight.data
# self.net.module.fc = nn.Linear(in_features, out_features+num_AddedClasses, bias=False)
self.net.module.fc = nn.Linear(in_features, out_features+num_AddedClasses)
self.net.module.fc.weight.data[:out_features] = weight
self.net.module.fc.to(device)
def update_representation(self, exemplar_dataset, new_class_dataset):
print('Updating representation........')
# DO NOT CHANGE batch size HERE!!!!!!!!!!!!!!!
exemplar_loader = torch.utils.data.DataLoader(exemplar_dataset, batch_size=1, num_workers=args.num_workers)
# exemplar_dataset_UpdateLables contains stored network predicted label and new class label
# replace the ground truth label with the predicted labels by the stored model
exemplar_dataset_UpdateLables = []
for data in exemplar_loader:
inputs, labels = data
inputs = inputs.to(device, non_blocking=True).float()
probs, logits = self.net(inputs)
update_labels = int(probs.max(1)[1].detach().cpu().item())
exemplar_dataset_UpdateLables.append((inputs[0].detach().cpu(), update_labels))
D_dataset = ConcatDataset([dataset_class.make_dataset_instance(exemplar_dataset_UpdateLables)]+new_class_dataset)
D_loader = DataLoader(D_dataset, batch_size=args.batch_size, num_workers=args.num_workers, shuffle=True)
del inputs
torch.cuda.empty_cache()
return D_loader
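    # Editor's note: the combined loader mixes two kinds of (clip, label) pairs --
    # old exemplars re-labelled with the frozen network's own predictions, and new
    # class clips with their ground-truth labels -- so the cross-entropy updates in
    # train() keep anchoring the old classes (a hard-label stand-in for iCaRL's
    # distillation targets) while learning the new ones. Rough shape of D_loader:
    #
    #   old exemplar clip  -> label argmax'ed from the saved model's probs
    #   new class clip     -> ground-truth class id
    #   D_loader           -> shuffled mix of both, batch_size = args.batch_size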
def reduce_exemplar_sets(self, m):
for y, P_y in enumerate(self.exemplar_sets):
self.exemplar_sets[y] = P_y[:m]
self.exemplar_labels[y] = self.exemplar_labels[y][:m]
def train(self, new_class_train_dataset, new_class_test_dataset, added_class_ids, dataloader_val):
# representation step
exemplar_dataset = self.combine_dataset_with_exemplars(self.exemplar_sets, self.exemplar_labels)
self.net.eval()
D_loader = self.update_representation(exemplar_dataset, new_class_train_dataset)
self.net.train()
self.increment_classes(len(added_class_ids))
# training
learning_rate = 1e-3
lr_steps = [6]
num_epochs = 12
step = 0
self.optimizer = torch.optim.SGD(self.net.parameters(), lr=learning_rate, momentum=0.9,
weight_decay=1e-3, dampening=0.9)
train_loss = utils.AverageMeter()
train_acc = utils.AverageMeter()
val_loss = utils.AverageMeter()
val_acc = utils.AverageMeter()
print('Start training.......')
for epoch in trange(num_epochs):
train_loss.reset()
train_acc.reset()
for data in D_loader:
inputs, labels = data
inputs = inputs.to(device, non_blocking=True).float()
labels = labels.to(device, non_blocking=True)
probs, logits = self.net(inputs)
self.optimizer.zero_grad()
loss_ = self.criterion(logits, labels)
loss_.backward()
self.optimizer.step()
train_loss.update(loss_.item())
if step % 100 == 0:
loss_val, acc_val_old, acc_val_new, acc_val = self.val_model(dataloader_val)
train_logger.log({
'num_classes': self.net.module.fc.out_features,
'train_loss': train_loss.val,
'val_loss': loss_val,
'val_acc_old': acc_val_old,
'val_acc_new': acc_val_new,
'val_acc': acc_val,
'lr': self.optimizer.param_groups[0]['lr']
})
print('train loss: {:.3f}'.format(train_loss.avg),
'val loss: {:.3f}'.format(loss_val),
'acc val old: {:.3f}'.format(acc_val_old),
'acc val new: {:.3f}'.format(acc_val_new),
'acc val mean: {:.3f}'.format(acc_val))
step += 1
utils.save_checkpoint(self.net, self.optimizer, step, save_dir,
'{}-{}-{}.pth'.format(args.arch, args.modality, self.net.module.fc.out_features))
utils.adjust_learning_rate(learning_rate, self.optimizer, epoch, lr_steps)
# print('Updating the exemplar sets.......')
# m = int(self.K / self.net.module.fc.out_features)
# shrink the exemplar sets for old class
# self.reduce_exemplar_sets(m)
new_class_dataloader = []
        ########################### Dirty workaround: build the test dataloader list with an explicit loop; the list-comprehension version (commented out below) did not work for the author. ###########################
for i in range(len(new_class_test_dataset)):
new_class_dataloader.append(DataLoader(new_class_test_dataset[i], batch_size=args.batch_size,
num_workers=args.num_workers))
# new_class_dataloader = [DataLoader(new_class_dataset[i], batch_size=16,
# num_workers=4) for i in range(len(new_class_dataset))]
#############################################################################################
# attach the new class representation to exemplar sets
self.net.eval()
for dataloader_class in new_class_dataloader:
self.construct_exemplar_set(dataloader_class)
self.net.train()
# need to be saved for testing phase
# exemplar_dataset = self.combine_dataset_with_exemplars(self.exemplar_sets, self.exemplar_labels)
with open(os.path.join(save_dir,
'{}_ExemplarSet_{}.pkl'.format(args.modality, self.net.module.fc.out_features)), 'wb') as f:
pickle.dump(self.exemplar_sets, f)
del inputs, labels, probs, logits
torch.cuda.empty_cache()
def val_model(self, dataloader):
self.net.eval()
acc = utils.AverageMeter()
acc_class = utils.AverageMeter()
acc_class_cache = []
loss_val = utils.AverageMeter()
for class_i, dataloader_i in enumerate(dataloader):
acc_class.reset()
for data in dataloader_i:
inputs, labels = data
inputs = inputs.to(device, non_blocking=True).float()
labels = labels.to(device, non_blocking=True)
probs, logits = self.net(inputs)
val_loss_ = self.criterion(logits, labels)
acc.update(utils.calculate_accuracy(probs, labels))
acc_class.update(utils.calculate_accuracy(probs, labels))
loss_val.update(val_loss_.item())
acc_class_cache.append(acc_class.avg)
self.net.train()
return loss_val.avg, acc_class_cache[0], acc_class_cache[1], acc.avg
# function prepare exemplar dataloader for update_representation
def combine_dataset_with_exemplars(self, exemplar_sets, exemplar_labels):
exemplar_dataset = []
for y, P_y in enumerate(exemplar_sets):
for i in range(P_y.size(0)):
exemplar_dataset.append((P_y[i], exemplar_labels[y][i]))
return exemplar_dataset
def construct_exemplar_set(self, dataloader):
"""Construct an exemplar set for videos frames set
Args:
dataloader: dataloader containing videos frames of a class
"""
# m = int(self.K / self.net.module.fc.out_features)
m = int(self.K / 40)
# assert m <= len(dataloader) * 16
features = []
frames = []
for data in dataloader:
inputs, labels = data
inputs = inputs.to(device, non_blocking=True).float()
labels = labels.to(device, non_blocking=True)
feature = feature_extractor(self.net, inputs).detach().cpu()
feature = feature/torch.norm(feature, p=2, dim=1, keepdim=True)
features.append(feature)
frames.append(inputs.detach().cpu())
features = torch.cat(features)
class_mean = torch.mean(features, dim=0, keepdim=True)
class_mean = class_mean/torch.norm(class_mean, p=2, dim=1, keepdim=True)
frames = torch.cat(frames)
exemplar_set = []
exemplar_features = [] # list of Variables of shape (feature_size,)
for k in range(m):
if k == 1:
S = torch.cat(exemplar_features).view(1,-1)
elif k == 0:
S = 0.
else:
S = torch.sum(torch.stack(exemplar_features), dim=0, keepdim=True)
phi = features
mu = class_mean
mu_p = 1/(k+1) * (phi + S)
selected_indice = torch.argmin(torch.sqrt(torch.sum((mu - mu_p)**2, dim=1))).item()
exemplar_set.append(frames[selected_indice:selected_indice+1])
exemplar_features.append(features[selected_indice])
exemplar_set = torch.cat(exemplar_set)
exemplar_features = torch.cat(exemplar_features)
exemplar_label = torch.tensor([labels[0]]*m).long()
self.exemplar_sets.append(exemplar_set)
self.exemplar_labels.append(exemplar_label)
del inputs
torch.cuda.empty_cache()
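# Editor's note: the selection loop above is iCaRL-style herding. With class mean
# mu and L2-normalized features phi(x), step k picks
#     p_k = argmin_x || mu - (phi(x) + sum_{j<k} phi(p_j)) / (k + 1) ||
# so the running mean of the chosen exemplars tracks the true class mean.
# Minimal NumPy sketch of the same idea (illustrative only; features has shape (N, F)):
#
#   import numpy as np
#   def herd(features, m):
#       mu, chosen, S = features.mean(0), [], 0.0
#       for k in range(m):
#           d = np.linalg.norm(mu - (features + S) / (k + 1), axis=1)
#           i = int(d.argmin()); chosen.append(i); S = S + features[i]
#       return chosen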
def compute_mean(net, exemplar_sets):
# prior knowledge of the statistics in the exemplar dataset
exemplar_means = []
for P_y in exemplar_sets:
loader = torch.utils.data.DataLoader(P_y, batch_size=args.batch_size, num_workers=args.num_workers)
features = []
for inputs in loader:
feature = feature_extractor(net, inputs.to(device,non_blocking=True).float()).detach().cpu()
feature = feature / torch.norm(feature, p=2, dim=1, keepdim=True) # batch_size * feature_size
features.append(feature)
features = torch.cat(features) # batch_size * feature_size
mu_y = torch.mean(features, dim=0, keepdim=True) # 1 * feature_size
mu_y = mu_y/torch.norm(mu_y, p=2, dim=1, keepdim=True)
exemplar_means.append(mu_y)
# save gpu memory
del feature
torch.cuda.empty_cache()
exemplar_means = torch.cat(exemplar_means) # (n_classes, feature_size)
return exemplar_means
def feature_extractor(net, x):
"""Classify images by neares-means-of-exemplars
Args:
x: input video batch
Returns:
feature: Tensor of extracted features (batch_size,)
"""
net_FeatureExtractor = nn.Sequential(*list([i for i in net.children()][0].children())[:-2])
feature = net_FeatureExtractor(x)
feature = feature.view(feature.size(0), -1)
return feature
def test(model, load_dir, ExemplarSet_file, checkpoint_file, dataloader, class_id1, class_id2):
in_features = model.module.fc.in_features
out_features = model.module.fc.out_features
# model.module.fc = nn.Linear(in_features, len(class_id1)+len(class_id2), bias=False)
model.module.fc = nn.Linear(in_features, len(class_id1)+len(class_id2))
model.to(device)
print('Start testing for class {}.....'.format(range(class_id1[0], class_id2[-1])))
print('Model {} and exemplar sets {} loaded'.format(checkpoint_file, ExemplarSet_file))
ExemplarSet_file = os.path.join(load_dir, ExemplarSet_file)
checkpoint_file = os.path.join(load_dir, checkpoint_file)
with open(ExemplarSet_file, 'rb') as f:
exemplar_sets = pickle.load(f)
checkpoint = torch.load(checkpoint_file)
# model = checkpoint['model']
model.load_state_dict(checkpoint['state_dict'])
model.eval()
print('Computing exemplar means for {} classes........'.format(len(exemplar_sets)))
exemplar_means = compute_mean(model, exemplar_sets)
acc = utils.AverageMeter()
acc_class = utils.AverageMeter()
acc_class_cache = []
for class_i, dataloader_i in enumerate(dataloader):
acc_class.reset()
for data in dataloader_i:
inputs, labels = data
inputs = inputs.to(device, non_blocking=True).float()
preds = classfier(model, exemplar_means, inputs)
acc.update(utils.calculate_accuracy_ForIcarl(preds, labels))
acc_class.update(utils.calculate_accuracy_ForIcarl(preds, labels))
acc_class_cache.append(acc_class.avg)
print('Accuracy for old classes:')
print(acc_class_cache[0])
print('Accuracy for new classes:')
print(acc_class_cache[1])
print('Mean accuracy')
print(acc.avg)
acc.reset()
acc_class.reset()
acc_class_cache = []
for class_i, dataloader_i in enumerate(dataloader):
acc_class.reset()
for data in dataloader_i:
inputs, labels = data
inputs = inputs.to(device, non_blocking=True).float()
labels = labels.to(device, non_blocking=True)
probs, logits = model(inputs)
acc.update(utils.calculate_accuracy(probs, labels))
acc_class.update(utils.calculate_accuracy(probs, labels))
acc_class_cache.append(acc_class.avg)
print('\n')
print('Accuracy for old classes:')
print(acc_class_cache[0])
print('Accuracy for new classes:')
print(acc_class_cache[1])
print('Mean accuracy')
print(acc.avg)
del inputs, labels, probs, logits, model
torch.cuda.empty_cache()
def classfier(net, exemplar_means, x):
"""Classify images by neares-means-of-exemplars
Args:
x: input video batch
Returns:
preds: Tensor of size (batch_size,)
"""
batch_size = x.size(0)
# feature = self.feature_extractor(x).detach().cpu() # (batch_size, feature_size)
# feature = feature / torch.norm(feature, p=2, dim=1, keepdim=True)
# feature_extractor = nn.Sequential(*list([i for i in net.children()][0].children())[:-2])
feature = feature_extractor(net, x).detach().cpu()
feature = feature / torch.norm(feature, p=2, dim=1, keepdim=True)
feature = feature.unsqueeze(2) # (batch_size, feature_size, 1)
exemplar_means = torch.stack([exemplar_means] * batch_size) # (batch_size, n_classes, feature_size)
exemplar_means = exemplar_means.transpose(1, 2)
feature = feature.expand_as(exemplar_means) # (batch_size, feature_size, n_classes)
dists = (feature - exemplar_means).pow(2).sum(1) #(batch_size, n_classes)
_, preds = dists.min(1)
return preds
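# Shape walk-through for the distance computation above (B = batch size,
# F = feature size, C = number of classes with exemplar means):
#   feature:        (B, F) -> unsqueeze(2) -> (B, F, 1) -> expand -> (B, F, C)
#   exemplar_means: (C, F) -> stack B copies -> (B, C, F) -> transpose(1, 2) -> (B, F, C)
#   dists = ((feature - exemplar_means) ** 2).sum(dim=1)  -> (B, C)
#   preds = dists.min(1)[1]                                -> (B,) nearest class mean per clip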
def get_confusion_matrix(model, load_dir, ExemplarSet_file, checkpoint_file, dataloader, class_id):
in_features = model.module.fc.in_features
out_features = model.module.fc.out_features
# model.module.fc = nn.Linear(in_features, len(class_id1)+len(class_id2), bias=False)
model.module.fc = nn.Linear(in_features, len(class_id))
model.to(device)
print('Model {} and exemplar sets {} loaded'.format(checkpoint_file, ExemplarSet_file))
ExemplarSet_file = os.path.join(load_dir, ExemplarSet_file)
checkpoint_file = os.path.join(load_dir, checkpoint_file)
with open(ExemplarSet_file, 'rb') as f:
exemplar_sets = pickle.load(f)
checkpoint = torch.load(checkpoint_file)
# model = checkpoint['model']
model.load_state_dict(checkpoint['state_dict'])
model.eval()
print('Computing exemplar means for {} classes........'.format(len(exemplar_sets)))
exemplar_means = compute_mean(model, exemplar_sets)
labels_all = []
preds_exemplar_all = []
preds_softmax_all = []
# for class_i, dataloader_i in enumerate(dataloader):
for data in tqdm(dataloader):
inputs, labels = data
inputs = inputs.to(device, non_blocking=True).float()
labels = labels.to(device, non_blocking=True)
preds_exemplar = classfier(model, exemplar_means, inputs)
probs, logits = model(inputs)
pred_softmax = probs.max(1)[1]
labels_all.append(labels.detach().cpu())
preds_exemplar_all.append(preds_exemplar.detach().cpu())
preds_softmax_all.append(pred_softmax.detach().cpu())
preds_exemplar_all = torch.cat(preds_exemplar_all)
preds_softmax_all = torch.cat(preds_softmax_all)
labels_all = torch.cat(labels_all)
preds_exemplar_all = preds_exemplar_all.numpy()
preds_softmax_all = preds_softmax_all.numpy()
labels_all = labels_all.numpy()
C_exemplar = confusion_matrix(labels_all,preds_exemplar_all)
C_softmax = confusion_matrix(labels_all,preds_softmax_all)
plt.figure()
plt.matshow(C_exemplar)
plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
plt.savefig(os.path.join(load_dir, 'confusion_matrix_exemplar.jpg'))
plt.figure()
plt.matshow(C_softmax)
plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
plt.savefig(os.path.join(load_dir, 'confusion_matrix_softmax.jpg'))
if __name__ == '__main__':
# keep shuffling be constant every time
seed = 1
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
# activitynet mean value
mean = [114.7, 107.7, 99.4]
norm_method = Normalize(mean, [1, 1, 1])
scales = [args.initial_scale]
for i in range(1, args.n_scales):
scales.append(scales[-1] * args.scale_step)
trans_train = Compose([
Scale([112,112]),
MultiScaleRandomCrop(scales, [112,112]),
SpatialElasticDisplacement(),
# RandomHorizontalFlip(),
ToTensor(1), norm_method
])
temporal_transform_train = Compose([
TemporalRandomCrop(args.n_frames_per_clip)
])
trans_test = Compose([
Scale([112,112]),
CenterCrop([112, 112]),
ToTensor(1), norm_method
])
temporal_transform_test = Compose([
TemporalCenterCrop(args.n_frames_per_clip)
])
net, parameters = generate_model(args)
checkpoint = utils.load_checkpoint(model_dir, '{}-{}-{}.pth'.format(args.arch, args.n_frames_per_clip, args.modality))
net.load_state_dict(checkpoint['state_dict'])
# # # set fine tune parameters: Conv5_x and fc layer from original paper
# for param in net.module.parameters():
# param.requires_grad = False
# for named_child in net.module.named_children():
# if named_child[0] == 'fc' or named_child[0] == 'layer4' or named_child[0] == 'layer3':
# # if named_child[0] == 'fc':
# for param in named_child[1].parameters():
# param.requires_grad = True
net_train = deepcopy(net)
net_train.to(device)
net_test = deepcopy(net)
icarl = iCaRL(net_train, 2000)
# load dataset
if args.is_train:
train_logger = utils.Logger(os.path.join(save_dir, '{}.log'.format(args.modality)),
['num_classes', 'train_loss', 'val_loss',
'val_acc_old', 'val_acc_new','val_acc', 'lr'])
class_id1 = [i for i in range(1, 41)] # initial learned class
print('Preparing initial exemplar sets........')
print('Loading the initial class training data..... class {}'.format(range(class_id1[0], class_id1[-1])))
# class_id1 dataloader for creating the exemplar set
dataset_init = [dataset_class.dataset_video_class(annot_dir, 'train_plus_val',
class_id = [class_id1[i]],
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_test,
temporal_transform = temporal_transform_test,
modality = args.modality)
for i in range(len(class_id1))]
dataloader_init = [DataLoader(dataset_init[i], batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers, pin_memory=True)
for i in range(len(class_id1))]
icarl.net.eval()
for dataloader_init_class in tqdm(dataloader_init):
icarl.construct_exemplar_set(dataloader_init_class)
icarl.net.train()
class_step = 5 # incremental steps for new class
for incremenral_class in trange(41, 84, class_step):
if incremenral_class == range(41, 84, class_step)[-1]:
class_id2 = [i for i in range(incremenral_class, 84)]
else:
class_id2 = [i for i in range(incremenral_class, incremenral_class+class_step)]
class_all = class_id1 + class_id2
print('Loading new class training data..... class {}'.format(range(class_id2[0], class_id2[-1])))
# this dataset is used for preparing the exemplar set. DO NOT use any augmentation rules e.g. transforms
dataset_new = [dataset_class.dataset_video_class(annot_dir, 'train_plus_val',
class_id = [class_id2[i]],
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_test,
temporal_transform = temporal_transform_test,
modality = args.modality)
for i in range(len(class_id2))]
            # the new-class training dataset does not use augmentation (otherwise it may cause training instability)
dataset_new_train = [dataset_class.dataset_video_class(annot_dir, 'train_plus_val',
class_id = [class_id2[i]],
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_test,
temporal_transform = temporal_transform_test,
modality = args.modality)
for i in range(len(class_id2))]
print('Loading validating data..... class {}'.format(range(class_all[0], class_all[-1])))
print('Loading testing data..... class_id1 {} class_id2 {}'.format(range(class_id1[0], class_id1[-1]),
range(class_id2[0], class_id2[-1]) ))
dataset_test = [dataset_class.dataset_video_class(annot_dir, 'test',
class_id = class_id,
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_test,
temporal_transform = temporal_transform_test,
modality = args.modality)
for class_id in [class_id1, class_id2]]
dataloader_test = [DataLoader(dataset_test[i], batch_size=args.batch_size_val,
num_workers=args.num_workers,pin_memory=True)
for i in range(len(dataset_test))]
icarl.train(dataset_new_train, dataset_new, class_id2, dataloader_test)
test(net_test, save_dir,
'{}_ExemplarSet_{}.pkl'.format(args.modality, len(class_all)),
'{}-{}-{}.pth'.format(args.arch, args.modality, len(class_all)),
dataloader_test, class_id1, class_id2)
class_id1 = deepcopy(class_all) # update learned class
pdb.set_trace()
else:
class_id1 = [i for i in range(1, 41)]
class_id2 = [i for i in range(41, 46)]
# class_all = class_id1 + class_id2
class_all = [i for i in range(1, 84)]
print('Loading validating data.....')
dataset_test = dataset_class.dataset_video_class(annot_dir, 'test',
class_id = class_all,
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_test,
temporal_transform = temporal_transform_test,
modality = args.modality)
dataloader_test = DataLoader(dataset_test, batch_size=16,
num_workers=args.num_workers,pin_memory=True)
test(net_test, save_dir,
'{}_ExemplarSet_{}.pkl'.format(args.modality, len(class_all)),
'resnext-101-{}-{}.pth'.format(args.modality, len(class_all)),
dataloader_test, class_id1, class_id2)
# get_confusion_matrix(net_test, save_dir,
# '{}_ExemplarSet_{}.pkl'.format(args.modality, len(class_all)),
# 'resnext-101-{}-{}.pth'.format(args.modality, len(class_all)),
# dataloader_test, class_all)
| 45.766129
| 182
| 0.588828
|
bdd5e8e129828164920a6c6fbd77cf246b3947f6
| 7,388
|
py
|
Python
|
Yoyo/test.py
|
teabao/AI-introduction
|
0a608326f627446011f723201b8f705ad7c77e8c
|
[
"MIT"
] | null | null | null |
Yoyo/test.py
|
teabao/AI-introduction
|
0a608326f627446011f723201b8f705ad7c77e8c
|
[
"MIT"
] | 1
|
2021-05-19T08:34:44.000Z
|
2021-05-19T08:34:44.000Z
|
Yoyo/test.py
|
teabao/AI-introduction
|
0a608326f627446011f723201b8f705ad7c77e8c
|
[
"MIT"
] | 1
|
2021-05-19T08:23:28.000Z
|
2021-05-19T08:23:28.000Z
|
import argparse
import gym
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# from drawnow import drawnow
import matplotlib.pyplot as plt
last_score_plot = [-100]
avg_score_plot = [-100]
def draw_fig():
plt.title('reward')
plt.plot(last_score_plot, '-')
plt.plot(avg_score_plot, 'r-')
parser = argparse.ArgumentParser(
description='PyTorch PPO solution of MountainCarContinuous-v0')
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--actor_lr', type=float, default=1e-3)
parser.add_argument('--critic_lr', type=float, default=1e-3)
parser.add_argument('--clip_epsilon', type=float, default=0.2)
parser.add_argument('--gae_lambda', type=float, default=0.97)
parser.add_argument('--batch_size', type=int, default=10000)
parser.add_argument('--max_episode', type=int, default=100)
cfg = parser.parse_args()
env = gym.make('MountainCarContinuous-v0')
class running_state:
def __init__(self, state):
self.len = 1
self.running_mean = state
self.running_std = state ** 2
def update(self, state):
self.len += 1
old_mean = self.running_mean.copy()
self.running_mean[...] = old_mean + (state - old_mean) / self.len
self.running_std[...] = self.running_std + \
(state - old_mean) * (state - self.running_mean)
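  # NOTE: update() is a Welford-style online update of the running mean and
  # (unnormalized) variance; mean() and std() below are used to normalize observations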
def mean(self):
return self.running_mean
def std(self):
return np.sqrt(self.running_std / (self.len - 1))
class Actor(nn.Module):
def __init__(self):
super(Actor, self).__init__()
self.fc1 = nn.Linear(2, 64)
self.fc2 = nn.Linear(64, 64)
self.fc_mean = nn.Linear(64, 1)
self.fc_log_std = nn.Linear(64, 1)
def forward(self, x):
x = F.elu(self.fc1(x))
x = F.elu(self.fc2(x))
action_mean = self.fc_mean(x)
action_std = torch.exp(self.fc_log_std(x))
return action_mean.squeeze(), action_std.squeeze()
def get_action(state):
action_mean, action_std = actor(state)
action_dist = torch.distributions.Normal(action_mean, action_std)
action = action_dist.sample()
return action.item()
def synchronize_actors():
for target_param, param in zip(actor_old.parameters(), actor.parameters()):
target_param.data.copy_(param.data)
def update_actor(state, action, advantage):
mean_old, std_old = actor_old(state)
action_dist_old = torch.distributions.Normal(mean_old, std_old)
action_log_probs_old = action_dist_old.log_prob(action)
mean, std = actor(state)
  # print(mean.size(), std.size())
action_dist = torch.distributions.Normal(mean, std)
action_log_probs = action_dist.log_prob(action)
  # update the old actor before updating the current actor
synchronize_actors()
r_theta = torch.exp(action_log_probs - action_log_probs_old)
surrogate1 = r_theta * advantage
surrogate2 = torch.clamp(r_theta, 1.0 - cfg.clip_epsilon,
1.0 + cfg.clip_epsilon) * advantage
loss = -torch.min(surrogate1, surrogate2).mean()
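  # NOTE: this is the negated PPO clipped surrogate objective
  #   L_clip = E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)],  r = pi_theta / pi_theta_old;
  # an entropy bonus is subtracted below to encourage exploration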
entropy = action_dist.entropy()
loss = torch.mean(loss - 1e-2 * entropy)
actor_optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm(actor.parameters(), 40)
actor_optimizer.step()
return
class Critic(nn.Module):
def __init__(self):
super(Critic, self).__init__()
self.fc1 = nn.Linear(2, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, 1)
def forward(self, x):
x = F.elu(self.fc1(x))
x = F.elu(self.fc2(x))
value = self.fc3(x)
return value.squeeze()
def get_state_value(state):
state_value = critic(state)
return state_value
def update_critic(state, target):
state_value = critic(state)
loss = F.mse_loss(state_value, target)
critic_optimizer.zero_grad()
loss.backward()
critic_optimizer.step()
return
actor = Actor()
actor_old = Actor()
critic = Critic()
actor_optimizer = optim.Adam(actor.parameters(), lr=cfg.actor_lr)
critic_optimizer = optim.Adam(critic.parameters(), lr=cfg.critic_lr)
def main():
state = env.reset()
state_stat = running_state(state)
for i in range(cfg.max_episode):
start_time = time.perf_counter()
episode_score = 0
episode = 0
memory = []
with torch.no_grad():
while len(memory) < cfg.batch_size:
episode += 1
state = env.reset()
state_stat.update(state)
state = np.clip((state - state_stat.mean()) /
(state_stat.std() + 1e-6), -10., 10.)
for s in range(1000):
# env.render()
action = get_action(torch.tensor(state).float()[None, :])
next_state, reward, done, _ = env.step([action])
state_stat.update(next_state)
next_state = np.clip(
(next_state - state_stat.mean()) / (state_stat.std() + 1e-6), -10., 10.)
memory.append([state, action, reward, next_state, done])
state = next_state
episode_score += reward
if done:
break
state_batch, \
action_batch, \
reward_batch, \
next_state_batch, \
done_batch = map(lambda x: np.array(
x).astype(np.float32), zip(*memory))
state_batch = torch.tensor(state_batch).float()
values = get_state_value(state_batch).detach().cpu().numpy()
returns = np.zeros(action_batch.shape)
deltas = np.zeros(action_batch.shape)
advantages = np.zeros(action_batch.shape)
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(reward_batch.shape[0])):
returns[i] = reward_batch[i] + cfg.gamma * \
prev_return * (1 - done_batch[i])
# generalized advantage estimation
deltas[i] = reward_batch[i] + cfg.gamma * \
prev_value * (1 - done_batch[i]) - values[i]
advantages[i] = deltas[i] + cfg.gamma * \
cfg.gae_lambda * prev_advantage * (1 - done_batch[i])
prev_return = returns[i]
prev_value = values[i]
prev_advantage = advantages[i]
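    # NOTE: the backward sweep above computes discounted returns
    #   G_t = r_t + gamma * G_{t+1}
    # and GAE(lambda) advantages
    #   delta_t = r_t + gamma * V_{t+1} - V_t,  A_t = delta_t + gamma * lambda * A_{t+1},
    # with the (1 - done) factor masking both recursions at episode boundaries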
advantages = (advantages - advantages.mean()) / advantages.std()
advantages = torch.tensor(advantages).float()
action_batch = torch.tensor(action_batch).float()
returns = torch.tensor(returns).float()
    # use the discounted returns as value targets to update the critic
update_critic(state_batch, returns)
update_actor(state_batch, action_batch, advantages)
episode_score /= episode
print('last_score {:5f}, steps {}, ({:2f} sec/eps)'.
format(episode_score, len(memory), time.perf_counter() - start_time))
avg_score_plot.append(avg_score_plot[-1] * 0.99 + episode_score * 0.01)
last_score_plot.append(episode_score)
# drawnow(draw_fig)
env.close()
if __name__ == '__main__':
main()
| 31.172996
| 96
| 0.606253
|
e3faddd5e69246c4361590b6fe169ceb1ca329cb
| 871
|
py
|
Python
|
myaxf/urls.py
|
Pyrans/test1806
|
1afc62e09bbebf74521b4b6fdafde8eeaa260ed9
|
[
"Apache-2.0"
] | null | null | null |
myaxf/urls.py
|
Pyrans/test1806
|
1afc62e09bbebf74521b4b6fdafde8eeaa260ed9
|
[
"Apache-2.0"
] | null | null | null |
myaxf/urls.py
|
Pyrans/test1806
|
1afc62e09bbebf74521b4b6fdafde8eeaa260ed9
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from django.contrib import admin
from .views import *
urlpatterns = [
url(r'^home/', home, name='home'),
url(r'^market/', market, name='market'),
url(r'^cart/', cart, name='cart'),
url(r'^mine/', mine, name='mine'),
url(r'^market_with_params/(\d+)/(\d+)/(\d+)', market_with_params, name='market_params'),
url(r'^register&', RegisterAPI.as_view(), name='register'),
url(r'^login$', LoginAPI.as_view(), name='login'),
url(r'^logout$', LogoutAPI.as_view(), name='logout'),
url(r'^confirm/(.*)', confirm),
url(r'^check_uname$', check_uname),
url(r'^cart_api$', CartAPI.as_view()),
url(r'^cart_status$', CartStatusAPI.as_view()),
url(r'^cart_all_status$', CartAllStatusAPI.as_view()),
url(r'^cart_item$', CartItemAPI.as_view()),
url(r'^order$', OrderAPI.as_view(), name='order')
]
| 37.869565
| 92
| 0.63031
|
916477a6c7c827adc33128660cf93d7aa22ae997
| 1,804
|
py
|
Python
|
tests/test_transpile.py
|
blthree/sqlglot
|
c3130584db6d767575854ba0d57da37e026863c9
|
[
"MIT"
] | null | null | null |
tests/test_transpile.py
|
blthree/sqlglot
|
c3130584db6d767575854ba0d57da37e026863c9
|
[
"MIT"
] | null | null | null |
tests/test_transpile.py
|
blthree/sqlglot
|
c3130584db6d767575854ba0d57da37e026863c9
|
[
"MIT"
] | null | null | null |
import os
import unittest
from sqlglot import transpile
class TestTranspile(unittest.TestCase):
file_dir = os.path.dirname(__file__)
fixtures_dir = os.path.join(file_dir, 'fixtures')
def test_comments(self):
sql = transpile('SELECT 1 FROM foo -- comment')[0]
self.assertEqual(sql, 'SELECT 1 FROM foo')
sql = transpile('SELECT 1 /* inline */ FROM foo -- comment')[0]
self.assertEqual(sql, 'SELECT 1 FROM foo')
sql = transpile(
"""
SELECT 1 -- comment
FROM foo -- comment
"""
)[0]
self.assertEqual(sql, 'SELECT 1 FROM foo')
sql = transpile(
"""
SELECT 1 /* big comment
like this */
FROM foo -- comment
"""
)[0]
self.assertEqual(sql, 'SELECT 1 FROM foo')
def test_if(self):
sql = transpile('SELECT IF(a > 1, 1, 0) FROM foo')[0]
self.assertEqual(sql, 'SELECT CASE WHEN a > 1 THEN 1 ELSE 0 END FROM foo')
sql = transpile('SELECT IF(a > 1, 1) FROM foo')[0]
self.assertEqual(sql, 'SELECT CASE WHEN a > 1 THEN 1 END FROM foo')
def test_identity(self):
with open(os.path.join(self.fixtures_dir, 'identity.sql')) as f:
for sql in f:
self.assertEqual(transpile(sql)[0], sql.strip())
def test_pretty(self):
with open(os.path.join(self.fixtures_dir, 'pretty.sql')) as f:
lines = f.read().split(';')
size = len(lines)
for i in range(0, size, 2):
if i + 1 < size:
sql = lines[i]
pretty = lines[i + 1].strip()
generated = transpile(sql, pretty=True)[0]
self.assertEqual(generated, pretty)
| 32.214286
| 82
| 0.533259
|
74ee6db22e3da5e381be4b98c9bad3a7e31977c0
| 1,403
|
py
|
Python
|
corehq/apps/accounting/migrations/0009_make_billingaccount_name_unique.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/apps/accounting/migrations/0009_make_billingaccount_name_unique.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/apps/accounting/migrations/0009_make_billingaccount_name_unique.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-01 21:47
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations, models
from django.db.models import Count
from corehq.apps.accounting.utils import get_account_name_from_default_name
def _make_existing_billing_account_names_unique(apps, schema_editor):
BillingAccount = apps.get_model('accounting', 'BillingAccount')
counts_by_name = BillingAccount.objects.values('name').annotate(Count('name'))
duplicated_names = [
count_by_name['name']
for count_by_name in counts_by_name
if count_by_name['name__count'] > 1
]
for duplicated_name in duplicated_names:
for billing_account in BillingAccount.objects.filter(name=duplicated_name).order_by('date_created')[1:]:
billing_account.name = get_account_name_from_default_name(billing_account.name)
billing_account.save()
class Migration(migrations.Migration):
dependencies = [
('accounting', '0008_update_report_builder_included_feature_numbers'),
]
operations = [
migrations.RunPython(_make_existing_billing_account_names_unique),
migrations.AlterField(
model_name='billingaccount',
name='name',
field=models.CharField(db_index=True, max_length=200, unique=True),
),
]
| 32.627907
| 112
| 0.72345
|
3d8420383691bad5db348fe0cf21b10f0da91759
| 6,684
|
py
|
Python
|
test_code/test_main_unspacer.py
|
JKamlah/ocromore
|
f9d302eff234478c98e03740adf6bbeeafe7db8d
|
[
"Apache-2.0"
] | 16
|
2018-04-20T11:11:18.000Z
|
2020-01-14T11:11:59.000Z
|
test_code/test_main_unspacer.py
|
JKamlah/ocromore
|
f9d302eff234478c98e03740adf6bbeeafe7db8d
|
[
"Apache-2.0"
] | 2
|
2019-09-16T06:22:02.000Z
|
2020-08-27T23:11:49.000Z
|
test_code/test_main_unspacer.py
|
JKamlah/ocromore
|
f9d302eff234478c98e03740adf6bbeeafe7db8d
|
[
"Apache-2.0"
] | 4
|
2019-07-18T18:01:05.000Z
|
2020-07-10T18:44:09.000Z
|
"""
This is the starting file for testing unspacing capabilities (developed before the msa_algorithm).
It generates unspaced and line-height-adapted files from different OCR
results and compares them to the ground truth.
"""
from n_dist_keying.hocr_line_normalizer import HocrLineNormalizer
from n_dist_keying.hocr_bbox_comparator import HocrBBoxComparator
from n_dist_keying.hocr_line_height import LineHeightCalculator
from n_dist_keying.textfile_generator import TextFileGenerator
from ocr_validation.ocr_validator import OCRvalidator
from ocr_validation.visualization_handler import VisualizationHandler
USE_REFSPACING = True  # instead of the unspacing algorithm, use the refspacing (reference spacing) algorithm
DISPLAY_DIFFERENCES = True
IGNORE_LINEFEED = False
IGNORE_WHITESPACE = False
# base files (input)
filepath_ground_truth = "./Testfiles/oneprof.gt.txt"
filepath_ocropus = "../Testfiles/oneprof_ocropus.html"
filepath_tesseract = "../Testfiles/oneprof_tesseract_sure.html"
filepath_abbyy = "../Testfiles/oneprof_abbyy_tables_ok.hocr.html"
filepath_tesseract_txt = "./Testfiles/oneprof_tesseract_sure.txt"
# textfiles which get generated
filepath_ocropus_txt = "./Testfiles/oneprof_ocropus_spaced.txt"  # ocropus without lhi adaptation
filepath_abbyy_txt = "./Testfiles/oneprof_abbyy.txt"
filepath_ocropus_lha_txt = "./Testfiles/oneprof_ocropus_spaced_lha.txt"
filepath_abbyy_lha_txt = "./Testfiles/oneprof_abbyy_lha.txt"
# textfiles which get generated for final spacing comparison
filepath_ocropus_unspaced_lha = "./Testfiles/oneprof_ocropus_unspaced_lha.txt"
filepath_abbyy_unspaced_lha = "./Testfiles/oneprof_abbyy_unspaced_lha.txt"
hocr_comparator = HocrBBoxComparator()
ocrolist = hocr_comparator.get_ocropus_boxes(filepath_ocropus)
tesslist = hocr_comparator.get_tesseract_boxes(filepath_tesseract)
abbylist = hocr_comparator.get_abbyy_boxes(filepath_abbyy)
hocr_normalizer = HocrLineNormalizer()
ocrolist_normalized = hocr_normalizer.normalize_ocropus_list(ocrolist)
abbylist_normalized = hocr_normalizer.normalize_abbyy_list(abbylist)
tesslist_normalized = hocr_normalizer.normalize_tesseract_list(tesslist)
lh_calculator = LineHeightCalculator()
lhi_abbyy_normalized = lh_calculator.calculate_line_distance_information(abbylist_normalized, False, True, "abbyy_normalized")
lhi_tesseract_normalized = lh_calculator.calculate_line_distance_information(tesslist_normalized, False, True, "tesseract_normalized")
lhi_ocropus_normalized = lh_calculator.calculate_line_distance_information(ocrolist_normalized, False, True, "ocropus_normalized")
tfg = TextFileGenerator()
tfg.create_file(lhi_abbyy_normalized, abbylist_normalized, filepath_abbyy_lha_txt)
tfg2 = TextFileGenerator()
tfg2.create_file(lhi_ocropus_normalized, ocrolist_normalized, filepath_ocropus_lha_txt)
tfg3 = TextFileGenerator()
tfg3.create_file(None, ocrolist_normalized, filepath_ocropus_txt, False)
tfg4 = TextFileGenerator()
tfg4.create_file(None, abbylist_normalized, filepath_abbyy_txt, False)
base_ocr_lists = []
base_ocr_lists.append(abbylist_normalized)
base_ocr_lists.append(tesslist_normalized)
base_ocr_lists.append(ocrolist_normalized)
ocr_comparison = hocr_comparator.compare_lists(base_ocr_lists)
ocr_comparison.add_line_information(lhi_abbyy_normalized)
ocr_comparison.add_line_information(lhi_tesseract_normalized)
ocr_comparison.add_line_information(lhi_ocropus_normalized)
ocr_comparison.sort_set()
print("Print mean||decision||abbyy||tesseract||ocropus|||| without unspacing-------------------")
ocr_comparison.print_sets(False)
if not USE_REFSPACING:
ocr_comparison.unspace_list(2, 1) # unspace ocropus with tesseract as unspacing template
ocr_comparison.unspace_list(0, 1) # unspace abbyy with tesseract as unspacing template
else:
ocr_comparison.refspace_list(2, 1) # refspace ocropus with tesseract as reference spacing template
ocr_comparison.refspace_list(0, 1) # refspace abbyy with tesseract as reference spacing template
print("Print mean||decision||abbyy||tesseract||ocropus|||| ocropus and abbyy unspaced--------------------")
ocr_comparison.print_sets(False)
ocr_comparison.save_n_distance_keying_results_to_file("./Testfiles/oneprof_keying_result.txt", True)
ocr_comparison.save_dataset_to_file(filepath_ocropus_unspaced_lha, 2, True)
ocr_comparison.save_dataset_to_file(filepath_abbyy_unspaced_lha, 0, True)
ocr_validator = OCRvalidator()
print("Comparison of the unspaced files and spaced files to groundtruth--------------")
print("Refspacing is: ", USE_REFSPACING)
ocr_validator.set_groundtruth(filepath_ground_truth)
# plain file comparison
print("Plain file comparison---(tesseract is lha by default)--")
ocr_validator.set_ocr_file(filepath_ocropus_txt)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
ocr_validator.set_ocr_file(filepath_abbyy_txt)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
ocr_validator.set_ocr_file(filepath_tesseract_txt)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
# lha file comparison
print("LHA file comparison---(tesseract is lha by default)-------------")
ocr_validator.set_ocr_file(filepath_ocropus_lha_txt)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
ocr_validator.set_ocr_file(filepath_abbyy_lha_txt)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
ocr_validator.set_ocr_file(filepath_tesseract_txt)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
# unspaced + lha file comparison
print("Unspaced + LHA file comparison---(tesseract is lha and us by default)-------------")
ocr_validator.set_ocr_file(filepath_ocropus_unspaced_lha)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
ocr_validator.set_ocr_file(filepath_abbyy_unspaced_lha)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
ocr_validator.set_ocr_file(filepath_tesseract_txt)
ocr_validator.compare_ocrolib_edist(IGNORE_LINEFEED, IGNORE_WHITESPACE)
if DISPLAY_DIFFERENCES:
pyc_handler = VisualizationHandler()
# the lha visual comparison
#pyc_handler.show_file_comparison(filepath_ocropus_lha_txt, filepath_ocropus_txt)
#pyc_handler.show_file_comparison(filepath_abbyy_lha_txt, filepath_abbyy_txt)
pyc_handler.show_file_comparison(filepath_ground_truth, filepath_ocropus_unspaced_lha)
    # note that this is the line-height-adapted text generated by this file
pyc_handler.show_file_comparison(filepath_ground_truth, filepath_abbyy_unspaced_lha)
#pyc_handler.show_file_comparison(filepath_ground_truth, filepath_abbyy_lha_txt)
#pyc_handler.show_file_comparison(filepath_ground_truth, filepath_abbyy_unspaced_lha)
| 45.469388
| 134
| 0.844405
|
44568a63b711952a8695aab87de40ae8c7b3c03d
| 12,632
|
py
|
Python
|
mailchimp_marketing_asyncio/models/open_activity.py
|
john-parton/mailchimp-asyncio
|
3865ca0867bec8f537dc1e3256aa3a160c00f8a2
|
[
"Apache-2.0"
] | null | null | null |
mailchimp_marketing_asyncio/models/open_activity.py
|
john-parton/mailchimp-asyncio
|
3865ca0867bec8f537dc1e3256aa3a160c00f8a2
|
[
"Apache-2.0"
] | null | null | null |
mailchimp_marketing_asyncio/models/open_activity.py
|
john-parton/mailchimp-asyncio
|
3865ca0867bec8f537dc1e3256aa3a160c00f8a2
|
[
"Apache-2.0"
] | 1
|
2022-03-09T14:52:22.000Z
|
2022-03-09T14:52:22.000Z
|
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OpenActivity(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'campaign_id': 'str',
'list_id': 'str',
'list_is_active': 'bool',
'contact_status': 'str',
'email_id': 'str',
'email_address': 'str',
'merge_fields': 'dict(str, object)',
'vip': 'bool',
'opens_count': 'int',
'opens': 'list[MemberActivity1]',
'links': 'list[ResourceLink]'
}
attribute_map = {
'campaign_id': 'campaign_id',
'list_id': 'list_id',
'list_is_active': 'list_is_active',
'contact_status': 'contact_status',
'email_id': 'email_id',
'email_address': 'email_address',
'merge_fields': 'merge_fields',
'vip': 'vip',
'opens_count': 'opens_count',
'opens': 'opens',
'links': '_links'
}
def __init__(self, campaign_id=None, list_id=None, list_is_active=None, contact_status=None, email_id=None, email_address=None, merge_fields=None, vip=None, opens_count=None, opens=None, links=None): # noqa: E501
"""OpenActivity - a model defined in Swagger""" # noqa: E501
self._campaign_id = None
self._list_id = None
self._list_is_active = None
self._contact_status = None
self._email_id = None
self._email_address = None
self._merge_fields = None
self._vip = None
self._opens_count = None
self._opens = None
self._links = None
self.discriminator = None
if campaign_id is not None:
self.campaign_id = campaign_id
if list_id is not None:
self.list_id = list_id
if list_is_active is not None:
self.list_is_active = list_is_active
if contact_status is not None:
self.contact_status = contact_status
if email_id is not None:
self.email_id = email_id
if email_address is not None:
self.email_address = email_address
if merge_fields is not None:
self.merge_fields = merge_fields
if vip is not None:
self.vip = vip
if opens_count is not None:
self.opens_count = opens_count
if opens is not None:
self.opens = opens
if links is not None:
self.links = links
@property
def campaign_id(self):
"""Gets the campaign_id of this OpenActivity. # noqa: E501
The unique id for the campaign. # noqa: E501
:return: The campaign_id of this OpenActivity. # noqa: E501
:rtype: str
"""
return self._campaign_id
@campaign_id.setter
def campaign_id(self, campaign_id):
"""Sets the campaign_id of this OpenActivity.
The unique id for the campaign. # noqa: E501
:param campaign_id: The campaign_id of this OpenActivity. # noqa: E501
:type: str
"""
self._campaign_id = campaign_id
@property
def list_id(self):
"""Gets the list_id of this OpenActivity. # noqa: E501
The unique id for the list. # noqa: E501
:return: The list_id of this OpenActivity. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this OpenActivity.
The unique id for the list. # noqa: E501
:param list_id: The list_id of this OpenActivity. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def list_is_active(self):
"""Gets the list_is_active of this OpenActivity. # noqa: E501
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:return: The list_is_active of this OpenActivity. # noqa: E501
:rtype: bool
"""
return self._list_is_active
@list_is_active.setter
def list_is_active(self, list_is_active):
"""Sets the list_is_active of this OpenActivity.
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:param list_is_active: The list_is_active of this OpenActivity. # noqa: E501
:type: bool
"""
self._list_is_active = list_is_active
@property
def contact_status(self):
"""Gets the contact_status of this OpenActivity. # noqa: E501
The status of the member, namely if they are subscribed, unsubscribed, deleted, non-subscribed, transactional, pending, or need reconfirmation. # noqa: E501
:return: The contact_status of this OpenActivity. # noqa: E501
:rtype: str
"""
return self._contact_status
@contact_status.setter
def contact_status(self, contact_status):
"""Sets the contact_status of this OpenActivity.
The status of the member, namely if they are subscribed, unsubscribed, deleted, non-subscribed, transactional, pending, or need reconfirmation. # noqa: E501
:param contact_status: The contact_status of this OpenActivity. # noqa: E501
:type: str
"""
self._contact_status = contact_status
@property
def email_id(self):
"""Gets the email_id of this OpenActivity. # noqa: E501
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:return: The email_id of this OpenActivity. # noqa: E501
:rtype: str
"""
return self._email_id
@email_id.setter
def email_id(self, email_id):
"""Sets the email_id of this OpenActivity.
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:param email_id: The email_id of this OpenActivity. # noqa: E501
:type: str
"""
self._email_id = email_id
@property
def email_address(self):
"""Gets the email_address of this OpenActivity. # noqa: E501
Email address for a subscriber. # noqa: E501
:return: The email_address of this OpenActivity. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this OpenActivity.
Email address for a subscriber. # noqa: E501
:param email_address: The email_address of this OpenActivity. # noqa: E501
:type: str
"""
self._email_address = email_address
@property
def merge_fields(self):
"""Gets the merge_fields of this OpenActivity. # noqa: E501
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:return: The merge_fields of this OpenActivity. # noqa: E501
:rtype: dict(str, object)
"""
return self._merge_fields
@merge_fields.setter
def merge_fields(self, merge_fields):
"""Sets the merge_fields of this OpenActivity.
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:param merge_fields: The merge_fields of this OpenActivity. # noqa: E501
:type: dict(str, object)
"""
self._merge_fields = merge_fields
@property
def vip(self):
"""Gets the vip of this OpenActivity. # noqa: E501
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:return: The vip of this OpenActivity. # noqa: E501
:rtype: bool
"""
return self._vip
@vip.setter
def vip(self, vip):
"""Sets the vip of this OpenActivity.
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:param vip: The vip of this OpenActivity. # noqa: E501
:type: bool
"""
self._vip = vip
@property
def opens_count(self):
"""Gets the opens_count of this OpenActivity. # noqa: E501
        The total number of times this campaign was opened by the list member.  # noqa: E501
:return: The opens_count of this OpenActivity. # noqa: E501
:rtype: int
"""
return self._opens_count
@opens_count.setter
def opens_count(self, opens_count):
"""Sets the opens_count of this OpenActivity.
        The total number of times this campaign was opened by the list member.  # noqa: E501
:param opens_count: The opens_count of this OpenActivity. # noqa: E501
:type: int
"""
self._opens_count = opens_count
@property
def opens(self):
"""Gets the opens of this OpenActivity. # noqa: E501
An array of timestamps for each time a list member opened the campaign. If a list member opens an email multiple times, this will return a separate timestamp for each open event. # noqa: E501
:return: The opens of this OpenActivity. # noqa: E501
:rtype: list[MemberActivity1]
"""
return self._opens
@opens.setter
def opens(self, opens):
"""Sets the opens of this OpenActivity.
An array of timestamps for each time a list member opened the campaign. If a list member opens an email multiple times, this will return a separate timestamp for each open event. # noqa: E501
:param opens: The opens of this OpenActivity. # noqa: E501
:type: list[MemberActivity1]
"""
self._opens = opens
@property
def links(self):
"""Gets the links of this OpenActivity. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this OpenActivity. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this OpenActivity.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this OpenActivity. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(OpenActivity, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OpenActivity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.738693
| 224
| 0.611146
|
4e8ab5b14c699aa7e0402c4b0789cd8027c3135e
| 138
|
py
|
Python
|
urlcf/exceptions.py
|
alkorgun/urlcf
|
3d8752ac94b4f57a07f8de2381ffbe53f815ebfb
|
[
"MIT"
] | 1
|
2017-07-06T02:10:23.000Z
|
2017-07-06T02:10:23.000Z
|
urlcf/exceptions.py
|
alkorgun/urlcf
|
3d8752ac94b4f57a07f8de2381ffbe53f815ebfb
|
[
"MIT"
] | null | null | null |
urlcf/exceptions.py
|
alkorgun/urlcf
|
3d8752ac94b4f57a07f8de2381ffbe53f815ebfb
|
[
"MIT"
] | null | null | null |
"""
Created on Nov 17, 2016
@author: alkorgun
"""
class UrlcfError(Exception):
pass
class UIDNotFoundError(UrlcfError):
pass
| 9.857143
| 35
| 0.688406
|
c34bc02ae30160d892fdafb80e7caeec298e7547
| 1,075
|
py
|
Python
|
h2o-py/h2o/model/__init__.py
|
kernelrich/h2o-3
|
16bd6be6d0ac22b037cb55b4c647e63e2b112e1e
|
[
"Apache-2.0"
] | 2
|
2020-09-23T14:23:55.000Z
|
2020-09-23T19:26:30.000Z
|
h2o-py/h2o/model/__init__.py
|
kernelrich/h2o-3
|
16bd6be6d0ac22b037cb55b4c647e63e2b112e1e
|
[
"Apache-2.0"
] | 1
|
2021-04-06T13:07:04.000Z
|
2021-04-06T13:07:04.000Z
|
h2o-py/h2o/model/__init__.py
|
kernelrich/h2o-3
|
16bd6be6d0ac22b037cb55b4c647e63e2b112e1e
|
[
"Apache-2.0"
] | 1
|
2020-04-17T13:06:26.000Z
|
2020-04-17T13:06:26.000Z
|
from .autoencoder import H2OAutoEncoderModel
from .binomial import H2OBinomialModel
from .clustering import H2OClusteringModel
from .confusion_matrix import ConfusionMatrix
from .dim_reduction import H2ODimReductionModel
from .metrics_base import MetricsBase
from .model_base import ModelBase
from .model_future import H2OModelFuture
from .multinomial import H2OMultinomialModel
from .ordinal import H2OOrdinalModel
from .regression import H2ORegressionModel
from .segment_models import H2OSegmentModels
from .metrics_base import H2OAutoEncoderModelMetrics
from .metrics_base import H2OBinomialModelMetrics
from .metrics_base import H2OClusteringModelMetrics
from .metrics_base import H2ODimReductionModelMetrics
from .metrics_base import H2OMultinomialModelMetrics
from .metrics_base import H2OOrdinalModelMetrics
from .metrics_base import H2ORegressionModelMetrics
__all__ = ["H2OAutoEncoderModel", "H2OBinomialModel", "H2OClusteringModel",
"ConfusionMatrix", "H2ODimReductionModel", "MetricsBase", "ModelBase",
"H2OModelFuture", "H2OSegmentModels"]
| 44.791667
| 81
| 0.853023
|
5c4a167ff55b871ef9c337f494458d7b118f4d2b
| 1,490
|
py
|
Python
|
soundrts/worldplayercomputer2.py
|
fcnjd/soundrts
|
3492503d0f4712a31c662d57434ddf8a852d0816
|
[
"BSD-3-Clause"
] | 1
|
2021-12-28T20:57:20.000Z
|
2021-12-28T20:57:20.000Z
|
soundrts/worldplayercomputer2.py
|
fcnjd/soundrts
|
3492503d0f4712a31c662d57434ddf8a852d0816
|
[
"BSD-3-Clause"
] | 1
|
2018-02-17T10:41:18.000Z
|
2018-02-17T10:41:18.000Z
|
soundrts/worldplayercomputer2.py
|
fcnjd/soundrts
|
3492503d0f4712a31c662d57434ddf8a852d0816
|
[
"BSD-3-Clause"
] | null | null | null |
from worldorders import ORDERS_DICT
from worldplayerbase import Player
orders = sorted(ORDERS_DICT.keys()) # sort to avoid desync
orders.remove("enter") # later
orders.remove("stop")
orders.remove("attack")
orders.remove("patrol")
orders.remove("wait")
orders.remove("auto_explore")
orders.remove("auto_attack")
orders.remove("block")
orders.remove("join_group")
orders.remove("cancel_building")
orders.remove("cancel_training")
orders.remove("cancel_upgrading")
orders.remove("rallying_point")
def _id(x):
return x.id
class Computer2(Player):
name = ["ai2"]
def __init__(self, *args, **kargs):
Player.__init__(self, *args)
def __repr__(self):
return "Computer2(%s)" % self.client
def _random_order(self, unit, targets):
order = ORDERS_DICT[self.world.random.choice(orders)]
args = order.nb_args
menu = order.menu(unit, strict=True)
if menu:
order = self.world.random.choice(menu).split()
if args:
order.append(self.world.random.choice(targets).id)
return order
def play(self):
# sort to avoid desync
targets = list(self.perception) + list(self.memory)
targets = [x for x in targets if not getattr(x, "is_an_exit", False)]
targets.sort(key=_id)
for u in self.units:
if not u.orders:
order = self._random_order(u, targets)
if order:
u.take_order(order)
| 27.592593
| 77
| 0.637584
|
ea3c994b779a9c5e2c64762029f80fa5cc0998a9
| 8,570
|
py
|
Python
|
Project 6 Capstone Project/airflow/dag.py
|
senthilvel-dev/UD_DE
|
536cb92f47743b7a52fd62b91845992943965411
|
[
"MIT"
] | 42
|
2020-05-22T01:24:33.000Z
|
2022-03-28T14:51:13.000Z
|
Project 6 Capstone Project/airflow/dag.py
|
senthilvel-dev/UD_DE
|
536cb92f47743b7a52fd62b91845992943965411
|
[
"MIT"
] | 9
|
2020-12-17T21:21:08.000Z
|
2022-03-29T22:29:16.000Z
|
Project 6 Capstone Project/airflow/dag.py
|
vineeths96/Data-Engineering-Nanodegree
|
4b588607ca654db7e8a2469ab113d4323a1fc079
|
[
"MIT"
] | 30
|
2020-05-22T01:19:31.000Z
|
2022-03-13T20:06:28.000Z
|
import datetime
from pathlib import Path
from airflow import DAG
from airflow.models import Variable
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.s3_to_redshift_operator import S3ToRedshiftTransfer
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
# DAG default argument dictionary
default_args = {
'owner': 'vineeths',
'start_date': datetime.datetime(2020, 2, 1),
'depends_on_past': False,
'catchup': False,
'email_on_retry': False,
'retries': 3,
'retry_delay': datetime.timedelta(minutes=5),
'aws_conn_id': 'aws',
'postgres_conn_id': 'redshift',
'redshift_conn_id': 'redshift',
'params': {
's3_bucket': 'udacity-twitter-stream',
's3_region': 'us-west-2',
's3_json_path': None,
'redshift_schema': 'public',
'aws_iam_role': Variable.get("aws_iam_role")
}
}
# Define DAG with template SQL path
with DAG(dag_id='capstone_dag',
description='ETL data from S3 into AWS Redshift',
default_args=default_args,
schedule_interval='@daily',
template_searchpath=str(Path(__file__).parent.parent.joinpath('sql'))) as dag:
# Dummy start task
start = DummyOperator(task_id='start_execution')
# Create AWS Redshift cluster
create_cluster = BashOperator(
task_id='create_redshift_cluster',
bash_command='python aws_cluster_create.py'
)
# Upload static datasets to S3 bucket
upload_data_s3 = BashOperator(
task_id='upload_data_to_s3',
bash_command='python upload_to_s3.py'
)
    # Stream tweet data to AWS Kinesis
stream_tweet = BashOperator(
task_id='stream_data_to_kinesis',
bash_command='python stream_tweets.py'
)
    # Stream historical tweet data to AWS Kinesis
stream_historical_tweet = BashOperator(
task_id='stream_batch_data_to_kinesis',
bash_command='python search_tweets.py'
)
# Create the tables in AWS Redshift cluster
create_tables = PostgresOperator(
task_id='create_tables',
sql='create_tables.sql'
)
# Stage tweets data in Redshift
stage_tweets = S3ToRedshiftTransfer(
task_id='stage_tweets',
schema='{{ params.redshift_schema }}',
table='staging_tweets',
s3_bucket='{{ params.s3_bucket }}',
s3_key='twitter_feed',
copy_options=['COMPUPDATE OFF', 'STATUPDATE OFF', 'TRUNCATECOLUMNS']
)
# Stage happiness data in Redshift
stage_happiness = S3ToRedshiftTransfer(
task_id='stage_happiness',
schema='{{ params.redshift_schema }}',
table='staging_happiness',
s3_bucket='{{ params.s3_bucket }}',
s3_key='happiness',
copy_options=['COMPUPDATE OFF', 'STATUPDATE OFF', 'TRUNCATECOLUMNS']
)
# Stage temperature data in Redshift
stage_temperature = S3ToRedshiftTransfer(
task_id='stage_temperature',
schema='{{ params.redshift_schema }}',
table='staging_temperature',
s3_bucket='{{ params.s3_bucket }}',
s3_key='temperature',
copy_options=['COMPUPDATE OFF', 'STATUPDATE OFF', 'TRUNCATECOLUMNS']
)
# Runs a basic data quality check to ensure data is inserted
check_tweets = CheckOperator(
task_id='quality_check_staging_tweets_table',
sql='SELECT COUNT(*) FROM public.staging_tweets',
conn_id='{{ redshift_conn_id }}'
)
# Runs a basic data quality check to ensure data is inserted
check_happiness = ValueCheckOperator(
task_id='quality_check_staging_happiness_table',
sql='SELECT COUNT(*) FROM public.staging_happiness',
pass_value=154,
conn_id='{{ redshift_conn_id }}'
)
# Runs a basic data quality check to ensure data is inserted
check_temperature = ValueCheckOperator(
task_id='quality_check_staging_temperature_table',
sql='SELECT COUNT(*) FROM public.staging_temperature',
pass_value=8599212,
conn_id='{{ redshift_conn_id }}'
)
# Load user table from staging tables
load_users_table = PostgresOperator(
task_id='load_users_table',
sql='users_insert.sql'
)
# Load sources table from staging tables
load_sources_table = PostgresOperator(
task_id='load_sources_table',
sql='sources_insert.sql'
)
# Load happiness table from staging tables
load_happiness_table = PostgresOperator(
task_id='load_happiness_table',
sql='happiness_insert.sql'
)
# Load temperature table from staging tables
load_temperature_table = PostgresOperator(
task_id='load_temperature_table',
sql='temperature_insert.sql'
)
# Load time table from staging tables
load_time_table = PostgresOperator(
task_id='load_time_table',
sql='time_insert.sql'
)
# Load tweets table from staging tables
load_tweets_table = PostgresOperator(
task_id='load_tweets_fact_table',
sql='tweets_insert.sql'
)
# Runs a basic data quality check to ensure data is inserted
check_users_dim = CheckOperator(
task_id='quality_check_users_table',
sql='SELECT COUNT(*) FROM public.users',
conn_id='{{ redshift_conn_id }}'
)
# Runs a basic data quality check to ensure data is inserted
check_sources_dim = CheckOperator(
task_id='quality_check_sources_table',
sql='SELECT COUNT(*) FROM public.sources',
conn_id='{{ redshift_conn_id }}'
)
# Runs a basic data quality check to ensure data is inserted
check_happiness_dim = CheckOperator(
task_id='quality_check_dim_happiness_table',
sql='SELECT COUNT(*) FROM public.happiness',
conn_id='{{ redshift_conn_id }}'
)
# Runs a basic data quality check to ensure data is inserted
check_temperature_dim = CheckOperator(
task_id='quality_check_dim_temperature_table',
sql='SELECT COUNT(*) FROM public.temperature',
conn_id='{{ redshift_conn_id }}'
)
# Runs a basic data quality check to ensure data is inserted
check_time = CheckOperator(
task_id='quality_check_time_table',
sql='SELECT COUNT(*) FROM public.time',
conn_id='{{ redshift_conn_id }}'
)
# Runs a basic data quality check to ensure data is inserted
check_tweets_fact = CheckOperator(
task_id='quality_check_fact_tweets_table',
sql='SELECT COUNT(*) FROM public.tweets',
conn_id='{{ redshift_conn_id }}'
)
# Destroy AWS Redshift cluster
destroy_cluster = BashOperator(
task_id='destroy_redshift_cluster',
bash_command='python aws_cluster_destroy.py'
)
# Dummy end task
end = DummyOperator(task_id='stop_execution')
# Setup task dependencies
"""
# Stream real time tweets (Uncomment this block and comment next block)
start >> upload_data_s3 >> stream_tweet >> create_tables >> [stage_tweets,
stage_happiness,
stage_temperature]
"""
# Stream historical tweets
start >> create_cluster >> upload_data_s3 >> stream_historical_tweet >> create_tables >> [stage_tweets,
stage_happiness,
stage_temperature]
[stage_tweets >> check_tweets,
stage_happiness >> check_happiness,
stage_temperature >> check_temperature] >> load_tweets_table >> check_tweets_fact >> [load_users_table,
load_sources_table,
load_happiness_table,
load_temperature_table,
load_time_table]
[load_users_table >> check_users_dim,
load_sources_table >> check_sources_dim,
load_happiness_table >> check_happiness_dim,
load_temperature_table >> check_temperature_dim,
load_time_table >> check_time] >> destroy_cluster >> end
| 35.857741
| 114
| 0.632439
|
017370f2a0f8f69bf4b6af62cf06e1be1d5bded3
| 13,177
|
py
|
Python
|
thrift/test/py/TestClientServer.py
|
fakeNetflix/facebook-repo-fbthrift
|
24f2357142d1da8c89f4cabc6cb144d83749b3c6
|
[
"Apache-2.0"
] | 2
|
2021-06-29T13:42:22.000Z
|
2021-09-06T10:57:34.000Z
|
thrift/test/py/TestClientServer.py
|
fakeNetflix/facebook-repo-fbthrift
|
24f2357142d1da8c89f4cabc6cb144d83749b3c6
|
[
"Apache-2.0"
] | null | null | null |
thrift/test/py/TestClientServer.py
|
fakeNetflix/facebook-repo-fbthrift
|
24f2357142d1da8c89f4cabc6cb144d83749b3c6
|
[
"Apache-2.0"
] | 5
|
2021-06-29T13:42:26.000Z
|
2022-02-08T02:41:34.000Z
|
#!/usr/bin/env python
# This starts up a bunch of servers, one for each of the server type
# and socket type combinations we have. It then runs through the tests
# for each server, which entails connecting to the server, calling a
# method on it, asserting something about the result, and then closing
# the connection.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import sys
from subprocess import Popen
import time
import unittest
import string
import socket
import errno
import ssl as SSL
from ThriftTest import ThriftTest, SecondService
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport.THeaderTransport import (
THeaderTransport, TRANSFORM, CLIENT_TYPE,
)
from thrift.transport import TSocket, TSSLSocket
from thrift.protocol import (
TBinaryProtocol, THeaderProtocol, TMultiplexedProtocol, TCompactProtocol
)
_servers = []
_ports = {}
try:
from thrift.protocol import fastproto
except ImportError:
fastproto = None
def start_server(server_type, ssl, server_header, server_context,
multiple, port):
server_path = os.path.dirname(sys.argv[0])
if sys.version_info[0] >= 3:
server_bin = os.path.join(server_path, 'py3_test_server.par')
else:
server_bin = os.path.join(server_path, 'python_test_server.par')
args = [server_bin, '--port', str(port)]
if ssl:
args.append('--ssl')
if server_header:
args.append('--header')
if server_context:
args.append('--context')
if multiple:
args.append('--multiple')
args.append(server_type)
stdout = None
stderr = None
if sys.stdout.isatty():
stdout = sys.stdout
stderr = sys.stderr
return Popen(args, stdout=stdout, stderr=stderr)
def isConnectionRefused(e):
if sys.version_info[0] >= 3:
return isinstance(e, ConnectionRefusedError)
else:
return e[0] == errno.ECONNREFUSED
def wait_for_server(port, timeout, ssl=False):
end = time.time() + timeout
while time.time() < end:
try:
sock = socket.socket()
sock.settimeout(end - time.time())
if ssl:
sock = SSL.wrap_socket(sock)
sock.connect(('localhost', port))
return True
except socket.timeout:
return False
except socket.error as e:
if not isConnectionRefused(e):
raise
finally:
sock.close()
time.sleep(0.1)
return False
class AbstractTest(object):
@classmethod
def setUpClass(cls):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 0))
port = sock.getsockname()[1]
server = start_server(
cls.server_type,
cls.ssl,
cls.server_header,
cls.server_context,
cls.multiple,
port)
if not wait_for_server(port, 5.0, ssl=cls.ssl):
msg = "Failed to start " + cls.server_type
if cls.ssl:
msg += " using ssl"
if cls.server_header:
msg += " using header protocol"
if cls.server_context:
msg += " using context"
raise Exception(msg)
cls._port = port
cls._server = server
@classmethod
def tearDownClass(cls):
cls._server.kill()
cls._server.wait()
def bytes_comp(self, seq1, seq2):
if not isinstance(seq1, bytes):
seq1 = seq1.encode('utf-8')
if not isinstance(seq2, bytes):
seq2 = seq2.encode('utf-8')
self.assertEquals(seq1, seq2)
def setUp(self):
if self.ssl:
self.socket = TSSLSocket.TSSLSocket("localhost", self._port)
else:
self.socket = TSocket.TSocket("localhost", self._port)
self.transport = TTransport.TBufferedTransport(self.socket)
self.protocol = self.protocol_factory.getProtocol(self.transport)
if isinstance(self, HeaderAcceleratedCompactTest):
self.protocol.trans.set_protocol_id(
THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)
self.protocol.reset_protocol()
self.transport.open()
self.client = ThriftTest.Client(self.protocol)
if self.multiple:
p = TMultiplexedProtocol.TMultiplexedProtocol(self.protocol,
"ThriftTest")
self.client = ThriftTest.Client(p)
p = TMultiplexedProtocol.TMultiplexedProtocol(self.protocol,
"SecondService")
self.client2 = SecondService.Client(p)
else:
self.client = ThriftTest.Client(self.protocol)
self.client2 = None
def tearDown(self):
self.transport.close()
def testVoid(self):
self.client.testVoid()
def testString(self):
self.bytes_comp(self.client.testString('Python'), 'Python')
def testByte(self):
self.assertEqual(self.client.testByte(63), 63)
def testI32(self):
self.assertEqual(self.client.testI32(-1), -1)
self.assertEqual(self.client.testI32(0), 0)
def testI64(self):
self.assertEqual(self.client.testI64(-34359738368), -34359738368)
def testDouble(self):
self.assertEqual(self.client.testDouble(-5.235098235), -5.235098235)
def testFloat(self):
self.assertEqual(self.client.testFloat(-5.25), -5.25)
def testStruct(self):
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = self.client.testStruct(x)
self.bytes_comp(y.string_thing, "Zero")
self.assertEqual(y.byte_thing, 1)
self.assertEqual(y.i32_thing, -3)
self.assertEqual(y.i64_thing, -5)
def testException(self):
self.client.testException('Safe')
try:
self.client.testException('Xception')
self.fail("should have gotten exception")
except Xception as x:
self.assertEqual(x.errorCode, 1001)
self.bytes_comp(x.message, 'Xception') # noqa
try:
self.client.testException("throw_undeclared")
self.fail("should have thrown exception")
except Exception: # type is undefined
pass
def testOneway(self):
start = time.time()
self.client.testOneway(1)
end = time.time()
self.assertTrue(end - start < 0.2,
"oneway sleep took %f sec" % (end - start))
def testblahBlah(self):
if self.client2:
self.assertEqual(self.client2.blahBlah(), None)
class NormalBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()
class AcceleratedBinaryTest(AbstractTest):
protocol_factory = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
class HeaderBase(AbstractTest):
protocol_factory = THeaderProtocol.THeaderProtocolFactory(
True,
[CLIENT_TYPE.HEADER,
CLIENT_TYPE.FRAMED_DEPRECATED,
CLIENT_TYPE.UNFRAMED_DEPRECATED,
CLIENT_TYPE.HTTP_SERVER]
)
class HeaderTest(HeaderBase):
def testZlibCompression(self):
htrans = self.protocol.trans
if isinstance(htrans, THeaderTransport):
htrans.add_transform(TRANSFORM.ZLIB)
self.testStruct()
def testSnappyCompression(self):
htrans = self.protocol.trans
if isinstance(htrans, THeaderTransport):
htrans.add_transform(TRANSFORM.SNAPPY)
self.testStruct()
def testMultipleCompression(self):
htrans = self.protocol.trans
if isinstance(htrans, THeaderTransport):
htrans.add_transform(TRANSFORM.ZLIB)
htrans.add_transform(TRANSFORM.SNAPPY)
self.testStruct()
def testKeyValueHeader(self):
htrans = self.protocol.trans
if isinstance(htrans, THeaderTransport):
# Try just persistent header
htrans.set_persistent_header("permanent", "true")
self.client.testString('test')
headers = htrans.get_headers()
self.assertTrue("permanent" in headers)
self.assertEquals(headers["permanent"], "true")
# Try with two transient headers
htrans.set_header("transient1", "true")
htrans.set_header("transient2", "true")
self.client.testString('test')
headers = htrans.get_headers()
self.assertTrue("permanent" in headers)
self.assertEquals(headers["permanent"], "true")
self.assertTrue("transient1" in headers)
self.assertEquals(headers["transient1"], "true")
self.assertTrue("transient2" in headers)
self.assertEquals(headers["transient2"], "true")
# Add one, update one and delete one transient header
htrans.set_header("transient2", "false")
htrans.set_header("transient3", "true")
self.client.testString('test')
headers = htrans.get_headers()
self.assertTrue("permanent" in headers)
self.assertEquals(headers["permanent"], "true")
self.assertTrue("transient1" not in headers)
self.assertTrue("transient2" in headers)
self.assertEquals(headers["transient2"], "false")
self.assertTrue("transient3" in headers)
self.assertEquals(headers["transient3"], "true")
class HeaderAcceleratedCompactTest(HeaderBase):
pass
class HeaderFramedCompactTest(HeaderBase):
def setUp(self):
self.socket = TSocket.TSocket("localhost", self._port)
self.transport = TTransport.TFramedTransport(self.socket)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self.transport.open()
self.client = ThriftTest.Client(self.protocol)
self.client2 = None
class HeaderFramedBinaryTest(HeaderBase):
def setUp(self):
self.socket = TSocket.TSocket("localhost", self._port)
self.transport = TTransport.TFramedTransport(self.socket)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.transport.open()
self.client = ThriftTest.Client(self.protocol)
self.client2 = None
class HeaderUnframedCompactTest(HeaderBase):
def setUp(self):
self.socket = TSocket.TSocket("localhost", self._port)
self.transport = TTransport.TBufferedTransport(self.socket)
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self.transport.open()
self.client = ThriftTest.Client(self.protocol)
self.client2 = None
class HeaderUnframedBinaryTest(HeaderBase):
def setUp(self):
self.socket = TSocket.TSocket("localhost", self._port)
self.transport = TTransport.TBufferedTransport(self.socket)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
self.transport.open()
self.client = ThriftTest.Client(self.protocol)
self.client2 = None
def camelcase(s):
if not s[0].isupper():
if sys.version_info[0] >= 3:
s = ''.join([x.capitalize() for x in s.split('_')])
else:
s = ''.join(map(string.capitalize, s.split('_')))
return s
def class_name_mixin(k, v):
mixin = camelcase(k)
if isinstance(v, bool):
mixin += 'On' if v else 'Off'
else:
mixin += camelcase(v)
return mixin
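# NOTE: new_test_class() below synthesizes a concrete test case at runtime: it builds a
# "class <Name>(cls, unittest.TestCase):" source string whose body assigns each config
# entry as a class attribute, exec()s it, and returns the resulting class object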
def new_test_class(cls, vars):
template = ""
name = cls.__name__
for k, v in sorted(vars.items()):
name += class_name_mixin(k, v)
template += " {} = {!r}\n".format(k, v)
template = "class {}(cls, unittest.TestCase):\n".format(name) + template
exec(template)
return locals()[name]
def add_test_classes(module):
classes = []
for server_context in (True, False):
for multiple in (True, False):
config1 = {
'server_type': "TCppServer",
'ssl': False,
'server_header': False,
'server_context': server_context,
'multiple': multiple,
}
classes.append(new_test_class(NormalBinaryTest, config1))
classes.append(new_test_class(AcceleratedBinaryTest, config1))
config2 = {
'server_type': "TCppServer",
'ssl': False,
'server_header': False,
'server_context': False,
'multiple': False,
}
if fastproto is not None:
classes.append(new_test_class(HeaderAcceleratedCompactTest, config2))
for header in (HeaderFramedCompactTest,
HeaderFramedBinaryTest,
HeaderUnframedCompactTest,
HeaderUnframedBinaryTest):
classes.append(new_test_class(header, config2))
for cls in classes:
setattr(module, cls.__name__, cls)
add_test_classes(sys.modules[__name__])
| 32.139024
| 77
| 0.629658
|
455700a4c6c5eaf6ed7bafea602e7dfcd0d4a927
| 175
|
py
|
Python
|
lab_assignment/lab_bla/linux_mac/sample/vector.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
lab_assignment/lab_bla/linux_mac/sample/vector.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
lab_assignment/lab_bla/linux_mac/sample/vector.py
|
caru1613/introduction_to_python_TEAMLAB_MOOC
|
e0ac95f7a6b889e7d18b7bdaaab49820e73d5477
|
[
"MIT"
] | null | null | null |
u = [2,2]
v = [2,3]
z = [3,5]
#result = []
#for i in range(len(u)):
# result.append(u[i] + v[i] + z[i])
#print(result)
result = [sum(t) for t in zip(u,v,z)]
print(result)
| 15.909091
| 38
| 0.525714
|
569545181dde173594c5bf4600d92d70feec418b
| 5,430
|
py
|
Python
|
tests/3_2_0/blueprints/test_blueprint_having_ahv_helper/blueprint.py
|
tuxtof/calm-dsl
|
5af67435d8304b97e170a690068f2d5975e9bfe6
|
[
"Apache-2.0"
] | 37
|
2019-12-23T15:23:20.000Z
|
2022-03-15T11:12:11.000Z
|
tests/3_2_0/blueprints/test_blueprint_having_ahv_helper/blueprint.py
|
tuxtof/calm-dsl
|
5af67435d8304b97e170a690068f2d5975e9bfe6
|
[
"Apache-2.0"
] | 144
|
2020-03-09T11:22:09.000Z
|
2022-03-28T21:34:09.000Z
|
tests/3_2_0/blueprints/test_blueprint_having_ahv_helper/blueprint.py
|
abhijeetkaurav1st/calm-dsl
|
6487a896967b3fd667b9320e2ad3a397c9960497
|
[
"Apache-2.0"
] | 46
|
2020-01-23T14:28:04.000Z
|
2022-03-09T04:17:10.000Z
|
import json
from calm.dsl.builtins import AhvVmDisk, AhvVmNic, AhvVmGC
from calm.dsl.builtins import ref, AhvVmResources, AhvVm, Ref, Metadata
from calm.dsl.builtins import vm_disk_package, read_local_file, basic_cred
from calm.dsl.builtins import Service, Package, Substrate
from calm.dsl.builtins import Deployment, Profile, Blueprint
from calm.dsl.builtins import CalmVariable, CalmTask, action
CENTOS_KEY = read_local_file(".tests/keys/centos")
CENTOS_PUBLIC_KEY = read_local_file(".tests/keys/centos_pub")
DSL_CONFIG = json.loads(read_local_file(".tests/config.json"))
CENTOS_CI = DSL_CONFIG["AHV"]["IMAGES"]["DISK"]["CENTOS_7_CLOUD_INIT"]
NETWORK1 = DSL_CONFIG["AHV"]["NETWORK"]["VLAN1211"] # TODO change network constants
# project constants
PROJECT = DSL_CONFIG["PROJECTS"]["PROJECT1"]
PROJECT_NAME = PROJECT["NAME"]
ENV_NAME = PROJECT["ENVIRONMENTS"][0]["NAME"]
ACCOUNT_NAME = PROJECT["ACCOUNTS"]["NUTANIX_PC"][0]["NAME"]
Centos = basic_cred("centos", CENTOS_KEY, name="Centos", type="KEY", default=True)
Era_Disk = vm_disk_package(
name="era_disk",
config={
# By default image type is set to DISK_IMAGE
"image": {
"source": "http://download.nutanix.com/era/1.1.1/ERA-Server-build-1.1.1-340d9db1118eac81219bec98507d4982045d8799.qcow2"
}
},
)
Virtio_CdRom = vm_disk_package(
name="virtio_cdrom",
config={
"image": {
"type": "ISO_IMAGE",
"source": "http://10.40.64.33/GoldImages/NuCalm/ISO/Nutanix-VirtIO-1.1.4.iso",
}
},
)
class AhvVmService(Service):
"""Sample mysql service"""
ENV = CalmVariable.Simple("DEV")
class AhvVmPackage(Package):
"""Example package with variables, install tasks and link to service"""
foo = CalmVariable.Simple("bar")
services = [ref(AhvVmService)]
@action
def __install__():
CalmTask.Exec.ssh(name="Task1", script="echo @@{foo}@@")
class AhvVmPackage2(AhvVmPackage):
pass
class MyAhvVmResources(AhvVmResources):
memory = 4
vCPUs = 2
cores_per_vCPU = 1
disks = [
AhvVmDisk(CENTOS_CI),
]
nics = [AhvVmNic(NETWORK1)]
guest_customization = AhvVmGC.CloudInit(
config={
"users": [
{
"name": "centos",
"ssh-authorized-keys": [CENTOS_PUBLIC_KEY],
"sudo": ["ALL=(ALL) NOPASSWD:ALL"],
}
]
}
)
serial_ports = {0: False, 1: False, 2: True, 3: True}
class MyAhvVm(AhvVm):
resources = MyAhvVmResources
categories = {"AppFamily": "Backup", "AppType": "Default"}
class AhvVmSubstrate(Substrate):
"""AHV VM config given by reading a spec file"""
provider_spec = MyAhvVm
account = Ref.Account(ACCOUNT_NAME)
class MyAhvVmResources2(AhvVmResources):
memory = 4
vCPUs = 2
cores_per_vCPU = 1
disks = [
AhvVmDisk(CENTOS_CI),
AhvVmDisk.CdRom.Ide.emptyCdRom(),
]
nics = [AhvVmNic(NETWORK1)]
guest_customization = AhvVmGC.CloudInit(
config={
"users": [
{
"name": "centos",
"ssh-authorized-keys": [CENTOS_PUBLIC_KEY],
"sudo": ["ALL=(ALL) NOPASSWD:ALL"],
}
]
}
)
serial_ports = {0: False, 1: False, 2: True, 3: True}
class MyAhvVm2(AhvVm):
resources = MyAhvVmResources2
class AhvVmSubstrate2(Substrate):
"""AHV VM config given by reading a spec file"""
provider_spec = MyAhvVm2
account = Ref.Account(ACCOUNT_NAME)
class AhvVmDeployment(Deployment):
"""Sample deployment pulling in service and substrate references"""
packages = [ref(AhvVmPackage)]
substrate = ref(AhvVmSubstrate)
class AhvVmDeployment2(Deployment):
"""Sample deployment pulling in service and substrate references"""
packages = [ref(AhvVmPackage2)]
substrate = ref(AhvVmSubstrate2)
class AhvVmProfile(Profile):
"""Sample application profile with variables"""
nameserver = CalmVariable.Simple("10.40.64.15", label="Local DNS resolver")
foo1 = CalmVariable.Simple("bar1", runtime=True)
foo2 = CalmVariable.Simple("bar2", runtime=True)
deployments = [AhvVmDeployment]
environments = [Ref.Environment(name=ENV_NAME)]
@action
def test_profile_action():
"""Sample description for a profile action"""
CalmTask.Exec.ssh(name="Task5", script='echo "Hello"', target=ref(AhvVmService))
class AhvVmProfile2(Profile):
"""Sample application profile with variables"""
nameserver = CalmVariable.Simple("10.40.64.15", label="Local DNS resolver")
foo1 = CalmVariable.Simple("bar1", runtime=True)
foo2 = CalmVariable.Simple("bar2", runtime=True)
deployments = [AhvVmDeployment2]
environments = [Ref.Environment(name=ENV_NAME)]
@action
def test_profile_action():
"""Sample description for a profile action"""
CalmTask.Exec.ssh(name="Task5", script='echo "Hello"', target=ref(AhvVmService))
class AhvBlueprint(Blueprint):
"""Sample Bp that used ahv_vm_helpers"""
credentials = [Centos]
services = [AhvVmService]
packages = [AhvVmPackage, AhvVmPackage2, Era_Disk, Virtio_CdRom]
substrates = [AhvVmSubstrate, AhvVmSubstrate2]
profiles = [AhvVmProfile, AhvVmProfile2]
class BpMetadata(Metadata):
project = Ref.Project(PROJECT_NAME)
| 26.748768
| 131
| 0.653775
|
93726d765b24143bd88950ebfcd1da8d281de6b5
| 866
|
py
|
Python
|
csuibot/utils/__init__.py
|
agungputrap/bribot
|
20cb68508b8e2c354c7cb3d527c0997183bd46aa
|
[
"Apache-2.0"
] | null | null | null |
csuibot/utils/__init__.py
|
agungputrap/bribot
|
20cb68508b8e2c354c7cb3d527c0997183bd46aa
|
[
"Apache-2.0"
] | null | null | null |
csuibot/utils/__init__.py
|
agungputrap/bribot
|
20cb68508b8e2c354c7cb3d527c0997183bd46aa
|
[
"Apache-2.0"
] | null | null | null |
from csuibot.utils import zodiac as z
def lookup_zodiac(month, day):
zodiacs = [
z.Aries(),
z.Taurus(),
z.Gemini(),
z.Cancer(),
z.Leo(),
z.Virgo(),
z.Libra(),
z.Scorpio(),
z.Sagittarius(),
z.Capricorn(),
z.Aquarius(),
z.Pisces()
]
    for zodiac in zodiacs:
        if zodiac.date_includes(month, day):
            return zodiac.name
    return 'Unknown zodiac'
def lookup_chinese_zodiac(year):
num_zodiacs = 12
zodiacs = {
0: 'rat',
1: 'buffalo',
2: 'tiger',
3: 'rabbit',
4: 'dragon',
5: 'snake',
6: 'horse',
7: 'goat',
8: 'monkey'
}
ix = (year - 4) % num_zodiacs
try:
return zodiacs[ix]
except KeyError:
return 'Unknown zodiac'
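# Hedged usage sketch, added for illustration and not part of the original
# module: both helpers are pure lookups, so they can be exercised directly.
# The exact western zodiac name returned depends on z.Aries().name.
if __name__ == '__main__':
    print(lookup_zodiac(3, 25))         # a late-March date falls in Aries
    print(lookup_chinese_zodiac(2020))  # (2020 - 4) % 12 == 0 -> 'rat'
    print(lookup_chinese_zodiac(2019))  # ix == 11 has no entry -> 'Unknown zodiac'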
| 18.826087
| 44
| 0.473441
|
2573a3cea4554bd2d7faa6c8d0a1ab25c5cd04f5
| 17,466
|
py
|
Python
|
tests/test_mbgdml_datasets.py
|
keithgroup/mbGDML
|
a68b2a41c26c8e7e8e2f4527939c4564402f36bc
|
[
"MIT"
] | 6
|
2020-08-03T03:44:33.000Z
|
2022-02-24T21:50:03.000Z
|
tests/test_mbgdml_datasets.py
|
keithgroup/mbGDML
|
a68b2a41c26c8e7e8e2f4527939c4564402f36bc
|
[
"MIT"
] | null | null | null |
tests/test_mbgdml_datasets.py
|
keithgroup/mbGDML
|
a68b2a41c26c8e7e8e2f4527939c4564402f36bc
|
[
"MIT"
] | 1
|
2022-02-25T03:03:40.000Z
|
2022-02-25T03:03:40.000Z
|
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2020-2021, Alex M. Maldonado
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tests for `mbgdml` package."""
import pytest
import numpy as np
import mbgdml.data as data
from mbgdml import criteria
from mbgdml import utils
# Must be run from mbGDML root directory.
rset_path_140h2o = './tests/data/structuresets/140h2o.sphere.gfn2.md.500k.prod1.npz'
dset_dir = './tests/data/datasets'
molecule_sizes = {
'h2o': 3,
'mecn': 6,
'meoh': 6
}
def trim_140h2o_rset():
"""Trims the 140h2o structure set to make tests easier.
"""
n_R = 3 # Number of structures to keep.
n_entities = 5 # Number of molecules to keep in each structure.
molecule_size = molecule_sizes['h2o'] # Number of atoms in a water molecule.
rset = data.structureSet(rset_path_140h2o)
assert rset.type == 's'
assert rset.md5 == '8726c482c19cdf7889cd1e62b9e9c8e1'
# Trims and checks z.
rset.z = rset.z[:n_entities*molecule_size]
z = np.array([8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1])
assert np.all(rset.z == z)
# Trims and checks R.
rset.R = rset.R[:n_R, :molecule_size*n_entities]
r_2 = np.array([
[ 6.07124359, 0.7619846, 0.58984577],
[ 6.47807882, -0.18138608, 0.67938893],
[ 5.14951519, 0.76914325, 0.66198299],
[-4.28204826, -3.57395445, 0.81850038],
[-4.33819343, -4.29134079, 0.12722189],
[-4.33829705, -2.80167393, 0.40818626],
[-2.82371582, -3.52131402, -4.12086561],
[-2.96180787, -4.46433929, -3.79287547],
[-1.85909245, -3.46817877, -4.3649756,],
[ 6.24586283, -1.76605224, 0.72883595],
[ 5.51074538, -2.26847206, 1.21432844],
[ 6.92768826, -2.3359825, 0.25592583],
[-2.44826194, -6.14429515, -3.37660252],
[-2.19536627, -6.12210888, -2.51171765],
[-2.65953004, -7.04099688, -3.59504014]
])
assert np.allclose(rset.R[2], r_2)
assert rset.z.shape[0] == rset.R.shape[1]
# Trims and checks entity_ids.
rset.entity_ids = rset.entity_ids[:n_entities*molecule_size]
entity_ids = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4])
assert np.all(rset.entity_ids == entity_ids)
# Trims and checks comp_ids
rset.comp_ids = rset.comp_ids[:n_entities]
comp_ids = np.array([
['0', 'h2o'], ['1', 'h2o'], ['2', 'h2o'], ['3', 'h2o'], ['4', 'h2o']
])
assert np.all(rset.comp_ids == comp_ids)
# Confirms changes with MD5.
assert rset.md5 == 'da254c95956709d1a00512f1ac7c0bbb'
return rset
def dset_sample_structures(
dset, data, quantity, size, selected_rset_id, r_criteria,
z_slice, cutoff, center_structures, sampling_updates
):
"""Generic sampling function.
"""
dset.sample_structures(
data, quantity, size, selected_rset_id=selected_rset_id,
criteria=r_criteria, z_slice=z_slice, cutoff=cutoff,
center_structures=center_structures, sampling_updates=sampling_updates
)
return dset
def check_R_with_rset(dset, rset, centered):
"""Uses structure information from Rset_info to check structure coordinates.
Parameters
----------
dset : :obj:`mbgdml.data.dataset.dataSet`
The data set.
rset : :obj:`mbgdml.data.structureset.structureSet`
The structure set.
centered : :obj:`bool`
If the dset coordinates were centered with respect to the cluster's
center of mass.
"""
z_dset = dset.z
R_dset = dset.R
Rset_info = dset.Rset_info
R_rset = rset.R
rset_entity_ids = rset.entity_ids
for i_r_dset in range(len(dset.R)):
r_dset = R_dset[i_r_dset]
r_rset_info = Rset_info[i_r_dset]
i_r_rset = r_rset_info[1]
r_rset_entity_ids = r_rset_info[2:]
r_slice_rset = utils.get_R_slice(r_rset_entity_ids, rset_entity_ids)
r_rset = R_rset[i_r_rset][r_slice_rset]
if centered == True:
r_rset = utils.center_structures(z_dset, r_rset)
assert np.allclose(r_dset, r_rset, atol=5.1e-07, rtol=0)
def test_rset_sampling_all_2mers_normal():
"""Sampling all dimers (2mers) from trimmed 140h2o structure set.
"""
rset = trim_140h2o_rset()
### NORMAL SAMPLING ###
dset = data.dataSet()
dset.name = '140h2o.sphere.gfn2.md.500k.prod1'
dset = dset_sample_structures(
dset, rset, 'all', 2, None, None,
np.array([]), np.array([]), False, False
)
# Checking properties.
assert dset.Rset_md5 == {0: 'da254c95956709d1a00512f1ac7c0bbb'}
assert dset.Rset_info.shape == (30, 4)
assert np.all(dset.Rset_info[:, :1] == np.zeros((30,)))
assert np.all(
dset.Rset_info[:, 1] == np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2])
)
assert dset.Rset_info.shape == np.unique(dset.Rset_info, axis=0).shape
assert np.all(dset.entity_ids == np.array([0, 0, 0, 1, 1, 1]))
assert np.all(dset.comp_ids == np.array([['0', 'h2o'], ['1', 'h2o']]))
assert np.all(dset.z == np.array([8, 1, 1, 8, 1, 1]))
# Checking R.
assert dset.R.shape == (30, 6, 3)
rset_info_r_check = np.array([0, 1, 1, 4])
r_index = np.where(
np.all(dset.Rset_info == rset_info_r_check, axis=1)
)[0][0]
r_check = np.array([
[-4.27804369, -3.56574992, 0.81519167],
[-4.3569076, -4.2647005, 0.1558876],
[-4.35184085, -2.82879184, 0.39925437],
[-2.44708832, -6.14572336, -3.36929742],
[-2.18964657, -6.13868747, -2.48473228],
[-2.64909444, -7.04677952, -3.60878085]
])
assert np.allclose(dset.R[r_index], r_check)
# Checking E.
assert dset.E.shape == (30,)
assert np.all(np.isnan(dset.E))
# Checking F
assert dset.F.shape == (30, 6, 3)
assert np.all(np.isnan(dset.F))
def test_rset_sampling_all_2mers_ignore_duplicate():
rset = trim_140h2o_rset()
dset = data.dataSet()
dset.name = '140h2o.sphere.gfn2.md.500k.prod1'
dset = dset_sample_structures(
dset, rset, 'all', 2, None, None,
np.array([]), np.array([]), False, False
)
dset_duplicate = dset_sample_structures(
dset, rset, 'all', 2, None, None,
np.array([]), np.array([]), False, False
)
assert dset_duplicate.Rset_md5 == {0: 'da254c95956709d1a00512f1ac7c0bbb'}
assert dset_duplicate.Rset_info.shape == (30, 4)
assert np.all(dset.entity_ids == np.array([0, 0, 0, 1, 1, 1]))
assert np.all(dset.comp_ids == np.array([['0', 'h2o'], ['1', 'h2o']]))
assert dset_duplicate.R.shape == (30, 6, 3)
print('yes')
def test_rset_sampling_all_2mers_centering():
rset = trim_140h2o_rset()
dset = data.dataSet()
dset.name = '140h2o.sphere.gfn2.md.500k.prod1'
dset = dset_sample_structures(
dset, rset, 'all', 2, None, None,
np.array([]), np.array([]), False, False
)
centered_R = utils.center_structures(dset.z, dset.R)
dset_centered = data.dataSet()
dset_centered.name = '140h2o.sphere.gfn2.md.500k.prod1-centered'
dset_centered = dset_sample_structures(
dset_centered, rset, 'all', 2, None, None,
np.array([]), np.array([]), True, False
)
assert np.allclose(centered_R, dset_centered.R)
def test_rset_sampling_all_2mers_criteria():
rset = trim_140h2o_rset()
dset_centered = data.dataSet()
dset_centered.name = '140h2o.sphere.gfn2.md.500k.prod1-centered'
dset_centered = dset_sample_structures(
dset_centered, rset, 'all', 2, None, None,
np.array([]), np.array([]), True, False
)
dset_criteria = data.dataSet()
dset_criteria.name = '140h2o.sphere.gfn2.md.500k.prod1-criteria'
dset_criteria = dset_sample_structures(
dset_criteria, rset, 'all', 2, None, criteria.cm_distance_sum,
np.array([]), np.array([6.0]), True, False
)
    Rset_info_acceptable_criteria = np.array([
[0,0,0,3], [0,0,1,2], [0,0,1,4], [0,0,2,4], [0,1,0,3], [0,1,1,2],
[0,1,1,4], [0,1,2,4], [0,2,0,3], [0,2,1,2], [0,2,1,4], [0,2,2,4]
])
    assert np.array_equal(dset_criteria.Rset_info, Rset_info_acceptable_criteria)
def test_dset_default_attributes():
dset = data.dataSet()
assert isinstance(dset.Rset_md5, dict)
assert len(dset.Rset_md5) == 0
assert dset.Rset_info.shape == (1, 0)
assert dset.criteria == ''
assert dset.z_slice.shape == (0,)
assert dset.cutoff.shape == (0,)
assert dset.z.shape == (0,)
assert dset.R.shape == (1, 1, 0)
assert dset.E.shape == (0,)
assert dset.F.shape == (1, 1, 0)
assert dset.entity_ids.shape == (0,)
assert dset.comp_ids.shape == (1, 0)
try:
dset.md5
except AttributeError:
pass
def test_rset_sampling_num_2mers_criteria():
rset = trim_140h2o_rset()
dset = data.dataSet()
dset.name = '140h2o.sphere.gfn2.md.500k.prod1'
dset = dset_sample_structures(
dset, rset, 5, 2, None, criteria.cm_distance_sum,
np.array([]), np.array([6.0]), True, False
)
assert isinstance(dset.criteria, str)
assert dset.criteria in criteria.__dict__
assert dset.z_slice.shape == (0,)
assert dset.cutoff.shape == (1,)
assert np.array_equal(dset.cutoff, np.array([6.]))
assert dset.r_unit == 'Angstrom'
assert np.array_equal(dset.z, np.array([8, 1, 1, 8, 1, 1]))
assert dset.R.shape == (5, 6, 3)
assert dset.E.shape == (5,)
assert dset.F.shape == (5, 6, 3)
assert dset.Rset_md5 == {0: 'da254c95956709d1a00512f1ac7c0bbb'}
assert np.array_equal(dset.entity_ids, np.array([0, 0, 0, 1, 1, 1]))
assert np.array_equal(dset.comp_ids, np.array([['0', 'h2o'], ['1', 'h2o']]))
check_R_with_rset(dset, rset, True)
def test_rset_sampling_num_2mers_additional():
rset = trim_140h2o_rset()
dset = data.dataSet()
dset.name = '140h2o.sphere.gfn2.md.500k.prod1'
dset = dset_sample_structures(
dset, rset, 5, 2, None, criteria.cm_distance_sum,
np.array([]), np.array([6.0]), True, False
)
# Ensure energies and forces are not overwritten
i_test = 1
e_test = -47583.29857
dset.E[i_test] = e_test
f_test = np.array([
[4.4, 2.8, 6.0],
[-3.65, 34.0, 2.3],
[4.4, 2.8, 6.0],
[-3.65, 34.0, 2.3],
[4.4, 2.8, 6.0],
[-3.65, 34.0, 2.3],
])
dset.F[i_test] = f_test
dset = dset_sample_structures(
dset, rset, 5, 2, None, criteria.cm_distance_sum,
np.array([]), np.array([6.0]), True, False
)
assert dset.Rset_md5 == {0: 'da254c95956709d1a00512f1ac7c0bbb'}
assert np.array_equal(dset.entity_ids, np.array([0, 0, 0, 1, 1, 1]))
assert np.array_equal(dset.comp_ids, np.array([['0', 'h2o'], ['1', 'h2o']]))
assert np.array_equal(dset.z, np.array([8, 1, 1, 8, 1, 1]))
assert dset.R.shape == (10, 6, 3)
assert dset.E.shape == (10,)
assert np.allclose(dset.E[i_test], e_test)
assert dset.F.shape == (10, 6, 3)
assert np.allclose(dset.F[i_test], f_test)
check_R_with_rset(dset, rset, True)
def test_dset_sampling_all_2mers_after_3mers():
rset = trim_140h2o_rset()
dset = data.dataSet()
dset.name = '140h2o.sphere.gfn2.md.500k.prod1'
dset = dset_sample_structures(
dset, rset, 'all', 3, None, None,
np.array([]), np.array([]), True, False
)
dset_from_dset = data.dataSet()
dset_from_dset = dset_sample_structures(
dset_from_dset, dset, 'all', 2, None, criteria.cm_distance_sum,
np.array([]), np.array([6.0]), True, False
)
assert np.array_equal(dset_from_dset.entity_ids, np.array([0, 0, 0, 1, 1, 1]))
assert np.array_equal(
dset_from_dset.comp_ids, np.array([['0', 'h2o'], ['1', 'h2o']])
)
assert dset_from_dset.Rset_md5 == {0: 'da254c95956709d1a00512f1ac7c0bbb'}
assert dset_from_dset.Rset_info.shape == (12, 4)
# Same as test_rset_sampling_all_2mers_criteria, but organized to match
# the 3mer then 2mer sampling.
    Rset_info_acceptable_criteria = np.array([
[0,0,1,2], [0,0,0,3], [0,0,1,4], [0,0,2,4], [0,1,1,2], [0,1,0,3],
[0,1,1,4], [0,1,2,4], [0,2,1,2], [0,2,0,3], [0,2,1,4], [0,2,2,4]
])
    assert np.array_equal(dset_from_dset.Rset_info, Rset_info_acceptable_criteria)
assert dset_from_dset.R.shape == (12, 6, 3)
assert dset_from_dset.E.shape == (12,)
assert dset_from_dset.F.shape == (12, 6, 3)
assert dset_from_dset.criteria == 'cm_distance_sum'
assert np.array_equal(dset_from_dset.cutoff, np.array([6.0]))
def test_sample_dset_same_size():
"""
"""
dset_h2o_2body_path = f'{dset_dir}/2h2o/140h2o.sphere.gfn2.md.500k.prod1.3h2o.dset.2h2o-dset.mb.npz'
dset_h2o_2body = data.dataSet(dset_h2o_2body_path)
# Trim dset_h2o_2body to 50 structures
remaining = 50
for key in ['Rset_info', 'E', 'R', 'F']:
setattr(dset_h2o_2body, key, getattr(dset_h2o_2body, key)[:remaining])
dset_h2o_2body_cm_6 = data.dataSet()
dset_h2o_2body_cm_6.name = '140h2o.sphere.gfn2.md.500k.prod1.3h2o.dset.2h2o-dset.mb-cm.6'
dset_h2o_2body_cm_6 = dset_sample_structures(
dset_h2o_2body_cm_6, dset_h2o_2body, 'all', 2, None, criteria.cm_distance_sum,
np.array([]), np.array([6.0]), True, False
)
assert dset_h2o_2body_cm_6.theory == 'mp2.def2tzvp.frozencore'
assert dset_h2o_2body_cm_6.criteria == 'cm_distance_sum'
assert np.array_equal(dset_h2o_2body_cm_6.z_slice, np.array([]))
assert np.array_equal(dset_h2o_2body_cm_6.cutoff, np.array([6.0]))
assert np.array_equal(dset_h2o_2body_cm_6.entity_ids, np.array([0, 0, 0, 1, 1, 1]))
assert np.array_equal(
dset_h2o_2body_cm_6.comp_ids, np.array([['0', 'h2o'], ['1', 'h2o']])
)
assert dset_h2o_2body_cm_6.centered == True
assert dset_h2o_2body_cm_6.r_unit == 'Angstrom'
    # 8726c482c19cdf7889cd1e62b9e9c8e1 is the MD5 hash of the full 140h2o rset.
assert dset_h2o_2body_cm_6.Rset_md5 == {0: '8726c482c19cdf7889cd1e62b9e9c8e1'}
assert np.array_equal(dset_h2o_2body_cm_6.z, np.array([8, 1, 1, 8, 1, 1]))
rset = data.structureSet(rset_path_140h2o)
check_R_with_rset(dset_h2o_2body_cm_6, rset, True)
# Checking energies and forces.
dset_Rset_info = dset_h2o_2body_cm_6.Rset_info
dset_E = dset_h2o_2body_cm_6.E
dset_F = dset_h2o_2body_cm_6.F
dset_sample_Rset_info = dset_h2o_2body.Rset_info
dset_sample_E = dset_h2o_2body.E
dset_sample_F = dset_h2o_2body.F
for i_r in range(len(dset_h2o_2body_cm_6.R)):
i_r_dset_sample = np.where(
np.all(dset_sample_Rset_info == dset_Rset_info[i_r], axis=1)
)[0][0]
assert np.allclose(dset_E[i_r], dset_sample_E[i_r_dset_sample])
assert np.allclose(dset_F[i_r], dset_sample_F[i_r_dset_sample])
def test_sample_dset_1mers_multiple_rsets():
"""
"""
dset_4h2o_lit_path = f'{dset_dir}/4h2o/4h2o.temelso.etal-dset.npz'
dset_4h2o_lit_dset = data.dataSet(dset_4h2o_lit_path)
# Sample all 1mers
dset_1mers = data.dataSet()
dset_1mers = dset_sample_structures(
dset_1mers, dset_4h2o_lit_dset, 'all', 1, None, None,
np.array([]), np.array([]), True, False
)
# Checking data set
Rset_info = np.array([
[0,0,0], [0,0,1], [0,0,2], [0,0,3], [1,0,0], [1,0,1], [1,0,2], [1,0,3],
[2,0,0], [2,0,1], [2,0,2], [2,0,3]
])
assert np.array_equal(dset_1mers.Rset_info, Rset_info)
Rset_md5 = {0: '92dd31a90a3d2a443023d9d708010a4f', 1: '5593ef822ede64f6011ece82d6702ff9', 2: '33098027b401c38efcb5f05fa33c93ad'}
assert dset_1mers.Rset_md5 == Rset_md5
assert np.array_equal(dset_1mers.entity_ids, np.array([0, 0, 0]))
assert np.array_equal(dset_1mers.comp_ids, np.array([['0', 'h2o']]))
assert dset_1mers.centered == True
assert dset_1mers.r_unit == 'Angstrom'
assert np.array_equal(dset_1mers.z, np.array([8, 1, 1]))
assert dset_1mers.R.shape == (12, 3, 3)
r_3 = np.array([
[-0.02947763, -0.0325826, -0.05004315],
[ 0.93292237, 0.1104174, 0.10365685],
[-0.46497763, 0.4068174, 0.69075685]
])
assert np.allclose(dset_1mers.R[3], r_3)
assert dset_1mers.E.shape == (12,)
for e in dset_1mers.E:
assert np.isnan(e)
assert dset_1mers.F.shape == (12, 3, 3)
for f in dset_1mers.F.flatten():
assert np.isnan(f)
| 36.236515
| 132
| 0.639643
|
206f36006525514f6654193efce08900327616b0
| 326
|
py
|
Python
|
lib/utils.py
|
MusaTamzid05/simple_transfer_learning_lib
|
adef55fc277268011e526596ad517b68c59c43fb
|
[
"MIT"
] | null | null | null |
lib/utils.py
|
MusaTamzid05/simple_transfer_learning_lib
|
adef55fc277268011e526596ad517b68c59c43fb
|
[
"MIT"
] | null | null | null |
lib/utils.py
|
MusaTamzid05/simple_transfer_learning_lib
|
adef55fc277268011e526596ad517b68c59c43fb
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
def limit_gpu(memory = 2048):
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
tf.config.set_logical_device_configuration(
gpus[0],
[tf.config.LogicalDeviceConfiguration(memory_limit = memory)])
except RuntimeError as e:
print(e)
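# Hedged usage sketch (illustrative, not part of the original file): the memory
# cap must be applied before any GPU tensors are created, because logical
# device configuration cannot change once the GPU has been initialized.
#
#     from lib.utils import limit_gpu
#     limit_gpu(memory=1024)  # hypothetical 1 GiB cap, then build/run the model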
| 27.166667
| 74
| 0.647239
|
a6a894239da5504e0612ddb5d578f76fa7bd942e
| 1,429
|
py
|
Python
|
images/python_analysis/src/venv/lib/python3.7/site-packages/elasticsearch/_async/client/graph.py
|
Jael24/TB_ElasticStack
|
f9aad11eda69045140a90f28739b558bf077d877
|
[
"MIT"
] | 2
|
2021-05-01T05:40:55.000Z
|
2021-06-25T13:34:46.000Z
|
images/python_analysis/src/venv/lib/python3.7/site-packages/elasticsearch/_async/client/graph.py
|
Jael24/TB_ElasticStack
|
f9aad11eda69045140a90f28739b558bf077d877
|
[
"MIT"
] | 2
|
2021-02-22T14:55:01.000Z
|
2021-03-23T12:42:33.000Z
|
images/python_analysis/src/venv/lib/python3.7/site-packages/elasticsearch/_async/client/graph.py
|
Jael24/TB_ElasticStack
|
f9aad11eda69045140a90f28739b558bf077d877
|
[
"MIT"
] | 1
|
2021-03-23T12:24:52.000Z
|
2021-03-23T12:24:52.000Z
|
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
from .utils import NamespacedClient, query_params, _make_path, SKIP_IN_PATH
class GraphClient(NamespacedClient):
@query_params("routing", "timeout")
async def explore(self, index, body=None, doc_type=None, params=None, headers=None):
"""
Explore extracted and summarized information about the documents and terms in
an index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.8/graph-explore-api.html>`_
:arg index: A comma-separated list of index names to search; use
`_all` or empty string to perform the operation on all indices
:arg body: Graph Query DSL
:arg doc_type: A comma-separated list of document types to
search; leave empty to perform the operation on all types
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
return await self.transport.perform_request(
"POST",
_make_path(index, doc_type, "_graph", "explore"),
params=params,
headers=headers,
body=body,
)
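# Hedged usage sketch (not part of the client module): how explore() might be
# called through an AsyncElasticsearch instance; the index name and query body
# below are illustrative placeholders, not values from this repository.
#
#     from elasticsearch import AsyncElasticsearch
#
#     async def explore_user_graph():
#         es = AsyncElasticsearch(hosts=["localhost"])
#         body = {
#             "query": {"match": {"user": "kimchy"}},
#             "vertices": [{"field": "user"}],
#             "connections": {"vertices": [{"field": "tags"}]},
#         }
#         return await es.graph.explore(index="my-index", body=body)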
| 42.029412
| 95
| 0.669699
|
6c7e7c64ca5d2041343ebe9528cb6a52563ef2c6
| 1,089
|
py
|
Python
|
libcst/_nodes/tests/test_trailing_whitespace.py
|
mvismonte/LibCST
|
fc430343b5f7e0c30d6d0ca30b5251b74bd3890d
|
[
"MIT"
] | 1
|
2021-08-12T11:52:00.000Z
|
2021-08-12T11:52:00.000Z
|
libcst/_nodes/tests/test_trailing_whitespace.py
|
mvismonte/LibCST
|
fc430343b5f7e0c30d6d0ca30b5251b74bd3890d
|
[
"MIT"
] | 1
|
2019-08-21T18:58:13.000Z
|
2019-08-21T18:58:13.000Z
|
libcst/_nodes/tests/test_trailing_whitespace.py
|
mvismonte/LibCST
|
fc430343b5f7e0c30d6d0ca30b5251b74bd3890d
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import libcst as cst
from libcst._nodes.tests.base import CSTNodeTest
from libcst.testing.utils import data_provider
class TrailingWhitespaceTest(CSTNodeTest):
@data_provider(
(
(cst.TrailingWhitespace(), "\n"),
(cst.TrailingWhitespace(whitespace=cst.SimpleWhitespace(" ")), " \n"),
(cst.TrailingWhitespace(comment=cst.Comment("# comment")), "# comment\n"),
(cst.TrailingWhitespace(newline=cst.Newline("\r\n")), "\r\n"),
(
cst.TrailingWhitespace(
whitespace=cst.SimpleWhitespace(" "),
comment=cst.Comment("# comment"),
newline=cst.Newline("\r\n"),
),
" # comment\r\n",
),
)
)
def test_valid(self, node: cst.CSTNode, code: str) -> None:
self.validate_node(node, code)
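# Hedged sketch (not part of the original test file): TrailingWhitespace is the
# node libcst attaches to the end of a logical line, so the same rendering can
# be seen by attaching one to a SimpleStatementLine.
#
#     line = cst.SimpleStatementLine(
#         body=[cst.Pass()],
#         trailing_whitespace=cst.TrailingWhitespace(
#             whitespace=cst.SimpleWhitespace("  "),
#             comment=cst.Comment("# done"),
#         ),
#     )
#     cst.Module(body=[line]).code  # "pass  # done\n"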
| 35.129032
| 88
| 0.580349
|
6077d0e195ac760235da3b81067f2a87a604f836
| 94,700
|
py
|
Python
|
python/mxnet/ndarray/ndarray.py
|
yinscapital/incubator-mxnet
|
4c0df6249d03841f5eb30e1428aa25fc230fed30
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/ndarray/ndarray.py
|
yinscapital/incubator-mxnet
|
4c0df6249d03841f5eb30e1428aa25fc230fed30
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/ndarray/ndarray.py
|
yinscapital/incubator-mxnet
|
4c0df6249d03841f5eb30e1428aa25fc230fed30
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-lines, protected-access
# pylint: disable=import-error, no-name-in-module, undefined-variable
"""NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
import ctypes
import warnings
import operator
import numpy as np
from ..base import _LIB, numeric_types, integer_types
from ..base import c_array, mx_real_t
from ..base import mx_uint, NDArrayHandle, check_call
from ..base import ctypes2buffer
from ..context import Context
from . import _internal
from . import op
from .op import NDArrayBase
__all__ = ["NDArray", "concatenate", "_DTYPE_NP_TO_MX", "_DTYPE_MX_TO_NP", "_GRAD_REQ_MAP",
"ones", "add", "arange", "divide", "equal", "full", "greater", "greater_equal",
"imdecode", "lesser", "lesser_equal", "maximum", "minimum", "moveaxis", "modulo",
"multiply", "not_equal", "onehot_encode", "power", "subtract", "true_divide",
"waitall", "_new_empty_handle"]
_STORAGE_TYPE_UNDEFINED = -1
_STORAGE_TYPE_DEFAULT = 0
_STORAGE_TYPE_ROW_SPARSE = 1
_STORAGE_TYPE_CSR = 2
# pylint: disable= no-member
_DTYPE_NP_TO_MX = {
None: -1,
np.float32: 0,
np.float64: 1,
np.float16: 2,
np.uint8: 3,
np.int32: 4,
np.int8: 5,
np.int64: 6,
}
_DTYPE_MX_TO_NP = {
-1: None,
0: np.float32,
1: np.float64,
2: np.float16,
3: np.uint8,
4: np.int32,
5: np.int8,
6: np.int64,
}
_STORAGE_TYPE_STR_TO_ID = {
'undefined': _STORAGE_TYPE_UNDEFINED,
'default': _STORAGE_TYPE_DEFAULT,
'row_sparse': _STORAGE_TYPE_ROW_SPARSE,
'csr': _STORAGE_TYPE_CSR,
}
_STORAGE_TYPE_ID_TO_STR = {
_STORAGE_TYPE_UNDEFINED: 'undefined',
_STORAGE_TYPE_DEFAULT: 'default',
_STORAGE_TYPE_ROW_SPARSE: 'row_sparse',
_STORAGE_TYPE_CSR: 'csr',
}
_GRAD_REQ_MAP = {
'null': 0,
'write': 1,
'add': 3
}
# pylint: enable= no-member
def _new_empty_handle():
"""Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
return hdl
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array(mx_uint, shape),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl
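# Hedged sketch (internal-API illustration only, not part of the original
# module): an NDArray is a thin wrapper around such a handle; user code should
# normally go through mx.nd.empty/zeros instead of calling this helper.
#
#     hdl = _new_alloc_handle((2, 3), Context('cpu', 0), delay_alloc=False,
#                             dtype=np.float32)
#     arr = NDArray(hdl)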
def waitall():
"""Wait for all async operations to finish in MXNet.
This function is used for benchmarking only.
"""
check_call(_LIB.MXNDArrayWaitAll())
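# Hedged benchmarking sketch (illustrative only): operations are queued on the
# async engine and return immediately, so waitall() is what makes wall-clock
# timing meaningful.
#
#     import time, mxnet as mx
#     a = mx.nd.ones((1000, 1000))
#     tic = time.time()
#     b = mx.nd.dot(a, a)   # returns as soon as the op is queued
#     mx.nd.waitall()       # block until the work has actually finished
#     print(time.time() - tic)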
def _storage_type(handle):
storage_type = ctypes.c_int(0)
check_call(_LIB.MXNDArrayGetStorageType(handle, ctypes.byref(storage_type)))
return storage_type.value
class NDArray(NDArrayBase):
"""An array object representing a multidimensional, homogeneous array of
fixed-size items.
"""
__slots__ = []
# make numpy functions return NDArray instead of numpy object array
__array_priority__ = 1000.0
# pylint: disable= no-member, undefined-variable
def __repr__(self):
"""Returns a string representation of the array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
return '\n%s\n<%s %s @%s>' % (str(self.asnumpy()),
self.__class__.__name__,
shape_info, self.context)
def __reduce__(self):
return NDArray, (None,), self.__getstate__()
def __add__(self, other):
"""x.__add__(y) <=> x+y <=> mx.nd.add(x, y) """
return add(self, other)
def __iadd__(self, other):
"""x.__iadd__(y) <=> x+=y """
if not self.writable:
raise ValueError('trying to add to a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_add(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._plus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
"""x.__sub__(y) <=> x-y <=> mx.nd.subtract(x, y) """
return subtract(self, other)
def __isub__(self, other):
"""x.__isub__(y) <=> x-=y """
if not self.writable:
raise ValueError('trying to subtract from a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_sub(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._minus_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rsub__(self, other):
"""x.__rsub__(y) <=> y-x <=> mx.nd.subtract(y, x) """
return subtract(other, self)
def __mul__(self, other):
"""x.__mul__(y) <=> x*y <=> mx.nd.multiply(x, y) """
return multiply(self, other)
def __neg__(self):
"""x.__neg__(y) <=> -x """
return _internal._mul_scalar(self, -1.0)
def __imul__(self, other):
"""x.__imul__(y) <=> x*=y """
if not self.writable:
raise ValueError('trying to multiply to a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_mul(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._mul_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
"""x.__div__(y) <=> x/y <=> mx.nd.divide(x, y) """
return divide(self, other)
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y/x <=> mx.nd.divide(y, x) """
return divide(other, self)
def __idiv__(self, other):
"""x.__rdiv__(y) <=> x/=y """
if not self.writable:
raise ValueError('trying to divide from a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_div(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._div_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __truediv__(self, other):
return divide(self, other)
def __rtruediv__(self, other):
return divide(other, self)
def __itruediv__(self, other):
return self.__idiv__(other)
def __mod__(self, other):
"""x.__mod__(y) <=> x%y <=> mx.nd.modulo(x, y) """
return modulo(self, other)
def __rmod__(self, other):
"""x.__rmod__(y) <=> y%x <=> mx.nd.modulo(y, x) """
return modulo(other, self)
def __imod__(self, other):
"""x.__rmod__(y) <=> x%=y """
if not self.writable:
raise ValueError('trying to take modulo from a readonly NDArray')
if isinstance(other, NDArray):
return op.broadcast_mod(self, other, out=self)
elif isinstance(other, numeric_types):
return _internal._mod_scalar(self, float(other), out=self)
else:
raise TypeError('type %s not supported' % str(type(other)))
def __pow__(self, other):
"""x.__pow__(y) <=> x**y <=> mx.nd.power(x,y) """
return power(self, other)
def __rpow__(self, other):
"""x.__pow__(y) <=> y**x <=> mx.nd.power(y,x) """
return power(other, self)
def __eq__(self, other):
"""x.__eq__(y) <=> x==y <=> mx.nd.equal(x, y) """
return equal(self, other)
def __ne__(self, other):
"""x.__ne__(y) <=> x!=y <=> mx.nd.not_equal(x, y) """
return not_equal(self, other)
def __gt__(self, other):
"""x.__gt__(y) <=> x>y <=> mx.nd.greater(x, y) """
return greater(self, other)
def __ge__(self, other):
"""x.__ge__(y) <=> x>=y <=> mx.nd.greater_equal(x, y) """
return greater_equal(self, other)
def __lt__(self, other):
"""x.__lt__(y) <=> x<y <=> mx.nd.lesser(x, y) """
return lesser(self, other)
def __le__(self, other):
"""x.__le__(y) <=> x<=y <=> mx.nd.less_equal(x, y) """
return lesser_equal(self, other)
def __bool__(self):
raise ValueError("The truth value of an NDArray is ambiguous. " \
"Please convert to number with asscalar() first.")
__nonzero__ = __bool__
def __len__(self):
"""Number of element along the first axis."""
return self.shape[0]
def __getstate__(self):
handle = self.handle
this = {'handle' : None}
if handle is not None:
length = ctypes.c_size_t()
cptr = ctypes.POINTER(ctypes.c_char)()
check_call(_LIB.MXNDArraySaveRawBytes(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
this['handle'] = ctypes2buffer(cptr, length.value)
return this
def __setstate__(self, state):
# pylint: disable=assigning-non-slot
handle = state['handle']
if handle is not None:
buf = handle
handle = NDArrayHandle()
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
length = ctypes.c_size_t(len(buf))
check_call(_LIB.MXNDArrayLoadFromRawBytes(ptr, length, ctypes.byref(handle)))
self.handle = handle
else:
self.handle = None
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value.
Parameters
----------
key : int, slice or tuple
The indexing key.
value : scalar, NDArray or numpy.ndarray
The value to set.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x[:] = 1
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> x[:,1:2] = 2
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 1., 2., 1.]], dtype=float32)
>>> x[1:2,1:] = 3
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 1., 3., 3.]], dtype=float32)
>>> x[1:,0:2] = mx.nd.zeros((1,2))
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 0., 0., 3.]], dtype=float32)
>>> x[1,2] = 4
>>> x.asnumpy()
array([[ 1., 2., 1.],
[ 0., 0., 4.]], dtype=float32)
"""
# pylint: disable=too-many-branches
if not self.writable:
raise ValueError('Cannot assign to readonly NDArray')
if isinstance(key, integer_types):
sliced_arr = self._at(key)
sliced_arr[:] = value
return
elif isinstance(key, py_slice):
if key.step is not None:
raise ValueError('NDArray only supports slicing with step size 1')
if key.start is not None or key.stop is not None:
sliced_arr = self._slice(key.start, key.stop)
sliced_arr[:] = value
return
if isinstance(value, NDArray):
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
_internal._set_value(float(value), out=self)
elif isinstance(value, (np.ndarray, np.generic)):
self._sync_copyfrom(value)
else:
raise TypeError(
'NDArray does not support assignment with %s of type %s'%(
str(value), str(type(value))))
elif isinstance(key, tuple):
# multi-dimension indexing
my_shape = self.shape
assert len(key) <= len(my_shape), \
"Indexing dimensions exceed array dimensions, %d vs %d"%(
len(key), len(my_shape))
begin = [0 for _ in my_shape]
end = [x for x in my_shape]
expand = []
for i, slice_i in enumerate(key):
if isinstance(slice_i, integer_types):
assert slice_i < my_shape[i]
begin[i] = slice_i
end[i] = slice_i + 1
expand.append(i)
elif isinstance(slice_i, py_slice):
# only support continuous slicing
assert slice_i.step is None, \
"NDArray only supports slicing with step size 1."
begin[i] = slice_i.start or 0
end[i] = slice_i.stop or my_shape[i]
assert begin[i] < end[i]
assert end[i] <= my_shape[i]
else:
raise ValueError(
"NDArray does not support slicing with key %s of type %s."%(
str(slice_i), str(type(slice_i))))
if isinstance(value, NDArray):
value = value.as_in_context(self.context)
self._slice_assign(value, begin, end, expand)
elif isinstance(value, numeric_types):
_internal._crop_assign_scalar(self, out=self,
begin=begin, end=end,
scalar=value)
elif isinstance(value, (np.ndarray, np.generic)):
value = array(value, ctx=self.context, dtype=self.dtype)
self._slice_assign(value, begin, end, expand)
else:
raise TypeError(
'NDArray does not support assignment with %s of type %s'%(
str(value), str(type(value))))
else:
raise ValueError(
"NDArray does not support slicing with key %s of type %s."%(
str(key), str(type(key))))
# pylint: enable=too-many-branches
def _slice_assign(self, value, begin, end, expand):
vshape = list(value.shape)
if expand and len(vshape) != len(begin):
if len(expand) + len(vshape) != len(begin):
sshape = [e - b for e, b in zip(end, begin)]
for i in reversed(expand):
sshape.pop(i)
raise ValueError(
"Cannot assign NDArray with shape %s to NDArray slice with " \
"shape %s"%(str(vshape), str(sshape)))
for i in expand:
vshape.insert(i, 1)
value = value.reshape(vshape)
_internal._crop_assign(self, value, out=self,
begin=begin, end=end)
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : int or slice, or array like
Indexing key.
Examples
--------
>>> x = mx.nd.arange(0,6).reshape((2,3))
>>> x.asnumpy()
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x[1].asnumpy()
array([ 3., 4., 5.], dtype=float32)
>>> y = x[0:1]
>>> y[:] = 2
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 3., 4., 5.]], dtype=float32)
"""
# multi-dimensional slicing is not supported yet
if isinstance(key, integer_types):
if key > self.shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, self.shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
if key.step is not None:
raise ValueError("NDArray only supports slicing with step size 1.")
if key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
return self
elif isinstance(key, tuple):
shape = self.shape
assert len(key) > 0, "Cannot slice with empty indices"
assert len(shape) >= len(key), \
"Slicing dimensions exceeds array dimensions, %d vs %d"%(
len(key), len(shape))
if isinstance(key[0], (NDArray, np.ndarray, list)):
indices = []
dtype = 'int32'
for idx_i in key:
if not isinstance(idx_i, NDArray):
idx_i = array(idx_i, ctx=self.context, dtype=dtype)
else:
dtype = idx_i.dtype
indices.append(idx_i)
indices = op.stack(*indices)
return op.gather_nd(self, indices)
else:
oshape = []
begin = []
end = []
i = -1
for i, slice_i in enumerate(key):
if isinstance(slice_i, integer_types):
begin.append(slice_i)
end.append(slice_i+1)
elif isinstance(slice_i, py_slice):
if slice_i.step is not None:
raise ValueError("NDArray only supports slicing with step size 1.")
begin.append(0 if slice_i.start is None else slice_i.start)
end.append(shape[i] if slice_i.stop is None else slice_i.stop)
oshape.append(end[i] - begin[i])
else:
raise ValueError(
"NDArray does not support slicing with key %s of type %s."%(
str(slice_i), str(type(slice_i))))
oshape.extend(shape[i+1:])
if len(oshape) == 0:
oshape.append(1)
return op.slice(self, begin, end).reshape(oshape)
else:
raise ValueError(
"NDArray does not support slicing with key %s of type %s."%(
str(key), str(type(key))))
def _sync_copyfrom(self, source_array):
"""Performs a synchronized copy from the `source_array` to the current array.
This is called through ``x[:] = source_array``, where the `source_array`
is a `numpy.ndarray` or array-like object.
This function blocks until all the pending read/write operations with respect
to the current `NDArray` are finished and carry out the copy operation to the
current NDArray.
Parameters
----------
source_array : array_like
The data source we would like to copy from.
Example
-------
>>> a = mx.nd.array([1, 2])
>>> a.asnumpy()
array([ 1., 2.], dtype=float32)
>>> a[:] = np.array([3, 4])
        >>> a.asnumpy()
array([ 3., 4.], dtype=float32)
"""
if not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=self.dtype)
except:
                raise TypeError('array must consist of array-like data, ' +
                                'type %s is not supported' % str(type(source_array)))
source_array = np.ascontiguousarray(source_array, dtype=self.dtype)
if source_array.shape != self.shape:
raise ValueError('Shape inconsistent: expected %s vs got %s'%(
str(self.shape), str(source_array.shape)))
check_call(_LIB.MXNDArraySyncCopyFromCPU(
self.handle,
source_array.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(source_array.size)))
def _slice(self, start, stop):
"""Returns a sliced NDArray that shares memory with the current one.
This is called through ``x[start:stop]``.
Parameters
----------
start : int
Starting inclusive index of slice in the first dim.
stop : int
Finishing exclusive index of slice in the first dim.
Returns
-------
`NDArray` sharing the memory with the current one sliced from
start to stop in the first dim.
Examples:
>>> a = mx.nd.array([[1,2], [3, 4], [5, 6], [7, 8]])
>>> a[1:2].asnumpy()
array([[ 3., 4.]], dtype=float32)
>>> a[1:1].asnumpy()
array([], shape=(0, 2), dtype=float32)
"""
handle = NDArrayHandle()
if start is None:
start = mx_uint(0)
elif start < 0:
length = self.shape[0]
start += length
assert start >= 0, "Slicing start %d exceeds limit of %d"%(start-length, length)
start = mx_uint(start)
else:
start = mx_uint(start)
if stop is None:
stop = mx_uint(self.shape[0])
elif stop < 0:
length = self.shape[0]
stop += length
assert stop >= 0, "Slicing end %d exceeds limit of %d"%(stop-length, length)
stop = mx_uint(stop)
else:
stop = mx_uint(stop)
check_call(_LIB.MXNDArraySlice(
self.handle, start, stop, ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)
def _at(self, idx):
"""Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32)
"""
handle = NDArrayHandle()
idx = mx_uint(idx)
check_call(_LIB.MXNDArrayAt(
self.handle, idx, ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)
def reshape(self, shape):
"""Returns a **view** of this array with a new shape without altering any data.
Parameters
----------
shape : tuple of int
The new shape should not change the array size, namely
``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``.
One dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
0 Dimensions in shape will be copied from original shape, i.e.
if x.shape == (3, 4, 5), x.reshape((0, 20)).shape will be (3, 20).
Returns
-------
NDArray
An array with desired shape that shares data with this array.
Examples
--------
>>> x = mx.nd.arange(0,6).reshape((2,3))
>>> x.asnumpy()
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> y = x.reshape((3,2))
>>> y.asnumpy()
array([[ 0., 1.],
[ 2., 3.],
[ 4., 5.]], dtype=float32)
>>> y = x.reshape((3,-1))
>>> y.asnumpy()
array([[ 0., 1.],
[ 2., 3.],
[ 4., 5.]], dtype=float32)
>>> y[:] = -1
>>> x.asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
"""
handle = NDArrayHandle()
# Actual reshape
check_call(_LIB.MXNDArrayReshape(self.handle,
len(shape),
c_array(ctypes.c_int, shape),
ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable)
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
return op.zeros_like(self, *args, **kwargs)
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
return op.ones_like(self, *args, **kwargs)
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
return op.broadcast_axes(self, *args, **kwargs)
def repeat(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`repeat`.
The arguments are the same as for :py:func:`repeat`, with
this array as data.
"""
return op.repeat(self, *args, **kwargs)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
return op.pad(self, *args, **kwargs)
def swapaxes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`swapaxes`.
The arguments are the same as for :py:func:`swapaxes`, with
this array as data.
"""
return op.swapaxes(self, *args, **kwargs)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
return op.split(self, *args, **kwargs)
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
return op.slice(self, *args, **kwargs)
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
return op.slice_axis(self, *args, **kwargs)
def take(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
return op.take(self, *args, **kwargs)
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
return op.one_hot(self, *args, **kwargs)
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
return op.pick(self, *args, **kwargs)
def sort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
return op.sort(self, *args, **kwargs)
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
return op.topk(self, *args, **kwargs)
def argsort(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
return op.argsort(self, *args, **kwargs)
def argmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax`.
The arguments are the same as for :py:func:`argmax`, with
this array as data.
"""
return op.argmax(self, *args, **kwargs)
def argmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmin`.
The arguments are the same as for :py:func:`argmin`, with
this array as data.
"""
return op.argmin(self, *args, **kwargs)
def clip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`clip`.
The arguments are the same as for :py:func:`clip`, with
this array as data.
"""
return op.clip(self, *args, **kwargs)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
return op.abs(self, *args, **kwargs)
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
return op.sign(self, *args, **kwargs)
def flatten(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flatten`.
The arguments are the same as for :py:func:`flatten`, with
this array as data.
"""
return op.flatten(self, *args, **kwargs)
def expand_dims(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
return op.expand_dims(self, *args, **kwargs)
def tile(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tile`.
The arguments are the same as for :py:func:`tile`, with
this array as data.
"""
return op.tile(self, *args, **kwargs)
def transpose(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`transpose`.
The arguments are the same as for :py:func:`transpose`, with
this array as data.
"""
return op.transpose(self, *args, **kwargs)
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
return op.flip(self, *args, **kwargs)
def sum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sum`.
The arguments are the same as for :py:func:`sum`, with
this array as data.
"""
return op.sum(self, *args, **kwargs)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
return op.nansum(self, *args, **kwargs)
def prod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`prod`.
The arguments are the same as for :py:func:`prod`, with
this array as data.
"""
return op.prod(self, *args, **kwargs)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
return op.nanprod(self, *args, **kwargs)
def mean(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`mean`.
The arguments are the same as for :py:func:`mean`, with
this array as data.
"""
return op.mean(self, *args, **kwargs)
def max(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`max`.
The arguments are the same as for :py:func:`max`, with
this array as data.
"""
return op.max(self, *args, **kwargs)
def min(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`min`.
The arguments are the same as for :py:func:`min`, with
this array as data.
"""
return op.min(self, *args, **kwargs)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
return op.norm(self, *args, **kwargs)
def round(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
return op.round(self, *args, **kwargs)
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
return op.rint(self, *args, **kwargs)
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
return op.fix(self, *args, **kwargs)
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
return op.floor(self, *args, **kwargs)
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
return op.ceil(self, *args, **kwargs)
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
return op.trunc(self, *args, **kwargs)
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
return op.sin(self, *args, **kwargs)
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
return op.cos(self, *args, **kwargs)
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
return op.tan(self, *args, **kwargs)
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
return op.arcsin(self, *args, **kwargs)
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
return op.arccos(self, *args, **kwargs)
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
return op.arctan(self, *args, **kwargs)
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
return op.degrees(self, *args, **kwargs)
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
return op.radians(self, *args, **kwargs)
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
return op.sinh(self, *args, **kwargs)
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
return op.cosh(self, *args, **kwargs)
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
return op.tanh(self, *args, **kwargs)
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
return op.arcsinh(self, *args, **kwargs)
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
return op.arccosh(self, *args, **kwargs)
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
return op.arctanh(self, *args, **kwargs)
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
return op.exp(self, *args, **kwargs)
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
return op.expm1(self, *args, **kwargs)
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
return op.log(self, *args, **kwargs)
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
return op.log10(self, *args, **kwargs)
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
return op.log2(self, *args, **kwargs)
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
return op.log1p(self, *args, **kwargs)
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
return op.sqrt(self, *args, **kwargs)
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
return op.rsqrt(self, *args, **kwargs)
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
return op.square(self, *args, **kwargs)
# pylint: disable= undefined-variable
def broadcast_to(self, shape):
"""Broadcasts the input array to a new shape.
Broadcasting is only allowed on axes with size 1. The new shape cannot change
the number of dimensions.
For example, you could broadcast from shape (2, 1) to (2, 3), but not from
shape (2, 3) to (2, 3, 3).
Parameters
----------
shape : tuple of int
The shape of the desired array.
Returns
-------
NDArray
A NDArray with the desired shape that is not sharing data with this
array, even if the new shape is the same as ``self.shape``.
Examples
--------
>>> x = mx.nd.arange(0,3).reshape((1,3,1))
>>> x.asnumpy()
array([[[ 0.],
[ 1.],
[ 2.]]], dtype=float32)
>>> y = x.broadcast_to((2,3,3))
>>> y.asnumpy()
array([[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]],
<BLANKLINE>
[[ 0., 0., 0.],
[ 1., 1., 1.],
[ 2., 2., 2.]]], dtype=float32)
"""
cur_shape = self.shape
err_str = 'operands could not be broadcast together with remapped shapes' \
'[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
if len(shape) < len(cur_shape):
raise ValueError(err_str)
cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
cur_shape_arr = np.array(cur_shape)
broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
if (cur_shape_arr[broadcasting_axes] != 1).any():
raise ValueError(err_str)
if cur_shape != self.shape:
return op.broadcast_to(self.reshape(cur_shape), shape=shape)
else:
return op.broadcast_to(self, shape=tuple(shape))
# pylint: enable= undefined-variable
def wait_to_read(self):
"""Waits until all previous write operations on the current array are finished.
This method guarantees that all previous write operations that pushed
into the backend engine for execution are actually finished.
Examples
--------
>>> import time
>>> tic = time.time()
>>> a = mx.nd.ones((1000,1000))
>>> b = mx.nd.dot(a, a)
>>> print(time.time() - tic) # doctest: +SKIP
0.003854036331176758
>>> b.wait_to_read()
>>> print(time.time() - tic) # doctest: +SKIP
0.0893700122833252
"""
check_call(_LIB.MXNDArrayWaitToRead(self.handle))
@property
def ndim(self):
"""Returns the number of dimensions of this array
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.ndim
1
>>> x = mx.nd.array([[1, 2], [3, 4]])
>>> x.ndim
2
"""
return len(self.shape)
@property
def shape(self):
"""Tuple of array dimensions.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.shape
(4L,)
>>> y = mx.nd.zeros((2, 3, 4))
>>> y.shape
(2L, 3L, 4L)
"""
ndim = mx_uint()
pdata = ctypes.POINTER(mx_uint)()
check_call(_LIB.MXNDArrayGetShape(
self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
return tuple(pdata[:ndim.value])
@property
def size(self):
"""Number of elements in the array.
Equivalent to the product of the array's dimensions.
Examples
--------
>>> import numpy as np
>>> x = mx.nd.zeros((3, 5, 2))
>>> x.size
30
>>> np.prod(x.shape)
30
"""
size = 1
for i in self.shape:
size *= i
return size
@property
def context(self):
"""Device context of the array.
Examples
--------
>>> x = mx.nd.array([1, 2, 3, 4])
>>> x.context
cpu(0)
>>> type(x.context)
<class 'mxnet.context.Context'>
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> y.context
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
@property
def dtype(self):
"""Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x.dtype
<type 'numpy.float32'>
>>> y = mx.nd.zeros((2,3), dtype='int32')
>>> y.dtype
<type 'numpy.int32'>
"""
mx_dtype = ctypes.c_int()
check_call(_LIB.MXNDArrayGetDType(
self.handle, ctypes.byref(mx_dtype)))
return _DTYPE_MX_TO_NP[mx_dtype.value]
@property
def stype(self):
"""Storage-type of the array.
"""
return _STORAGE_TYPE_ID_TO_STR[_storage_type(self.handle)]
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Returns a copy of the array with axes transposed.
Equivalent to ``mx.nd.transpose(self)`` except that
self is returned if ``self.ndim < 2``.
Unlike ``numpy.ndarray.T``, this function returns a copy
rather than a view of the array unless ``self.ndim < 2``.
Examples
--------
>>> x = mx.nd.arange(0,6).reshape((2,3))
>>> x.asnumpy()
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x.T.asnumpy()
array([[ 0., 3.],
[ 1., 4.],
[ 2., 5.]], dtype=float32)
"""
if len(self.shape) < 2:
return self
return op.transpose(self)
# pylint: enable= invalid-name, undefined-variable
@property
def _fresh_grad(self):
"""Whether this array's corresponding gradient array
(registered via `autograd.mark_variables`) has been
updated by `autograd.backward` since last reset.
`_fresh_grad` need to be manually set to False
after consuming gradient (usually after updating this
array).
"""
out = ctypes.c_int()
check_call(_LIB.MXNDArrayGetGradState(self.handle, ctypes.byref(out)))
return out.value
@_fresh_grad.setter
def _fresh_grad(self, state):
check_call(_LIB.MXNDArraySetGradState(self.handle, ctypes.c_int(state)))
def asnumpy(self):
"""Returns a ``numpy.ndarray`` object with value copied from this array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.asnumpy()
>>> type(y)
<type 'numpy.ndarray'>
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> z = mx.nd.ones((2,3), dtype='int32')
>>> z.asnumpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
"""
data = np.empty(self.shape, dtype=self.dtype)
check_call(_LIB.MXNDArraySyncCopyToCPU(
self.handle,
data.ctypes.data_as(ctypes.c_void_p),
ctypes.c_size_t(data.size)))
return data
def asscalar(self):
"""Returns a scalar whose value is copied from this array.
This function is equivalent to ``self.asnumpy()[0]``. This NDArray must have shape (1,).
Examples
--------
>>> x = mx.nd.ones((1,), dtype='int32')
>>> x.asscalar()
1
>>> type(x.asscalar())
<type 'numpy.int32'>
"""
if self.shape != (1,):
raise ValueError("The current array is not a scalar")
return self.asnumpy()[0]
def astype(self, dtype):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array after casting to the specified type.
Examples
--------
>>> x = mx.nd.zeros((2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
res = empty(self.shape, ctx=self.context, dtype=dtype)
self.copyto(res)
return res
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``NDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or Context
The destination array or context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray``, then the return value
and ``other`` will point to the same ``NDArray``.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.zeros((2,3), mx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.copyto(mx.gpu(0))
<NDArray 2x3 @gpu(0)>
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def copy(self):
"""Makes a copy of this ``NDArray``, keeping the same context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The copied array
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.copy()
>>> y.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
"""
return self.copyto(self.context)
def as_in_context(self, context):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
context : Context
The target context.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
The target array.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = x.as_in_context(mx.cpu())
>>> y is x
True
>>> z = x.as_in_context(mx.gpu(0))
>>> z is x
False
"""
if self.context == context:
return self
return self.copyto(context)
def attach_grad(self, grad_req='write', stype=None):
"""Attach a gradient buffer to this NDArray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
- 'write': gradient will be overwritten on every backward.
- 'add': gradient will be added to existing value on every backward.
- 'null': do not compute gradient for this NDArray.
stype : str, optional
            The storage type of the gradient array. Defaults to the same stype as this NDArray.
"""
from . import zeros as _zeros
if stype is not None:
grad = _zeros(self.shape, stype=stype)
else:
grad = op.zeros_like(self) # pylint: disable=undefined-variable
grad_req = _GRAD_REQ_MAP[grad_req]
check_call(_LIB.MXAutogradMarkVariables(
1, ctypes.pointer(self.handle),
ctypes.pointer(mx_uint(grad_req)),
ctypes.pointer(grad.handle)))
@property
def grad(self):
"""Returns gradient buffer attached to this NDArray."""
from . import _ndarray_cls
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
if hdl.value is None:
return None
return _ndarray_cls(hdl)
def detach(self):
"""Returns a new NDArray, detached from the current graph."""
from . import _ndarray_cls
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return _ndarray_cls(hdl)
def backward(self, out_grad=None, retain_graph=False, train_mode=True):
"""Compute the gradients of this NDArray w.r.t variables.
Parameters
----------
out_grad : NDArray, optional
Gradient with respect to head.
retain_graph : bool, optional
            Whether to retain the computation graph for another backward
            pass on the same graph. By default the computation history
is cleared.
train_mode : bool, optional
Whether to compute gradient for training or inference.
"""
if out_grad is None:
ograd_handles = [NDArrayHandle(0)]
else:
ograd_handles = [out_grad.handle]
check_call(_LIB.MXAutogradBackwardEx(
1, c_array(NDArrayHandle, [self.handle]),
c_array(NDArrayHandle, ograd_handles),
0,
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(0),
ctypes.c_int(train_mode),
ctypes.c_void_p(0),
ctypes.c_void_p(0)))
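    # --- Illustrative usage sketch (added for clarity; not part of the original source) ---
    # A minimal autograd round trip with `attach_grad` and `backward`, assuming the usual
    # `import mxnet as mx` and that this class is exposed as mx.nd.NDArray:
    #
    #     x = mx.nd.array([1.0, 2.0, 3.0])
    #     x.attach_grad()                  # allocate a gradient buffer for x
    #     with mx.autograd.record():       # record operations into a graph
    #         y = x * x
    #     y.backward()                     # fills x.grad with dy/dx = 2 * x
    #     print(x.grad.asnumpy())          # -> [ 2.  4.  6.]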
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
See Also
----------
:meth:`mxnet.ndarray.cast_storage`.
Returns
-------
NDArray, CSRNDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
return op.cast_storage(self, stype=stype)
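# --- Illustrative usage sketch (added for clarity; not part of the original source) ---
# Converting between storage types with `tostype`, assuming `import mxnet as mx`:
#
#     dense = mx.nd.array([[0, 1], [2, 0]])
#     csr = dense.tostype('csr')           # sparse CSR copy; csr.stype == 'csr'
#     back = csr.tostype('default')        # dense copy again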
def onehot_encode(indices, out):
"""One-hot encoding indices into matrix out.
.. note:: `onehot_encode` is deprecated. Use `one_hot` instead.
"""
# pylint: disable= no-member, protected-access
return _internal._onehot_encode(indices, out, out=out)
# pylint: enable= no-member, protected-access
def ones(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.Context.default_ctx``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = Context.default_ctx
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
# pylint: enable= no-member, protected-access
def full(shape, val, ctx=None, dtype=mx_real_t, out=None):
"""Returns a new array of given shape and type, filled with the given value `val`.
Parameters
--------
shape : int or tuple of int
The shape of the new array.
val : scalar
Fill value.
ctx : Context, optional
Device context (default is the current default context).
dtype : `str` or `numpy.dtype`, optional
The data type of the returned `NDArray`. The default datatype is `float32`.
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
`NDArray` filled with `val`, with the given shape, ctx, and dtype.
Examples
--------
>>> mx.nd.full(1, 2.0).asnumpy()
array([ 2.], dtype=float32)
>>> mx.nd.full((1, 2), 2.0, mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.full((1, 2), 2.0, dtype='float16').asnumpy()
array([[ 2., 2.]], dtype=float16)
"""
out = empty(shape, ctx, dtype) if out is None else out
out[:] = val
return out
def array(source_array, ctx=None, dtype=None):
"""Creates an array from any object exposing the array interface.
Parameters
----------
source_array : array_like
An object exposing the array interface, an object whose `__array__`
method returns an array, or any (nested) sequence.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `float32` otherwise.
Returns
-------
NDArray
An `NDArray` with the same contents as the `source_array`.
"""
if isinstance(source_array, NDArray):
dtype = source_array.dtype if dtype is None else dtype
else:
dtype = mx_real_t if dtype is None else dtype
if not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=dtype)
except:
raise TypeError('source_array must be array like object')
arr = empty(source_array.shape, ctx, dtype)
arr[:] = source_array
return arr
def moveaxis(tensor, source, destination):
"""Moves the `source` axis into the `destination` position
while leaving the other axes in their original order
Parameters
----------
tensor : mx.nd.array
The array which axes should be reordered
source : int
Original position of the axes to move.
destination : int
Destination position for each of the original axes.
Returns
-------
result : mx.nd.array
Array with moved axes.
Examples
--------
>>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
>>> mx.nd.moveaxis(X, 0, 1).shape
(3L, 2L)
"""
axes = list(range(tensor.ndim))
try:
axes.pop(source)
except IndexError:
        raise ValueError('Source should verify 0 <= source < tensor.ndim. '
                         'Got %d' % source)
try:
axes.insert(destination, source)
except IndexError:
        raise ValueError('Destination should verify 0 <= destination < tensor.ndim. '
                         'Got %d' % destination)
return op.transpose(tensor, axes)
# pylint: disable= no-member, protected-access, too-many-arguments, redefined-outer-name
def arange(start, stop=None, step=1.0, repeat=1, ctx=None, dtype=mx_real_t):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : float, optional
Start of interval. The default start value is 0.
stop : float
End of interval.
step : float, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
"""
if ctx is None:
ctx = Context.default_ctx
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
dtype=dtype, ctx=str(ctx))
# pylint: enable= no-member, protected-access, too-many-arguments
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : NDArray or numeric value
Left-hand side operand.
rhs : NDArray or numeric value
        Right-hand side operand.
fn_array : function
Function to be called if both lhs and rhs are of ``NDArray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
        Function to be called if lhs is an ``NDArray`` while rhs is a numeric value.
    rfn_scalar : function
        Function to be called if lhs is a numeric value while rhs is an ``NDArray``;
        if not provided, the operation is assumed commutative and ``lfn_scalar`` is used instead.
Returns
--------
NDArray
result array
"""
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs)
else:
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, float(lhs))
else:
return rfn_scalar(rhs, float(lhs))
elif isinstance(rhs, numeric_types):
return lfn_scalar(lhs, float(rhs))
elif isinstance(rhs, NDArray):
return fn_array(lhs, rhs)
else:
raise TypeError('type %s not supported' % str(type(rhs)))
#pylint: enable= too-many-arguments, no-member, protected-access
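# --- Illustrative note (added for clarity; not part of the original source) ---
# `_ufunc_helper` is what lets `add`, `subtract`, etc. accept any mix of scalars and
# NDArrays. Roughly, assuming `import mxnet as mx`:
#
#     mx.nd.add(2, 3)                                  # scalar, scalar   -> fn_scalar   -> 5
#     mx.nd.add(mx.nd.ones((2,)), 1)                   # NDArray, scalar  -> lfn_scalar
#     mx.nd.add(1, mx.nd.ones((2,)))                   # scalar, NDArray  -> lfn_scalar (commutative)
#     mx.nd.add(mx.nd.ones((2, 1)), mx.nd.ones((2,)))  # NDArray, NDArray -> fn_array (broadcasting)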
def add(lhs, rhs):
"""Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be added.
rhs : scalar or array
Second array to be added.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise sum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x+2).asnumpy()
array([[ 3., 3., 3.],
[ 3., 3., 3.]], dtype=float32)
>>> (x+y).asnumpy()
array([[ 1., 1., 1.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.add(x,y).asnumpy()
array([[ 1., 1., 1.],
[ 2., 2., 2.]], dtype=float32)
>>> (z + y).asnumpy()
array([[ 0., 1.],
[ 1., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_add,
operator.add,
_internal._plus_scalar,
None)
# pylint: enable= no-member, protected-access
def subtract(lhs, rhs):
"""Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be subtracted.
rhs : scalar or array
Second array to be subtracted.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise difference of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x-2).asnumpy()
array([[-1., -1., -1.],
[-1., -1., -1.]], dtype=float32)
>>> (x-y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.subtract(x,y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> (z-y).asnumpy()
array([[ 0., 1.],
[-1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_sub,
operator.sub,
_internal._minus_scalar,
_internal._rminus_scalar)
# pylint: enable= no-member, protected-access
def multiply(lhs, rhs):
"""Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be multiplied.
rhs : scalar or array
Second array to be multiplied.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise multiplication of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x*2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x*y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.multiply(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (z*y).asnumpy()
array([[ 0., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mul,
operator.mul,
_internal._mul_scalar,
None)
# pylint: enable= no-member, protected-access
def divide(lhs, rhs):
"""Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array in division.
rhs : scalar or array
Second array in division.
The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise division of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))*6
>>> y = mx.nd.ones((2,1))*2
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 2.],
[ 2.]], dtype=float32)
>>> x/2
<NDArray 2x3 @cpu(0)>
>>> (x/3).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x/y).asnumpy()
array([[ 3., 3., 3.],
[ 3., 3., 3.]], dtype=float32)
>>> mx.nd.divide(x,y).asnumpy()
array([[ 3., 3., 3.],
[ 3., 3., 3.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_div,
operator.truediv,
_internal._div_scalar,
_internal._rdiv_scalar)
# pylint: enable= no-member, protected-access
def modulo(lhs, rhs):
"""Returns element-wise modulo of the input arrays with broadcasting.
Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array in modulo.
rhs : scalar or array
Second array in modulo.
The arrays to be taken modulo. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise modulo of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))*6
>>> y = mx.nd.ones((2,1))*4
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 4.],
[ 4.]], dtype=float32)
>>> x%5
<NDArray 2x3 @cpu(0)>
>>> (x%5).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x%y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.modulo(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mod,
operator.mod,
_internal._mod_scalar,
_internal._rmod_scalar)
# pylint: enable= no-member, protected-access
def power(base, exp):
"""Returns result of first array elements raised to powers from second array, element-wise
with broadcasting.
Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
base : scalar or NDArray
The base array
exp : scalar or NDArray
The exponent array. If ``base.shape != exp.shape``, they must be
broadcastable to a common shape.
Returns
--------
NDArray
The bases in x raised to the exponents in y.
Examples
--------
>>> x = mx.nd.ones((2,3))*2
>>> y = mx.nd.arange(1,3).reshape((2,1))
>>> z = mx.nd.arange(1,3).reshape((2,1))
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> (x**2).asnumpy()
array([[ 4., 4., 4.],
[ 4., 4., 4.]], dtype=float32)
>>> (x**y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> mx.nd.power(x,y).asnumpy()
array([[ 2., 2., 2.],
[ 4., 4., 4.]], dtype=float32)
>>> (z**y).asnumpy()
array([[ 1.],
[ 4.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
base,
exp,
op.broadcast_power,
operator.pow,
_internal._power_scalar,
_internal._rpower_scalar)
# pylint: enable= no-member, protected-access
def maximum(lhs, rhs):
"""Returns element-wise maximum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise maximum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.maximum(x, 2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> mx.nd.maximum(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.maximum(y, z).asnumpy()
array([[ 0., 1.],
[ 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_maximum,
lambda x, y: x if x > y else y,
_internal._maximum_scalar,
None)
# pylint: enable= no-member, protected-access
def minimum(lhs, rhs):
"""Returns element-wise minimum of the input arrays with broadcasting.
Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise minimum of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> mx.nd.minimum(x, 2).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.minimum(z, y).asnumpy()
array([[ 0., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_minimum,
lambda x, y: x if x < y else y,
_internal._minimum_scalar,
None)
# pylint: enable= no-member, protected-access
def equal(lhs, rhs):
"""Returns the result of element-wise **equal to** (==) comparison operation with
broadcasting.
    For each element in input arrays, return 1(true) if corresponding elements are the same,
otherwise return 0(false).
Equivalent to ``lhs == rhs`` and ``mx.nd.broadcast_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x == 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x == y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.equal(x,y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (z == y).asnumpy()
array([[ 1., 0.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_equal,
lambda x, y: 1 if x == y else 0,
_internal._equal_scalar,
None)
# pylint: enable= no-member, protected-access
def not_equal(lhs, rhs):
"""Returns the result of element-wise **not equal to** (!=) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if corresponding elements are different,
otherwise return 0(false).
Equivalent to ``lhs != rhs`` and ``mx.nd.broadcast_not_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (z == y).asnumpy()
array([[ 1., 0.],
[ 0., 1.]], dtype=float32)
>>> (x != 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x != y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.not_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> (z != y).asnumpy()
array([[ 0., 1.],
[ 1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_not_equal,
lambda x, y: 1 if x != y else 0,
_internal._not_equal_scalar,
None)
# pylint: enable= no-member, protected-access
def greater(lhs, rhs):
"""Returns the result of element-wise **greater than** (>) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are greater than rhs,
otherwise return 0(false).
Equivalent to ``lhs > rhs`` and ``mx.nd.broadcast_greater(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x > 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x > y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.greater(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 0., 0., 0.]], dtype=float32)
>>> (z > y).asnumpy()
array([[ 0., 1.],
[ 0., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_greater,
lambda x, y: 1 if x > y else 0,
_internal._greater_scalar,
_internal._lesser_scalar)
# pylint: enable= no-member, protected-access
def greater_equal(lhs, rhs):
"""Returns the result of element-wise **greater than or equal to** (>=) comparison
operation with broadcasting.
    For each element in input arrays, return 1(true) if lhs elements are greater than or equal to rhs,
otherwise return 0(false).
Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x >= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x >= y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.greater_equal(x, y).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (z >= y).asnumpy()
array([[ 1., 1.],
[ 0., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_greater_equal,
lambda x, y: 1 if x >= y else 0,
_internal._greater_equal_scalar,
_internal._lesser_equal_scalar)
# pylint: enable= no-member, protected-access
def lesser(lhs, rhs):
"""Returns the result of element-wise **lesser than** (<) comparison operation
with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are less than rhs,
otherwise return 0(false).
Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x < 1).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (x < y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> mx.nd.lesser(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> (z < y).asnumpy()
array([[ 0., 0.],
[ 1., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_lesser,
lambda x, y: 1 if x < y else 0,
_internal._lesser_scalar,
_internal._greater_scalar)
# pylint: enable= no-member, protected-access
def lesser_equal(lhs, rhs):
"""Returns the result of element-wise **lesser than or equal to** (<=) comparison
operation with broadcasting.
For each element in input arrays, return 1(true) if lhs elements are
    less than or equal to rhs, otherwise return 0(false).
Equivalent to ``lhs <= rhs`` and ``mx.nd.broadcast_lesser_equal(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be compared.
rhs : scalar or array
Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
Output array of boolean values.
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(2).reshape((1,2))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([[ 0., 1.]], dtype=float32)
>>> (x <= 1).asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (x <= y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.lesser_equal(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (z <= y).asnumpy()
array([[ 1., 0.],
[ 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
return _ufunc_helper(
lhs,
rhs,
op.broadcast_lesser_equal,
lambda x, y: 1 if x <= y else 0,
_internal._lesser_equal_scalar,
_internal._greater_equal_scalar)
# pylint: enable= no-member, protected-access
def true_divide(lhs, rhs):
"""This function is similar to :meth:`divide`.
"""
return divide(lhs, rhs)
def concatenate(arrays, axis=0, always_copy=True):
"""DEPRECATED, use ``concat`` instead
Parameters
----------
arrays : list of `NDArray`
        Arrays to be concatenated. They must have identical shapes except
        along the concatenation axis. They also must have the same data type.
axis : int
The axis along which to concatenate.
always_copy : bool
Default `True`. When not `True`, if the arrays only contain one
        `NDArray`, that element will be returned directly, avoiding a copy.
Returns
-------
NDArray
An `NDArray` that lives on the same context as `arrays[0].context`.
"""
assert isinstance(arrays, list)
assert len(arrays) > 0
assert isinstance(arrays[0], NDArray)
if not always_copy and len(arrays) == 1:
return arrays[0]
shape_axis = arrays[0].shape[axis]
shape_rest1 = arrays[0].shape[0:axis]
shape_rest2 = arrays[0].shape[axis+1:]
dtype = arrays[0].dtype
for arr in arrays[1:]:
shape_axis += arr.shape[axis]
assert shape_rest1 == arr.shape[0:axis]
assert shape_rest2 == arr.shape[axis+1:]
assert dtype == arr.dtype
ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)
idx = 0
begin = [0 for _ in ret_shape]
end = list(ret_shape)
for arr in arrays:
if axis == 0:
ret[idx:idx+arr.shape[0]] = arr
else:
begin[axis] = idx
end[axis] = idx+arr.shape[axis]
# pylint: disable=no-member,protected-access
_internal._crop_assign(ret, arr, out=ret,
begin=tuple(begin),
end=tuple(end))
# pylint: enable=no-member,protected-access
idx += arr.shape[axis]
return ret
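# --- Illustrative usage sketch (added for clarity; not part of the original source) ---
# `concatenate` stitches arrays along one axis; the other axes must match exactly.
# Assuming `import mxnet as mx`:
#
#     a = mx.nd.ones((2, 3))
#     b = mx.nd.zeros((2, 3))
#     c = mx.nd.concatenate([a, b], axis=0)   # c.shape == (4, 3); prefer mx.nd.concat in new code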
# pylint: disable=redefined-outer-name
def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
"""DEPRECATED, use mx.img instead
Parameters
----------
str_img : str
Binary image data
clip_rect : iterable of 4 int
Clip decoded image to rectangle (x0, y0, x1, y1).
out : NDArray
Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
index : int
Output decoded image to i-th slice of 4 dimensional buffer.
channels : int
Number of channels to output. Decode to grey scale when channels = 1.
mean : NDArray
        Subtract mean from the decoded image before outputting.
"""
# pylint: disable= no-member, protected-access, too-many-arguments
if mean is None:
mean = NDArray(_new_empty_handle())
if out is None:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img)
else:
return _internal._imdecode(mean, index,
clip_rect[0],
clip_rect[1],
clip_rect[2],
clip_rect[3],
channels,
len(str_img),
str_img=str_img,
out=out)
def zeros(shape, ctx=None, dtype=None, **kwargs):
"""Returns a new array filled with all zeros, with the given shape and type.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A created array
Examples
--------
>>> mx.nd.zeros(1).asnumpy()
array([ 0.], dtype=float32)
>>> mx.nd.zeros((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= unused-argument
if ctx is None:
ctx = Context.default_ctx
dtype = mx_real_t if dtype is None else dtype
# pylint: disable= no-member, protected-access
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
# pylint: enable= no-member, protected-access
def empty(shape, ctx=None, dtype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
NDArray
A created array.
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = Context.default_ctx
if dtype is None:
dtype = mx_real_t
return NDArray(handle=_new_alloc_handle(shape, ctx, False, dtype))
| 31.917762
| 99
| 0.54528
|
ba3a2db20ed39479f4d40ec01c58b455b5d59feb
| 1,275
|
py
|
Python
|
ship.py
|
favmatteo/navicella-cavallo
|
2a785da894399b912e6c35d49298db9bd8aecbd7
|
[
"MIT"
] | 5
|
2021-11-09T22:04:48.000Z
|
2022-02-08T12:32:31.000Z
|
ship.py
|
favmatteo/navicella-cavallo
|
2a785da894399b912e6c35d49298db9bd8aecbd7
|
[
"MIT"
] | 1
|
2021-11-11T11:16:13.000Z
|
2021-11-11T17:26:47.000Z
|
ship.py
|
favmatteo/navicella-cavallo
|
2a785da894399b912e6c35d49298db9bd8aecbd7
|
[
"MIT"
] | 2
|
2021-11-10T10:06:47.000Z
|
2022-02-08T12:29:44.000Z
|
import settings
"""Classe astratta che rappresenta una navicella"""
class Ship:
def __init__(self, x, y):
""" Inizializza gli attributi della classe Ship """
self.x = x
self.y = y
self.ship_img = None
self.laser_img = None
def draw(self, window):
window.blit(self.ship_img, (self.x, self.y))
def get_width(self):
return self.ship_img.get_width()
def get_height(self):
return self.ship_img.get_height()
""" Classe Player che eredita dalla classe Ship"""
class PlayerShip(Ship):
def __init__(self, x, y):
""" Inizializza gli attributi della classe PlayerShip """
super().__init__(x, y)
self.cool_down_counter = 0
self.ship_img = settings.YELLOW_SPACE_SHIP
"""Classe Nemico"""
class EnemyShip(Ship):
def __init__(self, x, y, color):
""" Inizializza gli attributi della classe EnemyShip """
super().__init__(x, y)
self.colors = {
"red": settings.RED_SPACE_SHIP,
"green": settings.GREEN_SPACE_SHIP,
"blue": settings.BLUE_SPACE_SHIP,
}
        self.ship_img = self.colors.get(color, self.colors['red'])
def move(self):
self.y += settings.ENEMY_SPEED
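# --- Illustrative note (added for clarity; not part of the original source) ---
# EnemyShip looks its sprite up in the color map and falls back to the red sprite
# for unknown colors:
#
#     EnemyShip(100, 0, "green")    # uses settings.GREEN_SPACE_SHIP
#     EnemyShip(100, 0, "purple")   # unknown color -> settings.RED_SPACE_SHIP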
| 23.181818
| 65
| 0.603922
|
82ff8ee9b54f6081d26d30c863eaefde479bd6fd
| 1,525
|
py
|
Python
|
python3/543.diameter-of-binary-tree.248946473.ac.py
|
Diego-Zulu/leetcode_answers
|
ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4
|
[
"MIT"
] | null | null | null |
python3/543.diameter-of-binary-tree.248946473.ac.py
|
Diego-Zulu/leetcode_answers
|
ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4
|
[
"MIT"
] | null | null | null |
python3/543.diameter-of-binary-tree.248946473.ac.py
|
Diego-Zulu/leetcode_answers
|
ad435df1bd95fb2c6e17d2d9ff349282c98ee0f4
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=543 lang=python3
#
# [543] Diameter of Binary Tree
#
# https://leetcode.com/problems/diameter-of-binary-tree/description/
#
# algorithms
# Easy (47.98%)
# Likes: 2733
# Dislikes: 176
# Total Accepted: 308.1K
# Total Submissions: 642K
# Testcase Example: '[1,2,3,4,5]'
#
#
# Given a binary tree, you need to compute the length of the diameter of the
# tree. The diameter of a binary tree is the length of the longest path between
# any two nodes in a tree. This path may or may not pass through the root.
#
#
#
# Example:
# Given a binary tree
#
# 1
# / \
# 2 3
# / \
# 4 5
#
#
#
# Return 3, which is the length of the path [4,2,1,3] or [5,2,1,3].
#
#
# Note:
# The length of path between two nodes is represented by the number of edges
# between them.
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
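# --- Explanatory note (added for clarity; not part of the original solution) ---
# Idea: the helper below returns the number of nodes on the longest downward chain
# from `root` (its height counted in nodes). For each node, the longest path passing
# through it therefore has `left_path + right_path - 2` edges, and `result[0]` keeps
# the best value seen anywhere in the tree.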
def diameterOfBinaryTreeAux(root, result):
if root is None:
return 0
left_path = 1 + diameterOfBinaryTreeAux(root.left, result)
right_path = 1 + diameterOfBinaryTreeAux(root.right, result)
result[0] = max(result[0], left_path + right_path - 2)
return max(left_path, right_path)
class Solution:
def diameterOfBinaryTree(self, root: TreeNode) -> int:
result = [0]
diameterOfBinaryTreeAux(root, result)
return result.pop()
# @lc code=end
| 23.461538
| 79
| 0.630164
|
0091861d3bbc3164a84bbec6f99ee0ab32808ad0
| 1,958
|
py
|
Python
|
astropath/utilities/units/core.py
|
AstroPathJHU/AstroPathPipeline
|
3b0d9cdad18b84e55d00724f89f09bc7f7464c98
|
[
"Apache-2.0"
] | 14
|
2021-06-14T05:05:58.000Z
|
2022-01-15T14:07:30.000Z
|
astropath/utilities/units/core.py
|
AstroPathJHU/AstroPathPipeline
|
3b0d9cdad18b84e55d00724f89f09bc7f7464c98
|
[
"Apache-2.0"
] | null | null | null |
astropath/utilities/units/core.py
|
AstroPathJHU/AstroPathPipeline
|
3b0d9cdad18b84e55d00724f89f09bc7f7464c98
|
[
"Apache-2.0"
] | 2
|
2021-08-14T16:22:50.000Z
|
2022-01-16T17:47:07.000Z
|
import abc, methodtools
currentmodule = None
class UnitsError(Exception): pass
class Distance:
def __new__(self, *args, **kwargs):
return currentmodule.Distance(*args, **kwargs)
def onepixel(pscale):
return Distance(pixels=1, pscale=pscale)
def onemicron(pscale):
return Distance(microns=1, pscale=pscale)
class ThingWithPscale(abc.ABC):
@property
@abc.abstractmethod
def pscale(self): return self.__pscale
@pscale.setter
def pscale(self, pscale): object.__setattr__(self, "_ThingWithPscale__pscale", pscale)
@methodtools.lru_cache()
@property
def onepixel(self):
return onepixel(pscale=self.pscale)
@methodtools.lru_cache()
@property
def onemicron(self):
return onemicron(pscale=self.pscale)
class ThingWithQpscale(abc.ABC):
@property
@abc.abstractmethod
def qpscale(self): return self.__qpscale
@qpscale.setter
def qpscale(self, qpscale): object.__setattr__(self, "_ThingWithQpscale__qpscale", qpscale)
@methodtools.lru_cache()
@property
def oneqppixel(self):
return onepixel(pscale=self.qpscale)
@methodtools.lru_cache()
@property
def oneqpmicron(self):
return onemicron(pscale=self.qpscale)
class ThingWithApscale(abc.ABC):
@property
@abc.abstractmethod
def apscale(self): return self.__apscale
@apscale.setter
def apscale(self, apscale): object.__setattr__(self, "_ThingWithApscale__apscale", apscale)
@methodtools.lru_cache()
@property
def oneappixel(self):
return onepixel(pscale=self.apscale)
@methodtools.lru_cache()
@property
def oneapmicron(self):
return onemicron(pscale=self.apscale)
class ThingWithImscale(abc.ABC):
@property
@abc.abstractmethod
def imscale(self): pass
@imscale.setter
def imscale(self, imscale): object.__setattr__(self, "_ThingWithImscale__imscale", imscale)
@property
def oneimpixel(self): return onepixel(pscale=self.imscale)
@property
def oneimmicron(self): return onemicron(pscale=self.imscale)
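# --- Illustrative note (added for clarity; not part of the original source) ---
# A class gains the cached `onepixel` / `onemicron` helpers simply by mixing in
# ThingWithPscale and overriding `pscale`. A minimal, hypothetical sketch (assumes a
# units backend has been selected elsewhere so that `currentmodule` is set):
#
#     class Sample(ThingWithPscale):
#         @property
#         def pscale(self):
#             return 2.0
#
#     Sample().onepixel    # == onepixel(pscale=2.0) == Distance(pixels=1, pscale=2.0)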
| 27.577465
| 93
| 0.755363
|
72066d210c04ec6e8a86e8afac14680c0da51efe
| 403
|
py
|
Python
|
setup.py
|
urielka/pydds-rti-xml
|
15ad015420a966284203afa4e115e2871565d4ad
|
[
"MIT"
] | null | null | null |
setup.py
|
urielka/pydds-rti-xml
|
15ad015420a966284203afa4e115e2871565d4ad
|
[
"MIT"
] | null | null | null |
setup.py
|
urielka/pydds-rti-xml
|
15ad015420a966284203afa4e115e2871565d4ad
|
[
"MIT"
] | 1
|
2018-06-29T06:39:52.000Z
|
2018-06-29T06:39:52.000Z
|
from setuptools import setup, find_packages
import os
setup(name='pydds-rti-xml',
version='0.2.0',
description='Python wrapper for RTI DDS with XML application support',
author='Uriel Katz',
author_email='uriel.katz@gmail.com',
url='https://github.com/urielka/pydds-rti-xml',
include_package_data=True,
packages = find_packages(),
py_modules=['dds'],
)
| 26.866667
| 76
| 0.672457
|
daff1307f9ed9f8bbc3337df298f90d55c7b880e
| 537
|
py
|
Python
|
rx3/core/operators/publishvalue.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
rx3/core/operators/publishvalue.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
rx3/core/operators/publishvalue.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
from typing import Any, Callable, Optional
from rx3 import operators as ops
from rx3.core import Observable
from rx3.subject import BehaviorSubject
from rx3.core.typing import Mapper
def _publish_value(initial_value: Any, mapper: Optional[Mapper] = None) -> Callable[[Observable], Observable]:
if mapper:
def subject_factory(scheduler):
return BehaviorSubject(initial_value)
return ops.multicast(subject_factory=subject_factory, mapper=mapper)
return ops.multicast(BehaviorSubject(initial_value))
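# --- Illustrative usage sketch (added for clarity; not part of the original source) ---
# `publish_value` multicasts a source through a BehaviorSubject seeded with
# `initial_value`, so every subscriber immediately receives the latest value.
# A rough sketch, assuming rx3 mirrors the public RxPY API (`import rx3`,
# `from rx3 import operators as ops`):
#
#     published = rx3.of(1, 2, 3).pipe(ops.publish_value(0))
#     published.subscribe(print)   # prints the seed 0 right away
#     published.connect()          # then the source items 1, 2, 3 flow through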
| 33.5625
| 110
| 0.769088
|
066426649ff57fc38e87551fc2b4e62e6bf45bc9
| 7,022
|
py
|
Python
|
src/utils.py
|
asafjo23/outliers-detection
|
dd78e3161c02a2f8ae685ef31ad796787e305591
|
[
"MIT"
] | null | null | null |
src/utils.py
|
asafjo23/outliers-detection
|
dd78e3161c02a2f8ae685ef31ad796787e305591
|
[
"MIT"
] | 4
|
2022-03-28T08:53:39.000Z
|
2022-03-28T09:00:27.000Z
|
src/utils.py
|
asafjo23/outliers-detection
|
dd78e3161c02a2f8ae685ef31ad796787e305591
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from scipy.sparse import csr_matrix
from torch import Tensor, sum, LongTensor, FloatTensor, histc
from sklearn.metrics.pairwise import cosine_similarity
from collections import namedtuple
from typing import Tuple, Mapping
from torch.nn import Parameter
from src.data_set import RatingsDataset
from src.model import MF
class ProcColumn:
def __init__(self, column: Series):
uniq = column.unique()
self._name_2_index = {o: i for i, o in enumerate(uniq)}
self._idx_2_name = {i: e for i, e in enumerate(self._name_2_index.keys())}
self.encoded_col = np.array([self._name_2_index[x] for x in column])
def get_index(self, name: str) -> int:
return self._name_2_index[name]
def get_name(self, index: int) -> str:
return self._idx_2_name[index]
class DataProcessor:
def __init__(self, original_df: DataFrame):
self.min_rating = min(original_df.rating.values)
self.max_rating = max(original_df.rating.values)
(
self.ratings_by_user,
self.histograms_by_users,
self.item_to_index_rating,
) = self.data_process(original_df=original_df)
def data_process(self, original_df: DataFrame) -> Tuple:
"""
        This function builds the original ratings tensor for each user and saves, per user,
        a mapping from item id to that item's index within the user's ratings.
        It also builds the original histogram of each user's ratings.
:param original_df: original dataframe
:return: Tuple of ratings_by_users, histograms_by_users, item_to_index_rating
"""
ratings_by_users, histograms_by_users, item_to_index_rating = {}, {}, {}
items_grouped_by_users = original_df.groupby("user_id")
for user_id, group in items_grouped_by_users:
ratings_as_tensor = Tensor(group.rating.values)
ratings_by_users[user_id] = Parameter(ratings_as_tensor, requires_grad=False)
histograms_by_users[user_id] = histc(
ratings_as_tensor, bins=self.max_rating, min=self.min_rating, max=self.max_rating
)
item_to_index_rating[user_id] = {
row.item_id: i for i, row in enumerate(group.itertuples())
}
return ratings_by_users, histograms_by_users, item_to_index_rating
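# --- Illustrative usage sketch (added for clarity; not part of the original source) ---
# DataProcessor expects a ratings DataFrame with user_id / item_id / rating columns:
#
#     df = DataFrame({"user_id": ["u1", "u1", "u2"],
#                     "item_id": ["i1", "i2", "i1"],
#                     "rating":  [5, 3, 4]})
#     proc = DataProcessor(original_df=df)
#     proc.ratings_by_user["u1"]         # Parameter wrapping tensor([5., 3.])
#     proc.histograms_by_users["u1"]     # per-rating-value counts for user "u1"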
class DataConverter:
def __init__(self, original_df: DataFrame, n_random_users: int, n_ratings_per_random_user: int):
assert list(original_df.columns) == ["user_id", "item_id", "rating"]
original_data = original_df.copy()
self.min_rating = min(original_df.rating.values)
self.max_rating = max(original_df.rating.values)
if n_random_users > 0:
random_users = self.create_random_users(
original_df=original_data,
number_of_users_to_add=n_random_users,
n_ratings_per_random_user=n_ratings_per_random_user,
)
original_data = pd.concat([original_data, random_users], ignore_index=True)
self._user_original_id_to_encoded_id = ProcColumn(original_data.user_id)
self._item_original_id_to_encoded_id = ProcColumn(original_data.item_id)
self.original_df = original_data
self.encoded_df = original_data.copy()
self.encoded_df.user_id = self._user_original_id_to_encoded_id.encoded_col
self.encoded_df.item_id = self._item_original_id_to_encoded_id.encoded_col
self.n_users = self.original_df.user_id.nunique()
self.n_item = self.original_df.item_id.nunique()
def get_original_user_id(self, encoded_id: int) -> str:
return self._user_original_id_to_encoded_id.get_name(index=encoded_id)
def get_original_item_id(self, encoded_id: int) -> str:
return self._item_original_id_to_encoded_id.get_name(index=encoded_id)
def get_encoded_user_ids(self) -> np.ndarray:
return self._user_original_id_to_encoded_id.encoded_col
def get_encoded_item_ids(self) -> np.ndarray:
return self._item_original_id_to_encoded_id.encoded_col
def create_random_users(
self, original_df: DataFrame, number_of_users_to_add: int, n_ratings_per_random_user: int
) -> DataFrame:
assert list(original_df.columns) == ["user_id", "item_id", "rating"]
Row = namedtuple("Row", ["user_id", "item_id", "rating"])
random_data = []
original_num_of_users = original_df.user_id.nunique()
for i in range(original_num_of_users, original_num_of_users + number_of_users_to_add):
for _ in range(n_ratings_per_random_user):
random_song_id = np.random.choice(original_df.item_id.values)
random_rating = np.random.randint(self.min_rating, self.max_rating)
random_data.append(
Row(
user_id=f"random_guy_{i}",
item_id=random_song_id,
rating=random_rating,
)
)
return DataFrame(random_data, columns=["user_id", "item_id", "rating"])
def create_dataset(data_converter: DataConverter):
users_tensor = LongTensor(data_converter.encoded_df.user_id.values)
items_tensor = LongTensor(data_converter.encoded_df.item_id.values)
ratings_tensor = FloatTensor(data_converter.encoded_df.rating.values)
return RatingsDataset(
users_tensor=users_tensor,
items_tensor=items_tensor,
ratings_tensor=ratings_tensor,
)
def mine_outliers(model: MF, data_converter: DataConverter) -> Mapping:
optimized_user_embeddings = np.array(model.user_factors.weight.data)
c_similarity = cosine_similarity(optimized_user_embeddings)
similarities = c_similarity.sum(axis=1)
c_similarity_scores = {
data_converter.get_original_user_id(i): score for i, score in enumerate(similarities)
}
return c_similarity_scores
def classical_outliers_mining(data_converter: DataConverter) -> Mapping:
"""
    This function tries to identify the outliers by computing cosine similarities between all
    users' rating vectors.
    :return: mapping from user id to similarity score
"""
sparse_matrix = csr_matrix(
(data_converter.n_users, data_converter.n_item), dtype=np.float64
).toarray()
items_group_by_users = data_converter.encoded_df.groupby("user_id")
for key, group in items_group_by_users:
total_sum = sum(group.rating.values)
non_zeros = len(group.rating.values)
for row in group.itertuples():
sparse_matrix[row.user_id][row.item_id] = row.rating - total_sum / non_zeros
sparse_matrix[key] /= total_sum
c_similarity = cosine_similarity(sparse_matrix)
similarities = c_similarity.sum(axis=1)
c_similarity_scores = {
data_converter.get_original_user_id(i): score for i, score in enumerate(similarities)
}
return c_similarity_scores
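# --- Explanatory note (added for clarity; not part of the original source) ---
# Both mine_outliers and classical_outliers_mining score each user by the sum of its
# cosine similarities to every other user; users with unusually low totals are the
# likely outliers under this scheme. A sketch of consuming the scores (illustrative only):
#
#     scores = classical_outliers_mining(data_converter)
#     suspects = sorted(scores, key=scores.get)[:10]   # the 10 least similar users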
| 41.305882
| 100
| 0.697664
|
85e19c9cc95d2e3e452e7b929ebadf08162bf3e5
| 2,469
|
py
|
Python
|
cogs/verify.py
|
xspo-oky/iris-test
|
a4cc15cd590c4ba9b78276565281ca6984be7bef
|
[
"MIT"
] | null | null | null |
cogs/verify.py
|
xspo-oky/iris-test
|
a4cc15cd590c4ba9b78276565281ca6984be7bef
|
[
"MIT"
] | null | null | null |
cogs/verify.py
|
xspo-oky/iris-test
|
a4cc15cd590c4ba9b78276565281ca6984be7bef
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import config
import asyncio
from discord.utils import get
import random
class Verify(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_message(self, message):
if message.channel.id == config.verifyChannel and not message.content.startswith(".verify "):
if message.author == self.client.user:
pass
else:
await message.delete()
@commands.command()
async def verify(self, ctx, *args):
if args == () and ctx.channel.id == config.verifyChannel:
msg = await ctx.send("Missing arguments")
await asyncio.sleep(2)
await msg.delete()
await ctx.message.delete()
elif ctx.channel.id != config.verifyChannel or args == ():
msg = await ctx.send("This command can't be used in this channel.")
await asyncio.sleep(2)
await msg.delete()
await ctx.message.delete()
elif ctx.channel.id == config.verifyChannel and args[0] != config.verifycode:
msg = await ctx.send("Incorrect code.")
await asyncio.sleep(2)
await msg.delete()
await ctx.message.delete()
else:
if ctx.channel.id == config.verifyChannel and args[0] == config.verifycode:
await ctx.message.delete()
member = ctx.message.author
role = discord.utils.get(member.guild.roles, id=config.verifyrole)
await member.add_roles(role)
channel = self.client.get_channel(config.verifylogs)
embed=discord.Embed(
color=0x0066ff,
description=f"**{member}** verified and got **{role}** role")
embed.set_footer(text=f"User ID: {ctx.message.author.id}")
await channel.send(embed=embed)
@commands.command()
async def verifymessage(self, ctx):
channel = self.client.get_channel(config.verifyChannel)
embed=discord.Embed(
title="Verification:",
color=0x0066ff,
)
embed.add_field(name="To confirm that you are not a bot, please enter:", value=f"`.verify {config.verifycode}`")
embed.set_footer(text=config.default_footer)
await channel.send(embed=embed)
@verify.error
async def verify_error(self, ctx, error):
if isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):
await ctx.send("Missing required arguments.")
await ctx.message.delete()
else:
print(error)
def setup(client):
client.add_cog(Verify(client))
| 34.291667
| 116
| 0.669097
|
72bfc5ba377adecf88c8721fbcab413e202c929a
| 669
|
py
|
Python
|
URI Online Judge/Practice/question1049.py
|
kushagra1212/Competitive-Programming
|
5b68774c617d6abdf1b29893b1b13d47f62161e8
|
[
"MIT"
] | 994
|
2017-02-28T06:13:47.000Z
|
2022-03-31T10:49:00.000Z
|
Practice/UriOnlineJudge/question1049.py
|
devesh17m/Competitive-Programming
|
2d459dc8dc5ac628d94700b739988b0ea364cb71
|
[
"MIT"
] | 16
|
2018-01-01T02:59:55.000Z
|
2021-11-22T12:49:16.000Z
|
Practice/UriOnlineJudge/question1049.py
|
devesh17m/Competitive-Programming
|
2d459dc8dc5ac628d94700b739988b0ea364cb71
|
[
"MIT"
] | 325
|
2017-06-15T03:32:43.000Z
|
2022-03-28T22:43:42.000Z
|
# coding: utf-8
classificacao = raw_input()
tipo = raw_input()
especie = raw_input()
if (classificacao == "vertebrado"):
if(tipo == "ave"):
if (especie == "carnivoro"):
print("aguia")
elif(especie == "onivoro"):
print("pomba")
elif (tipo == "mamifero"):
if (especie == "onivoro"):
print("homem")
elif(especie == "herbivoro"):
print("vaca")
elif(classificacao == "invertebrado"):
if(tipo == "inseto"):
if (especie == "hematofago"):
print ("pulga")
elif(especie == "herbivoro"):
print ("lagarta")
elif(tipo == "anelideo"):
if (especie == "hematofago"):
print("sanguessuga")
elif(especie == "onivoro"):
print("minhoca")
| 20.90625
| 38
| 0.605381
|
3ad1db2daf37d07310381dbbb814eddb95195e0c
| 859
|
py
|
Python
|
setup.py
|
nielsdrost/pymt
|
ae39bf807428827a6904202bf4d3b927daa255ea
|
[
"MIT"
] | null | null | null |
setup.py
|
nielsdrost/pymt
|
ae39bf807428827a6904202bf4d3b927daa255ea
|
[
"MIT"
] | null | null | null |
setup.py
|
nielsdrost/pymt
|
ae39bf807428827a6904202bf4d3b927daa255ea
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
import versioneer
def read_requirements():
import os
path = os.path.dirname(os.path.abspath(__file__))
requirements_file = os.path.join(path, "requirements.txt")
try:
with open(requirements_file, "r") as req_fp:
requires = req_fp.read().split()
except IOError:
return []
else:
return [require.split() for require in requires]
setup(
name="pymt",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="The CSDMS Python Modeling Toolkit",
author="Eric Hutton",
author_email="huttone@colorado.edu",
url="http://csdms.colorado.edu",
setup_requires=["setuptools"],
packages=find_packages(exclude=("tests*",)),
entry_points={"console_scripts": ["cmt-config=cmt.cmd.cmt_config:main"]},
)
| 26.84375
| 77
| 0.675204
|
777063b782c6df91ae36400652cb0dcd4d91ed40
| 6,064
|
py
|
Python
|
pirates/piratesgui/PDialog.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/piratesgui/PDialog.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/piratesgui/PDialog.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pandac.PandaModules import TextNode
from direct.gui.DirectGui import *
from direct.directnotify import DirectNotifyGlobal
from otp.otpgui import OTPDialog
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import BorderFrame
from pirates.piratesgui import PiratesGuiGlobals
class PDialog(DirectDialog):
loadedAssets = False
checkButton = None
cancelButton = None
def __init__(self, parent=None, style=OTPDialog.NoButtons, giveMouse=True, **kw):
self.style = style
if not self.loadedAssets:
buttons = loader.loadModel('models/gui/lookout_gui')
self.checkButton = (buttons.find('**/lookout_submit'), buttons.find('**/lookout_submit_down'), buttons.find('**/lookout_submit_over'))
self.cancelButton = (buttons.find('**/lookout_close_window'), buttons.find('**/lookout_close_window_down'), buttons.find('**/lookout_close_window_over'))
for button in self.checkButton:
button.setScale(0.2)
button.flattenStrong()
for button in self.cancelButton:
button.setScale(0.2)
button.flattenStrong()
self.loadedAssets = True
buttons.removeNode()
if self.style == OTPDialog.TwoChoiceCustom:
buttonImage = [
self.checkButton, self.cancelButton]
buttonValue = [DGG.DIALOG_OK, DGG.DIALOG_CANCEL]
if 'buttonText' in kw:
buttonText = kw['buttonText']
del kw['buttonText']
else:
buttonText = [
PLocalizer.DialogOK, PLocalizer.DialogCancel]
elif self.style == OTPDialog.TwoChoice:
buttonImage = [
self.checkButton, self.cancelButton]
buttonText = [PLocalizer.DialogOK, PLocalizer.DialogCancel]
buttonValue = [
DGG.DIALOG_OK, DGG.DIALOG_CANCEL]
elif self.style == OTPDialog.YesNo:
buttonImage = [
self.checkButton, self.cancelButton]
buttonText = [PLocalizer.DialogYes, PLocalizer.DialogNo]
buttonValue = [
DGG.DIALOG_OK, DGG.DIALOG_CANCEL]
elif self.style == OTPDialog.Acknowledge:
buttonImage = [
self.checkButton]
buttonText = [PLocalizer.DialogOK]
buttonValue = [DGG.DIALOG_OK]
elif self.style == OTPDialog.CancelOnly:
buttonImage = [
self.cancelButton]
buttonText = [PLocalizer.DialogCancel]
buttonValue = [DGG.DIALOG_CANCEL]
elif self.style == OTPDialog.NoButtons:
buttonImage = []
buttonText = []
buttonValue = []
else:
self.notify.error('No such style as: ' + str(self.style))
self.borderFrame = BorderFrame.BorderFrame(borderScale=0.5)
        optiondefs = (
            ('image', self.borderFrame, None),
            ('relief', None, None),
            ('buttonImageList', buttonImage, DGG.INITOPT),
            ('buttonTextList', buttonText, DGG.INITOPT),
            ('buttonValueList', buttonValue, DGG.INITOPT),
            ('buttonPadSF', 2.2, DGG.INITOPT),
            ('title_text', '', None),
            ('title_text_font', DGG.getDefaultFont(), None),
            ('title_text_wordwrap', 12, None),
            ('title_text_scale', PiratesGuiGlobals.TextScaleTitleSmall, None),
            ('title_text_fg', PiratesGuiGlobals.TextFG1, None),
            ('title_text_shadow', PiratesGuiGlobals.TextShadow, None),
            ('title_text_align', TextNode.ACenter, None),
            ('text_font', DGG.getDefaultFont(), None),
            ('text_wordwrap', 12, None),
            ('text_scale', PiratesGuiGlobals.TextScaleLarge, None),
            ('text_fg', PiratesGuiGlobals.TextFG1, None),
            ('text_shadow', PiratesGuiGlobals.TextShadow, None),
            ('text_align', TextNode.ALeft, None),
            ('button_pad', (0, 0), None),
            ('button_relief', None, None),
            ('button_text_pos', (0, -0.08), None),
            ('button_text_fg', PiratesGuiGlobals.TextFG1, None),
            ('button_text_shadow', PiratesGuiGlobals.TextShadow, None),
            ('button_text_scale', PiratesGuiGlobals.TextScaleLarge, None),
            ('fadeScreen', 0.5, None),
            ('image_color', (1, 1, 1, 1), None),
            ('destroyedCallback', None, None))
self.defineoptions(kw, optiondefs)
DirectDialog.__init__(self, parent)
def cleanupBorderFrame():
self.borderFrame.destroy()
self.postInitialiseFuncList.append(cleanupBorderFrame)
self.createcomponent('title', (), None, DirectLabel, (self,), relief=None, text_pos=(0, 0.16))
self.initialiseoptions(PDialog)
self.setBin('gui-fixed', 1)
def destroy(self):
if self['destroyedCallback']:
self['destroyedCallback']()
DirectDialog.destroy(self)
class PGlobalDialog(PDialog):
notify = DirectNotifyGlobal.directNotify.newCategory('PGlobalDialog')
def __init__(self, message='', doneEvent=None, style=OTPDialog.NoButtons, okButtonText=PLocalizer.DialogOK, cancelButtonText=PLocalizer.DialogCancel, **kw):
if doneEvent == None and style != OTPDialog.NoButtons:
self.notify.error('Boxes with buttons must specify a doneEvent.')
self.__doneEvent = doneEvent
if style == OTPDialog.NoButtons:
buttonText = []
elif style == OTPDialog.Acknowledge:
buttonText = [
okButtonText]
elif style == OTPDialog.CancelOnly:
buttonText = [
cancelButtonText]
else:
buttonText = [
okButtonText, cancelButtonText]
optiondefs = (('dialogName', 'globalDialog', DGG.INITOPT), ('buttonTextList', buttonText, DGG.INITOPT), ('text', message, None), ('command', self.handleButton, None))
self.defineoptions(kw, optiondefs)
PDialog.__init__(self, style=style)
self.initialiseoptions(PGlobalDialog)
def handleButton(self, value):
if value == DGG.DIALOG_OK:
self.doneStatus = 'ok'
messenger.send(self.__doneEvent)
elif value == DGG.DIALOG_CANCEL:
self.doneStatus = 'cancel'
messenger.send(self.__doneEvent)
| 50.533333
| 1,215
| 0.64215
|
9fc2fb585ab81aff14a51d613814ecebc401f77f
| 30,543
|
py
|
Python
|
numba/tests/test_typedlist.py
|
Rubtsowa/numba
|
6d7cddb9084d843e12bcdf9c443bc9cce56aa02f
|
[
"BSD-2-Clause"
] | null | null | null |
numba/tests/test_typedlist.py
|
Rubtsowa/numba
|
6d7cddb9084d843e12bcdf9c443bc9cce56aa02f
|
[
"BSD-2-Clause"
] | null | null | null |
numba/tests/test_typedlist.py
|
Rubtsowa/numba
|
6d7cddb9084d843e12bcdf9c443bc9cce56aa02f
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function, absolute_import, division
from itertools import product
import numpy as np
from numba import njit
from numba import int32, float32, types, prange
from numba import jitclass, typeof
from numba.typed import List, Dict
from numba.utils import IS_PY3
from .support import (TestCase, MemoryLeakMixin, unittest, override_config,
forbid_codegen)
from numba.unsafe.refcount import get_refcount
from .test_parfors import skip_unsupported as parfors_skip_unsupported
skip_py2 = unittest.skipUnless(IS_PY3, reason='not supported in py2')
def to_tl(l):
""" Convert cpython list to typed-list. """
tl = List.empty_list(int32)
for k in l:
tl.append(k)
return tl
class TestTypedList(MemoryLeakMixin, TestCase):
def test_basic(self):
l = List.empty_list(int32)
# len
self.assertEqual(len(l), 0)
# append
l.append(0)
# len
self.assertEqual(len(l), 1)
# setitem
l.append(0)
l.append(0)
l[0] = 10
l[1] = 11
l[2] = 12
# getitem
self.assertEqual(l[0], 10)
self.assertEqual(l[1], 11)
self.assertEqual(l[2], 12)
self.assertEqual(l[-3], 10)
self.assertEqual(l[-2], 11)
self.assertEqual(l[-1], 12)
# __iter__
        # the default __iter__ from MutableSequence will raise an IndexError
        # via __getitem__ and thus leak an exception, so this loop checks that
        # iteration completes cleanly
for i in l:
pass
# contains
self.assertTrue(10 in l)
self.assertFalse(0 in l)
# count
l.append(12)
self.assertEqual(l.count(0), 0)
self.assertEqual(l.count(10), 1)
self.assertEqual(l.count(12), 2)
# pop
self.assertEqual(len(l), 4)
self.assertEqual(l.pop(), 12)
self.assertEqual(len(l), 3)
self.assertEqual(l.pop(1), 11)
self.assertEqual(len(l), 2)
# extend
l.extend((100, 200, 300))
self.assertEqual(len(l), 5)
self.assertEqual(list(l), [10, 12, 100, 200, 300])
# insert
l.insert(0, 0)
self.assertEqual(list(l), [0, 10, 12, 100, 200, 300])
l.insert(3, 13)
self.assertEqual(list(l), [0, 10, 12, 13, 100, 200, 300])
l.insert(100, 400)
self.assertEqual(list(l), [0, 10, 12, 13, 100, 200, 300, 400])
# remove
l.remove(0)
l.remove(400)
l.remove(13)
self.assertEqual(list(l), [10, 12, 100, 200, 300])
# clear
l.clear()
self.assertEqual(len(l), 0)
self.assertEqual(list(l), [])
# reverse
l.extend(tuple(range(10, 20)))
l.reverse()
self.assertEqual(list(l), list(range(10, 20))[::-1])
# copy
new = l.copy()
self.assertEqual(list(new), list(range(10, 20))[::-1])
# equal
self.assertEqual(l, new)
# not equal
new[-1] = 42
self.assertNotEqual(l, new)
# index
self.assertEqual(l.index(15), 4)
def test_unsigned_access(self):
L = List.empty_list(int32)
ui32_0 = types.uint32(0)
ui32_1 = types.uint32(1)
ui32_2 = types.uint32(2)
# insert
L.append(types.uint32(10))
L.append(types.uint32(11))
L.append(types.uint32(12))
self.assertEqual(len(L), 3)
# getitem
self.assertEqual(L[ui32_0], 10)
self.assertEqual(L[ui32_1], 11)
self.assertEqual(L[ui32_2], 12)
# setitem
L[ui32_0] = 123
L[ui32_1] = 456
L[ui32_2] = 789
self.assertEqual(L[ui32_0], 123)
self.assertEqual(L[ui32_1], 456)
self.assertEqual(L[ui32_2], 789)
# index
ui32_123 = types.uint32(123)
ui32_456 = types.uint32(456)
ui32_789 = types.uint32(789)
self.assertEqual(L.index(ui32_123), 0)
self.assertEqual(L.index(ui32_456), 1)
self.assertEqual(L.index(ui32_789), 2)
# delitem
L.__delitem__(ui32_2)
del L[ui32_1]
self.assertEqual(len(L), 1)
self.assertEqual(L[ui32_0], 123)
# pop
L.append(2)
L.append(3)
L.append(4)
self.assertEqual(len(L), 4)
self.assertEqual(L.pop(), 4)
self.assertEqual(L.pop(ui32_2), 3)
self.assertEqual(L.pop(ui32_1), 2)
self.assertEqual(L.pop(ui32_0), 123)
@parfors_skip_unsupported
def test_unsigned_prange(self):
@njit(parallel=True)
def foo(a):
r = types.uint64(3)
s = types.uint64(0)
for i in prange(r):
s = s + a[i]
return s
a = List.empty_list(types.uint64)
a.append(types.uint64(12))
a.append(types.uint64(1))
a.append(types.uint64(7))
self.assertEqual(foo(a), 20)
def test_compiled(self):
@njit
def producer():
l = List.empty_list(int32)
l.append(23)
return l
@njit
def consumer(l):
return l[0]
l = producer()
val = consumer(l)
self.assertEqual(val, 23)
def test_getitem_slice(self):
""" Test getitem using a slice.
        This test suffers from combinatorial explosion, so we parametrize it
and compare results against the regular list in a quasi fuzzing
approach.
"""
# initialize regular list
rl = list(range(10, 20))
# initialize typed list
tl = List.empty_list(int32)
for i in range(10, 20):
tl.append(i)
# define the ranges
start_range = list(range(-20, 30))
stop_range = list(range(-20, 30))
step_range = [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]
# check that they are the same initially
self.assertEqual(rl, list(tl))
# check that copy by slice works, no start, no stop, no step
self.assertEqual(rl[:], list(tl[:]))
# start only
for sa in start_range:
self.assertEqual(rl[sa:], list(tl[sa:]))
# stop only
for so in stop_range:
self.assertEqual(rl[:so], list(tl[:so]))
# step only
for se in step_range:
self.assertEqual(rl[::se], list(tl[::se]))
# start and stop
for sa, so in product(start_range, stop_range):
self.assertEqual(rl[sa:so], list(tl[sa:so]))
# start and step
for sa, se in product(start_range, step_range):
self.assertEqual(rl[sa::se], list(tl[sa::se]))
# stop and step
for so, se in product(stop_range, step_range):
self.assertEqual(rl[:so:se], list(tl[:so:se]))
# start, stop and step
for sa, so, se in product(start_range, stop_range, step_range):
self.assertEqual(rl[sa:so:se], list(tl[sa:so:se]))
def test_setitem_slice(self):
""" Test setitem using a slice.
        This test suffers from combinatorial explosion, so we parametrize it
and compare results against the regular list in a quasi fuzzing
approach.
"""
def setup(start=10, stop=20):
# initialize regular list
rl_ = list(range(start, stop))
            # initialize typed list
tl_ = List.empty_list(int32)
# populate typed list
for i in range(start, stop):
tl_.append(i)
# check they are the same
self.assertEqual(rl_, list(tl_))
return rl_, tl_
### Simple slicing ###
# assign to itself
rl, tl = setup()
rl[:], tl[:] = rl, tl
self.assertEqual(rl, list(tl))
# extend self
rl, tl = setup()
rl[len(rl):], tl[len(tl):] = rl, tl
self.assertEqual(rl, list(tl))
# prepend self
rl, tl = setup()
rl[:0], tl[:0] = rl, tl
self.assertEqual(rl, list(tl))
# partial assign to self, with equal length
rl, tl = setup()
rl[3:5], tl[3:5] = rl[6:8], tl[6:8]
self.assertEqual(rl, list(tl))
# partial assign to self, with larger slice
rl, tl = setup()
rl[3:5], tl[3:5] = rl[6:9], tl[6:9]
self.assertEqual(rl, list(tl))
# partial assign to self, with smaller slice
rl, tl = setup()
rl[3:5], tl[3:5] = rl[6:7], tl[6:7]
self.assertEqual(rl, list(tl))
# extend
rl, tl = setup()
rl[len(rl):] = list(range(110, 120))
tl[len(tl):] = to_tl(range(110,120))
self.assertEqual(rl, list(tl))
# extend empty
rl, tl = setup(0, 0)
rl[len(rl):] = list(range(110, 120))
tl[len(tl):] = to_tl(range(110,120))
self.assertEqual(rl, list(tl))
# extend singleton
rl, tl = setup(0, 1)
rl[len(rl):] = list(range(110, 120))
tl[len(tl):] = to_tl(range(110,120))
self.assertEqual(rl, list(tl))
# prepend
rl, tl = setup()
rl[:0], tl[:0] = list(range(110, 120)), to_tl(range(110,120))
self.assertEqual(rl, list(tl))
# prepend empty
rl, tl = setup(0,0)
rl[:0], tl[:0] = list(range(110, 120)), to_tl(range(110,120))
self.assertEqual(rl, list(tl))
# prepend singleton
rl, tl = setup(0,1)
rl[:0], tl[:0] = list(range(110, 120)), to_tl(range(110,120))
self.assertEqual(rl, list(tl))
# simple equal length assignment, just replace
rl, tl = setup()
rl[1:3], tl[1:3] = [100, 200], to_tl([100, 200])
self.assertEqual(rl, list(tl))
# slice for assignment is larger, need to replace and insert
rl, tl = setup()
rl[1:3], tl[1:3] = [100, 200, 300, 400], to_tl([100, 200, 300, 400])
self.assertEqual(rl, list(tl))
# slice for assignment is smaller, need to replace and delete
rl, tl = setup()
rl[1:3], tl[1:3] = [100], to_tl([100])
self.assertEqual(rl, list(tl))
# slice for assignment is smaller and item is empty, need to delete
rl, tl = setup()
rl[1:3], tl[1:3] = [], to_tl([])
self.assertEqual(rl, list(tl))
# Synonym for clear
rl, tl = setup()
rl[:], tl[:] = [], to_tl([])
self.assertEqual(rl, list(tl))
### Extended slicing ###
# replace every second element
rl, tl = setup()
rl[::2], tl[::2] = [100,200,300,400,500], to_tl([100,200,300,400,500])
self.assertEqual(rl, list(tl))
# replace every second element, backwards
rl, tl = setup()
rl[::-2], tl[::-2] = [100,200,300,400,500], to_tl([100,200,300,400,500])
self.assertEqual(rl, list(tl))
# reverse assign to itself
rl, tl = setup()
rl[::-1], tl[::-1] = rl, tl
self.assertEqual(rl, list(tl))
def test_setitem_slice_value_error(self):
self.disable_leak_check()
tl = List.empty_list(int32)
for i in range(10,20):
tl.append(i)
assignment = List.empty_list(int32)
for i in range(1, 4):
assignment.append(i)
with self.assertRaises(ValueError) as raises:
tl[8:3:-1] = assignment
self.assertIn(
"length mismatch for extended slice and sequence",
str(raises.exception),
)
def test_delitem_slice(self):
""" Test delitem using a slice.
        This test suffers from combinatorial explosion, so we parametrize it
and compare results against the regular list in a quasi fuzzing
approach.
"""
def setup(start=10, stop=20):
# initialize regular list
rl_ = list(range(start, stop))
            # initialize typed list
tl_ = List.empty_list(int32)
# populate typed list
for i in range(start, stop):
tl_.append(i)
# check they are the same
self.assertEqual(rl_, list(tl_))
return rl_, tl_
# define the ranges
start_range = list(range(-20, 30))
stop_range = list(range(-20, 30))
step_range = [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]
rl, tl = setup()
# check that they are the same initially
self.assertEqual(rl, list(tl))
# check that deletion of the whole list by slice works
del rl[:]
del tl[:]
self.assertEqual(rl, list(tl))
# start only
for sa in start_range:
rl, tl = setup()
del rl[sa:]
del tl[sa:]
self.assertEqual(rl, list(tl))
# stop only
for so in stop_range:
rl, tl = setup()
del rl[:so]
del tl[:so]
self.assertEqual(rl, list(tl))
# step only
for se in step_range:
rl, tl = setup()
del rl[::se]
del tl[::se]
self.assertEqual(rl, list(tl))
# start and stop
for sa, so in product(start_range, stop_range):
rl, tl = setup()
del rl[sa:so]
del tl[sa:so]
self.assertEqual(rl, list(tl))
# start and step
for sa, se in product(start_range, step_range):
rl, tl = setup()
del rl[sa::se]
del tl[sa::se]
self.assertEqual(rl, list(tl))
# stop and step
for so, se in product(stop_range, step_range):
rl, tl = setup()
del rl[:so:se]
del tl[:so:se]
self.assertEqual(rl, list(tl))
# start, stop and step
for sa, so, se in product(start_range, stop_range, step_range):
rl, tl = setup()
del rl[sa:so:se]
del tl[sa:so:se]
self.assertEqual(rl, list(tl))
def test_list_create_no_jit_using_empty_list(self):
with override_config('DISABLE_JIT', True):
with forbid_codegen():
l = List.empty_list(types.int32)
self.assertEqual(type(l), list)
def test_list_create_no_jit_using_List(self):
with override_config('DISABLE_JIT', True):
with forbid_codegen():
l = List()
self.assertEqual(type(l), list)
class TestAllocation(MemoryLeakMixin, TestCase):
def test_allocation(self):
# kwarg version
for i in range(16):
tl = List.empty_list(types.int32, allocated=i)
self.assertEqual(tl._allocated(), i)
# posarg version
for i in range(16):
tl = List.empty_list(types.int32, i)
self.assertEqual(tl._allocated(), i)
def test_growth_and_shrinkage(self):
tl = List.empty_list(types.int32)
growth_before = {0: 0, 4:4, 8:8, 16:16}
growth_after = {0: 4, 4:8, 8:16, 16:25}
for i in range(17):
if i in growth_before:
self.assertEqual(growth_before[i], tl._allocated())
tl.append(i)
if i in growth_after:
self.assertEqual(growth_after[i], tl._allocated())
shrink_before = {17: 25, 12:25, 9:18, 6:12, 4:8, 3:6, 2:5, 1:4}
shrink_after = {17: 25, 12:18, 9:12, 6:8, 4:6, 3:5, 2:4, 1:0}
for i in range(17, 0, -1):
if i in shrink_before:
self.assertEqual(shrink_before[i], tl._allocated())
tl.pop()
if i in shrink_after:
self.assertEqual(shrink_after[i], tl._allocated())
class TestExtend(MemoryLeakMixin, TestCase):
def test_extend_other(self):
@njit
def impl(other):
l = List.empty_list(types.int32)
for x in range(10):
l.append(x)
l.extend(other)
return l
other = List.empty_list(types.int32)
for x in range(10):
other.append(x)
expected = impl.py_func(other)
got = impl(other)
self.assertEqual(expected, got)
def test_extend_self(self):
@njit
def impl():
l = List.empty_list(types.int32)
for x in range(10):
l.append(x)
l.extend(l)
return l
expected = impl.py_func()
got = impl()
self.assertEqual(expected, got)
def test_extend_tuple(self):
@njit
def impl():
l = List.empty_list(types.int32)
for x in range(10):
l.append(x)
l.extend((100,200,300))
return l
expected = impl.py_func()
got = impl()
self.assertEqual(expected, got)
@njit
def cmp(a, b):
return a < b, a <= b, a == b, a != b, a >= b, a > b
class TestComparisons(MemoryLeakMixin, TestCase):
def _cmp_dance(self, expected, pa, pb, na, nb):
# interpreter with regular list
self.assertEqual(cmp.py_func(pa, pb), expected)
# interpreter with typed-list
py_got = cmp.py_func(na, nb)
self.assertEqual(py_got, expected)
# compiled with typed-list
jit_got = cmp(na, nb)
self.assertEqual(jit_got, expected)
def test_empty_vs_empty(self):
pa, pb = [], []
na, nb = to_tl(pa), to_tl(pb)
expected = False, True, True, False, True, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_empty_vs_singleton(self):
pa, pb = [], [0]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_empty(self):
pa, pb = [0], []
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_singleton_equal(self):
pa, pb = [0], [0]
na, nb = to_tl(pa), to_tl(pb)
expected = False, True, True, False, True, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_singleton_less_than(self):
pa, pb = [0], [1]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_singleton_greater_than(self):
pa, pb = [1], [0]
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_equal(self):
pa, pb = [1, 2, 3], [1, 2, 3]
na, nb = to_tl(pa), to_tl(pb)
expected = False, True, True, False, True, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_first_shorter(self):
pa, pb = [1, 2], [1, 2, 3]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_second_shorter(self):
pa, pb = [1, 2, 3], [1, 2]
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_first_less_than(self):
pa, pb = [1, 2, 2], [1, 2, 3]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_first_greater_than(self):
pa, pb = [1, 2, 3], [1, 2, 2]
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_equals_non_list(self):
l = to_tl([1, 2, 3])
self.assertFalse(any(cmp.py_func(l, 1)))
self.assertFalse(any(cmp(l, 1)))
class TestListInferred(TestCase):
def test_simple_refine_append(self):
@njit
def foo():
l = List()
l.append(1)
return l
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [1])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_simple_refine_insert(self):
@njit
def foo():
l = List()
l.insert(0, 1)
return l
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [1])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_refine_extend_list(self):
@njit
def foo():
a = List()
b = List()
for i in range(3):
b.append(i)
a.extend(b)
return a
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [0, 1, 2])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_refine_extend_set(self):
@njit
def foo():
l = List()
l.extend((0, 1, 2))
return l
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
self.assertEqual(list(got), [0, 1, 2])
self.assertEqual(typeof(got).item_type, typeof(1))
def test_refine_list_extend_iter(self):
@njit
def foo():
l = List()
d = Dict()
d[0] = 0
# d.keys() provides a DictKeysIterableType
l.extend(d.keys())
return l
got = foo()
self.assertEqual(0, got[0])
class TestListRefctTypes(MemoryLeakMixin, TestCase):
@skip_py2
def test_str_item(self):
@njit
def foo():
l = List.empty_list(types.unicode_type)
for s in ("a", "ab", "abc", "abcd"):
l.append(s)
return l
l = foo()
expected = ["a", "ab", "abc", "abcd"]
for i, s in enumerate(expected):
self.assertEqual(l[i], s)
self.assertEqual(list(l), expected)
# Test insert replacement
l[3] = 'uxyz'
self.assertEqual(l[3], 'uxyz')
# Test list growth
nelem = 100
for i in range(4, nelem):
l.append(str(i))
self.assertEqual(l[i], str(i))
@skip_py2
def test_str_item_refcount_replace(self):
@njit
def foo():
# use some tricks to make ref-counted unicode
i, j = 'ab', 'c'
a = i + j
m, n = 'zy', 'x'
z = m + n
l = List.empty_list(types.unicode_type)
l.append(a)
# This *should* dec' a and inc' z thus tests that items that are
# replaced are also dec'ed.
l[0] = z
ra, rz = get_refcount(a), get_refcount(z)
return l, ra, rz
l, ra, rz = foo()
self.assertEqual(l[0], "zyx")
self.assertEqual(ra, 1)
self.assertEqual(rz, 2)
@skip_py2
def test_dict_as_item_in_list(self):
@njit
def foo():
l = List.empty_list(Dict.empty(int32, int32))
d = Dict.empty(int32, int32)
d[0] = 1
# This increments the refcount for d
l.append(d)
return get_refcount(d)
c = foo()
self.assertEqual(2, c)
@skip_py2
def test_dict_as_item_in_list_multi_refcount(self):
@njit
def foo():
l = List.empty_list(Dict.empty(int32, int32))
d = Dict.empty(int32, int32)
d[0] = 1
# This increments the refcount for d, twice
l.append(d)
l.append(d)
return get_refcount(d)
c = foo()
self.assertEqual(3, c)
@skip_py2
def test_list_as_value_in_dict(self):
@njit
def foo():
d = Dict.empty(int32, List.empty_list(int32))
l = List.empty_list(int32)
l.append(0)
# This increments the refcount for l
d[0] = l
return get_refcount(l)
c = foo()
self.assertEqual(2, c)
@skip_py2
def test_list_as_item_in_list(self):
nested_type = types.ListType(types.int32)
@njit
def foo():
la = List.empty_list(nested_type)
lb = List.empty_list(types.int32)
lb.append(1)
la.append(lb)
return la
expected = foo.py_func()
got = foo()
self.assertEqual(expected, got)
@skip_py2
def test_array_as_item_in_list(self):
nested_type = types.Array(types.float64, 1, 'C')
@njit
def foo():
l = List.empty_list(nested_type)
a = np.zeros((1,))
l.append(a)
return l
expected = foo.py_func()
got = foo()
# Need to compare the nested arrays
self.assertTrue(np.all(expected[0] == got[0]))
@skip_py2
def test_jitclass_as_item_in_list(self):
spec = [
('value', int32), # a simple scalar field
('array', float32[:]), # an array field
]
@jitclass(spec)
class Bag(object):
def __init__(self, value):
self.value = value
self.array = np.zeros(value, dtype=np.float32)
@property
def size(self):
return self.array.size
def increment(self, val):
for i in range(self.size):
self.array[i] += val
return self.array
@njit
def foo():
l = List()
l.append(Bag(21))
l.append(Bag(22))
l.append(Bag(23))
return l
expected = foo.py_func()
got = foo()
def bag_equal(one, two):
# jitclasses couldn't override __eq__ at time of writing
self.assertEqual(one.value, two.value)
np.testing.assert_allclose(one.array, two.array)
[bag_equal(a, b) for a, b in zip(expected, got)]
@skip_py2
def test_storage_model_mismatch(self):
# https://github.com/numba/numba/issues/4520
# check for storage model mismatch in refcount ops generation
lst = List()
ref = [
("a", True, "a"),
("b", False, "b"),
("c", False, "c"),
]
# populate
for x in ref:
lst.append(x)
# test
for i, x in enumerate(ref):
self.assertEqual(lst[i], ref[i])
@skip_py2
def test_equals_on_list_with_dict_for_equal_lists(self):
# https://github.com/numba/numba/issues/4879
a, b = List(), Dict()
b["a"] = 1
a.append(b)
c, d = List(), Dict()
d["a"] = 1
c.append(d)
self.assertEqual(a, c)
@skip_py2
def test_equals_on_list_with_dict_for_unequal_dicts(self):
# https://github.com/numba/numba/issues/4879
a, b = List(), Dict()
b["a"] = 1
a.append(b)
c, d = List(), Dict()
d["a"] = 2
c.append(d)
self.assertNotEqual(a, c)
@skip_py2
def test_equals_on_list_with_dict_for_unequal_lists(self):
# https://github.com/numba/numba/issues/4879
a, b = List(), Dict()
b["a"] = 1
a.append(b)
c, d, e = List(), Dict(), Dict()
d["a"] = 1
e["b"] = 2
c.append(d)
c.append(e)
self.assertNotEqual(a, c)
class TestListSort(MemoryLeakMixin, TestCase):
def setUp(self):
super(TestListSort, self).setUp()
np.random.seed(0)
def make(self, ctor, data):
lst = ctor()
lst.extend(data)
return lst
def make_both(self, data):
return {
'py': self.make(list, data),
'nb': self.make(List, data),
}
def test_sort_no_args(self):
def udt(lst):
lst.sort()
return lst
for nelem in [13, 29, 127]:
my_lists = self.make_both(np.random.randint(0, nelem, nelem))
self.assertEqual(list(udt(my_lists['nb'])), udt(my_lists['py']))
def test_sort_all_args(self):
def udt(lst, key, reverse):
lst.sort(key=key, reverse=reverse)
return lst
possible_keys = [
lambda x: -x, # negative
lambda x: 1 / (1 + x), # make float
lambda x: (x, -x), # tuple
lambda x: x, # identity
]
possible_reverse = [True, False]
for key, reverse in product(possible_keys, possible_reverse):
my_lists = self.make_both(np.random.randint(0, 100, 23))
msg = "case for key={} reverse={}".format(key, reverse)
self.assertEqual(
list(udt(my_lists['nb'], key=key, reverse=reverse)),
udt(my_lists['py'], key=key, reverse=reverse),
msg=msg,
)
def test_sort_dispatcher_key(self):
def udt(lst, key):
lst.sort(key=key)
return lst
my_lists = self.make_both(np.random.randint(0, 100, 31))
py_key = lambda x: x + 1
nb_key = njit(lambda x: x + 1)
# test typedlist with jitted function
self.assertEqual(
list(udt(my_lists['nb'], key=nb_key)),
udt(my_lists['py'], key=py_key),
)
# test typedlist with and without jitted function
self.assertEqual(
list(udt(my_lists['nb'], key=nb_key)),
list(udt(my_lists['nb'], key=py_key)),
)
def test_sort_in_jit_w_lambda_key(self):
@njit
def udt(lst):
lst.sort(key=lambda x: -x)
return lst
lst = self.make(List, np.random.randint(0, 100, 31))
self.assertEqual(udt(lst), udt.py_func(lst))
def test_sort_in_jit_w_global_key(self):
@njit
def keyfn(x):
return -x
@njit
def udt(lst):
lst.sort(key=keyfn)
return lst
lst = self.make(List, np.random.randint(0, 100, 31))
self.assertEqual(udt(lst), udt.py_func(lst))
def test_sort_on_arrays(self):
@njit
def foo(lst):
lst.sort(key=lambda arr: np.sum(arr))
return lst
arrays = [np.random.random(3) for _ in range(10)]
my_lists = self.make_both(arrays)
self.assertEqual(
list(foo(my_lists['nb'])),
foo.py_func(my_lists['py']),
)
| 29.827148
| 80
| 0.528042
|
4d5d10f090adbbffaaa58792e17aa0f290ae1ab8
| 371
|
py
|
Python
|
tests/test_modules/test_ADOdin/test_adodin_blocks.py
|
aaron-parsons/pymalcolm
|
4e7ebd6b09382ab7e013278a81097d17873fa5c4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/test_ADOdin/test_adodin_blocks.py
|
aaron-parsons/pymalcolm
|
4e7ebd6b09382ab7e013278a81097d17873fa5c4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_modules/test_ADOdin/test_adodin_blocks.py
|
aaron-parsons/pymalcolm
|
4e7ebd6b09382ab7e013278a81097d17873fa5c4
|
[
"Apache-2.0"
] | null | null | null |
from mock import Mock
from malcolm.testutil import ChildTestCase
from malcolm.modules.ADOdin.blocks import odin_runnable_block
class TestADOdinBlocks(ChildTestCase):
def test_odin_detector_runnable_block(self):
self.create_child_block(
odin_runnable_block, Mock(),
mri_prefix="mri_prefix", pv_prefix="pv_prefix", config_dir="/tmp")
| 30.916667
| 78
| 0.757412
|
8ffa61871dece7c31eb6c5f5e5f9b2ec161d9ef9
| 2,343
|
py
|
Python
|
docs/conf.py
|
Lesson-ThienHi/thienhi_shop
|
1c595d70299e1fcce12c3610e27b66c89bbadda6
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
Lesson-ThienHi/thienhi_shop
|
1c595d70299e1fcce12c3610e27b66c89bbadda6
|
[
"MIT"
] | 2
|
2022-03-30T06:34:29.000Z
|
2022-03-31T06:34:49.000Z
|
docs/conf.py
|
Lesson-ThienHi/thienhi_shop
|
1c595d70299e1fcce12c3610e27b66c89bbadda6
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ["CELERY_BROKER_URL"] = os.getenv("REDIS_URL", "redis://redis:6379")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "shop_thienhi"
copyright = """2022, ThienHi"""
author = "ThienHi"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 36.609375
| 79
| 0.668374
|
5d3a7a4c828e00572f691871f5959cb379c716a9
| 3,021
|
py
|
Python
|
tests/test_cli.py
|
OasisLMF/OEDtransform
|
688a9cf90a7f11ee19f5f48fcbe1cb93962ea67d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T14:53:27.000Z
|
2022-03-18T14:53:27.000Z
|
tests/test_cli.py
|
OasisLMF/OpenDataTransform
|
688a9cf90a7f11ee19f5f48fcbe1cb93962ea67d
|
[
"BSD-3-Clause"
] | 25
|
2021-08-05T16:17:24.000Z
|
2022-03-29T16:28:35.000Z
|
tests/test_cli.py
|
OasisLMF/OEDtransform
|
688a9cf90a7f11ee19f5f48fcbe1cb93962ea67d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-21T14:39:20.000Z
|
2022-01-21T14:39:20.000Z
|
from unittest import mock
import yaml
from click.testing import CliRunner
from hypothesis import given
from hypothesis.strategies import fixed_dictionaries, just, one_of, tuples
from converter.cli import cli
from converter.config import Config
from converter.mapping import BaseMapping
from converter.runner import BaseRunner
from tests.config.test_config import config_file
class FakeRunner(BaseRunner):
def transform(self, extractor, mapping):
return []
class FakeMapping(BaseMapping):
def get_transformations(self):
return []
def options():
return tuples(
one_of(fixed_dictionaries({"foo": just({"file": "bizz"})}), just({})),
one_of(
fixed_dictionaries({"converter_foo_env": just("fizz")}),
just({}),
),
one_of(tuples(just("foo.args"), just("fuzz")), just(())),
just(
{
"runner": {"path": "tests.test_cli.FakeRunner"},
"mapping": {
"path": "tests.test_cli.FakeMapping",
"options": {"input_format": "A", "output_format": "B"},
},
"extractor": {"path": "tests.connector.fakes.FakeConnector"},
"loader": {"path": "tests.connector.fakes.FakeConnector"},
},
),
)
@given(opts=options())
def test_show_config(opts):
conf, env, argv, ovr = opts
argv_dict = dict([argv] if argv else [])
argv = ["-o", *argv] if argv else []
with config_file({**conf, **ovr}) as f, mock.patch("os.environ", env):
expected_conf = Config(
config_path=f,
env=env,
argv=argv_dict,
overrides=ovr,
)
runner = CliRunner()
result = runner.invoke(cli, [*argv, "--config", f, "show-config"])
assert result.exit_code == 0
assert yaml.load(result.output, yaml.SafeLoader) == expected_conf
@given(opts=options())
def test_run(opts):
conf, env, argv, ovr = opts
argv_dict = dict([argv] if argv else [])
argv = ["-o", *argv] if argv else []
with config_file({**conf, **ovr}) as f, mock.patch(
"converter.cli.Controller"
) as mock_controller, mock.patch("os.environ", env):
expected_conf = Config(
config_path=f,
env=env,
argv=argv_dict,
overrides=ovr,
)
runner = CliRunner()
result = runner.invoke(cli, [*argv, "--config", f, "run"])
assert result.exit_code == 0
mock_controller.assert_called_once_with(expected_conf)
def test_run_raises___exception_is_logged():
expected_exception = Exception("Some Error")
with mock.patch("logging.exception") as mock_exception_logger, mock.patch(
"converter.cli.Controller", side_effect=expected_exception
):
runner = CliRunner()
result = runner.invoke(cli, ["run"])
mock_exception_logger.assert_called_once_with(expected_exception)
assert result.exit_code == 1
| 28.771429
| 78
| 0.602119
|
3a9ffb2008dcbe723586d855dd457ac4f001297d
| 1,087
|
py
|
Python
|
pyNastran/gui/dev/save_load.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 293
|
2015-03-22T20:22:01.000Z
|
2022-03-14T20:28:24.000Z
|
pyNastran/gui/dev/save_load.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 512
|
2015-03-14T18:39:27.000Z
|
2022-03-31T16:15:43.000Z
|
pyNastran/gui/dev/save_load.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | 136
|
2015-03-19T03:26:06.000Z
|
2022-03-25T22:14:54.000Z
|
import json
data = {
'completed_in': 0.074,
'max_id': 264043230692245504,
'max_id_str': '264043230692245504',
'next_page': '?page=2&max_id=264043230692245504&q=python&rpp=5',
'page': 1,
'query': 'python',
'refresh_url': '?since_id=264043230692245504&q=python',
'results': [
{'created_at': 'Thu, 01 Nov 2012 16:36:26 +0000',
'from_user': 'cat',
},
{'created_at': 'Thu, 01 Nov 2012 16:36:14 +0000',
'from_user': 'cat',
},
{'created_at': 'Thu, 01 Nov 2012 16:36:13 +0000',
'from_user': 'cat',
},
{'created_at': 'Thu, 01 Nov 2012 16:36:07 +0000',
'from_user': 'cat',
},
{'created_at': 'Thu, 01 Nov 2012 16:36:04 +0000',
'from_user': 'cat',
},
],
'results_per_page': 5,
'since_id': 0,
'since_id_str': '0'
}
#print(json.dumps(data, indent=4))
with open('f.json', 'w') as f:
json.dump(data, f, indent=4)
#parsed_json = json.loads(json_string)
with open('f.json', 'r') as f2:
data2 = json.load(f2)
print(data2)
| 27.175
| 68
| 0.546458
|
9549cdbee430a202bf0c02a19fc0dd13d6b22edf
| 42
|
py
|
Python
|
IDE/Missup.py
|
Xpliot/IDE
|
7b9b3debf8d5369f8ac6747fb46f5be25d3cfca1
|
[
"MIT"
] | null | null | null |
IDE/Missup.py
|
Xpliot/IDE
|
7b9b3debf8d5369f8ac6747fb46f5be25d3cfca1
|
[
"MIT"
] | null | null | null |
IDE/Missup.py
|
Xpliot/IDE
|
7b9b3debf8d5369f8ac6747fb46f5be25d3cfca1
|
[
"MIT"
] | null | null | null |
def add(a,b):
return a+b
add(54, 23)
| 8.4
| 14
| 0.547619
|
055431ee8ee3ac14676fbfaf01642c79a03e6139
| 678
|
py
|
Python
|
django_redux/engine.py
|
fcurella/django_redux
|
0215fcd09cd02049778e98d77467d4900c62b8c2
|
[
"BSD-3-Clause"
] | 42
|
2017-02-15T13:39:41.000Z
|
2019-06-24T17:59:35.000Z
|
django_redux/engine.py
|
channels-frontend/django_redux
|
0215fcd09cd02049778e98d77467d4900c62b8c2
|
[
"BSD-3-Clause"
] | 3
|
2019-08-19T19:35:23.000Z
|
2022-02-26T16:46:54.000Z
|
django_redux/engine.py
|
channels-frontend/django_redux
|
0215fcd09cd02049778e98d77467d4900c62b8c2
|
[
"BSD-3-Clause"
] | 4
|
2017-06-28T18:35:21.000Z
|
2019-01-20T14:49:29.000Z
|
from channels.layers import get_channel_layer
async def send_action(group_name, action):
"""
Convenience method to dispatch redux actions from channels.
Usage::
send_action("group_name", {
"type": "MY_ACTION",
"payload": {
"id": 1,
"name": "Lorem",
}
})
"""
channel_layer = get_channel_layer()
data = {
'type': "redux.action",
'action': action,
}
await channel_layer.group_send(
group_name,
data
)
def action(action_type):
def wrap(func):
func.action_type = action_type
return func
return wrap
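# Hypothetical usage sketch for the ``action`` decorator above (the consumer
# class below is illustrative, not necessarily this package's API): the
# decorator only tags a handler with an ``action_type`` attribute so a
# consumer can dispatch incoming redux actions to the matching method.
#
#     class MyConsumer(SomeConsumerBase):
#         @action('INCREMENT_COUNTER')
#         def incr(self, message):
#             ...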
| 18.833333
| 63
| 0.538348
|
40502351eaf29688fab9e182e67fd1cd214d5167
| 196
|
py
|
Python
|
rhea/cores/usbext/fpgalink/__init__.py
|
meetps/rhea
|
f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0
|
[
"MIT"
] | 1
|
2022-03-16T23:56:09.000Z
|
2022-03-16T23:56:09.000Z
|
rhea/cores/usbext/fpgalink/__init__.py
|
meetps/rhea
|
f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0
|
[
"MIT"
] | null | null | null |
rhea/cores/usbext/fpgalink/__init__.py
|
meetps/rhea
|
f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from . import _fpgalink_fx2 as fpgalink
from ._fpgalink_fx2 import get_interfaces
from ._fpgalink_fx2 import fpgalink_fx2
from ._fl_convert import convert
| 24.5
| 41
| 0.857143
|
f0c8340e9b6f928085a100a3d56bf3541cf1d7f3
| 900
|
py
|
Python
|
telegram_gcloner/handlers/cancel.py
|
Selena-of/ClonBot
|
22a291a8a56c6bb33596f7c9cecd2857e78ec55e
|
[
"MIT"
] | 79
|
2021-04-06T16:14:37.000Z
|
2022-03-31T06:35:50.000Z
|
telegram_gcloner/handlers/cancel.py
|
Selena-of/ClonBot
|
22a291a8a56c6bb33596f7c9cecd2857e78ec55e
|
[
"MIT"
] | 1
|
2021-05-01T14:03:21.000Z
|
2021-11-18T18:29:44.000Z
|
telegram_gcloner/handlers/cancel.py
|
Selena-of/ClonBot
|
22a291a8a56c6bb33596f7c9cecd2857e78ec55e
|
[
"MIT"
] | 184
|
2021-04-06T16:14:38.000Z
|
2022-03-31T06:35:52.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
from telegram.ext import Dispatcher, CallbackQueryHandler
from utils.helper import alert_users
from utils.restricted import restricted
logger = logging.getLogger(__name__)
def init(dispatcher: Dispatcher):
"""Provide handlers initialization."""
dispatcher.add_handler(CallbackQueryHandler(cancel, pattern=r'^cancel$'))
@restricted
def cancel(update, context):
query = update.callback_query
if query.message.chat_id < 0 and \
(not query.message.reply_to_message or
query.from_user.id != query.message.reply_to_message.from_user.id):
alert_users(context, update.effective_user, 'invalid caller', query.data)
query.answer(text='Yo-he!', show_alert=True)
return
# query.message.edit_reply_markup(reply_markup=None)
query.message.delete()
| 31.034483
| 82
| 0.703333
|
1e03eadfb95d1b9182301855a5865086b87ad09d
| 598
|
py
|
Python
|
tests/_testsite/urls.py
|
kapt-labs/django-cross-site-urls-gitlab
|
b0d7415ee0f86e64c71d50ccea73f9c4a46e8768
|
[
"BSD-3-Clause"
] | 1
|
2016-03-01T20:49:18.000Z
|
2016-03-01T20:49:18.000Z
|
tests/_testsite/urls.py
|
kapt-labs/django-cross-site-urls-gitlab
|
b0d7415ee0f86e64c71d50ccea73f9c4a46e8768
|
[
"BSD-3-Clause"
] | null | null | null |
tests/_testsite/urls.py
|
kapt-labs/django-cross-site-urls-gitlab
|
b0d7415ee0f86e64c71d50ccea73f9c4a46e8768
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import include
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls import url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Local application / specific library imports
from cross_site_urls.conf import settings as cross_site_settings
admin.autodiscover()
urlpatterns = i18n_patterns(
'',
url((r'^{}').format(cross_site_settings.DEFAULT_API_URL), include('cross_site_urls.urls')),
)
urlpatterns += staticfiles_urlpatterns()
| 27.181818
| 95
| 0.794314
|
f8d4f57addaa672900b4eb6bed569aa75c240016
| 18,467
|
py
|
Python
|
scripts/audio_thread_log_viewer/viewer_c3.py
|
TinkerBoard-Android/external-adhd
|
ffc625793b55ea7f8e511b9362a88b6ba080c983
|
[
"BSD-1-Clause",
"BSD-3-Clause"
] | 1
|
2021-03-08T11:49:58.000Z
|
2021-03-08T11:49:58.000Z
|
scripts/audio_thread_log_viewer/viewer_c3.py
|
TinkerBoard-Android/external-adhd
|
ffc625793b55ea7f8e511b9362a88b6ba080c983
|
[
"BSD-1-Clause",
"BSD-3-Clause"
] | 1
|
2021-03-23T18:17:08.000Z
|
2021-03-23T18:17:08.000Z
|
scripts/audio_thread_log_viewer/viewer_c3.py
|
TinkerBoard-Android/external-adhd
|
ffc625793b55ea7f8e511b9362a88b6ba080c983
|
[
"BSD-1-Clause",
"BSD-3-Clause"
] | 1
|
2016-10-19T08:33:40.000Z
|
2016-10-19T08:33:40.000Z
|
#!/usr/bin/python
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""Generates an HTML file with plot of buffer level in the audio thread log."""
import argparse
import collections
import logging
import string
import time
page_content = string.Template("""
<html meta charset="UTF8">
<head>
<!-- Load c3.css -->
<link href="https://rawgit.com/masayuki0812/c3/master/c3.css" rel="stylesheet" type="text/css">
<!-- Load d3.js and c3.js -->
<script src="https://d3js.org/d3.v4.min.js" charset="utf-8"></script>
<script src="https://rawgit.com/masayuki0812/c3/master/c3.js" charset="utf-8"></script>
<style type="text/css">
.c3-grid text {
fill: grey;
}
.event_log_box {
font-family: 'Courier New', Courier, 'Lucida Sans Typewriter', 'Lucida Typewriter', monospace;
font-size: 20px;
font-style: normal;
font-variant: normal;
font-weight: 300;
line-height: 26.4px;
white-space: pre;
height:50%;
width:48%;
border:1px solid #ccc;
overflow:auto;
}
.checkbox {
font-size: 30px;
border: 2px;
}
.device {
font-size: 15px;
}
.stream{
font-size: 15px;
}
.fetch{
}
.wake{
}
</style>
<script type="text/javascript">
var selected = null;
draw_chart = function() {
var chart = c3.generate({
data: {
x: 'time',
columns: [
['time', $times],
['buffer_level', $buffer_levels],
],
type: 'bar',
types: {
buffer_level: 'line',
},
onclick: function (d, i) {
elm = document.getElementById(d.x.toFixed(9));
if (selected)
selected.style.color = '';
if (elm === null) {
console.error("Can not find element by ID %s", d.x.toFixed(9));
return;
}
elm.style.color = 'blue';
elm.scrollIntoView();
selected = elm;
},
},
zoom: {
enabled: true,
},
grid: {
x: {
lines: [
$grids,
],
},
},
axis: {
y: {min: 0, max: $max_y},
},
});
};
logs = `$logs`;
put_logs = function () {
document.getElementById('logs').innerHTML = logs;
};
set_initial_checkbox_value = function () {
document.getElementById('device').checked = true;
document.getElementById('stream').checked = true;
document.getElementById('fetch').checked = true;
document.getElementById('wake').checked = true;
}
window.onload = function() {
draw_chart();
put_logs();
set_initial_checkbox_value();
};
function handleClick(checkbox) {
var class_name = checkbox.id;
var elements = document.getElementsByClassName(class_name);
var i;
if (checkbox.checked) {
display_value = "block";
} else {
display_value = "none"
}
console.log("change " + class_name + " to " + display_value);
for (i = 0; i < elements.length; i++) {
elements[i].style.display = display_value;
}
}
</script>
</head>
<body>
<div id="chart" style="height:50%; width:100%" ></div>
<div style="margin:0 auto"; class="checkbox">
<label><input type="checkbox" onclick="handleClick(this);" id="device">Show device removed/added event</label>
<label><input type="checkbox" onclick="handleClick(this);" id="stream">Show stream removed/added event</label>
<label><input type="checkbox" onclick="handleClick(this);" id="fetch">Show fetch event</label>
<label><input type="checkbox" onclick="handleClick(this);" id="wake">Show wake by num_fds=1 event</label>
</div>
<div class="event_log_box", id="logs", style="float:left;"></div>
<textarea class="event_log_box", id="text", style="float:right;"></textarea>
</body>
</html>
""")
def StrToTimestamp(s):
"""Converts a time string to a timestamp.
@param s: A time string like "2019-07-02T15:30:46.684190644".
@returns: Returns a timestamp string like "55846.684190644".
"""
fmt = "%Y-%m-%dT%H:%M:%S"
t = time.strptime(s[:-10], fmt)
# Ignore date to avoid a long timestamp.
ts = t.tm_hour * 3600 + t.tm_min * 60 + t.tm_sec
return "{:d}.{}".format(ts, s[-9:])
Tag = collections.namedtuple('Tag', ['time', 'text', 'position', 'class_name'])
"""
The tuple for tags shown on the plot on certain time.
text is the tag to show, position is the tag position, which is one of
'start', 'middle', 'end', class_name is one of 'device', 'stream', 'fetch',
and 'wake' which will be their CSS class name.
"""
class EventData(object):
"""The base class of an event."""
def __init__(self, time, name):
"""Initializes an EventData.
@param time: A string for event time.
@param name: A string for event name.
"""
self.time = time
self.name = name
self._text = None
self._position = None
self._class_name = None
def GetTag(self):
"""Gets the tag for this event.
@returns: A Tag object. Returns None if no need to show tag.
"""
if self._text:
return Tag(
time=self.time, text=self._text, position=self._position,
class_name=self._class_name)
return None
class DeviceEvent(EventData):
"""Class for device event."""
def __init__(self, time, name, device):
"""Initializes a DeviceEvent.
@param time: A string for event time.
@param name: A string for event name.
@param device: A string for device index.
"""
super(DeviceEvent, self).__init__(time, name)
self.device = device
self._position = 'start'
self._class_name = 'device'
class DeviceRemovedEvent(DeviceEvent):
"""Class for device removed event."""
def __init__(self, time, name, device):
"""Initializes a DeviceRemovedEvent.
@param time: A string for event time.
@param name: A string for event name.
@param device: A string for device index.
"""
super(DeviceRemovedEvent, self).__init__(time, name, device)
self._text = 'Removed Device %s' % self.device
class DeviceAddedEvent(DeviceEvent):
"""Class for device added event."""
def __init__(self, time, name, device):
"""Initializes a DeviceAddedEvent.
@param time: A string for event time.
@param name: A string for event name.
@param device: A string for device index.
"""
super(DeviceAddedEvent, self).__init__(time, name, device)
self._text = 'Added Device %s' % self.device
class LevelEvent(DeviceEvent):
"""Class for device event with buffer level."""
def __init__(self, time, name, device, level):
"""Initializes a LevelEvent.
@param time: A string for event time.
@param name: A string for event name.
@param device: A string for device index.
@param level: An int for buffer level.
"""
super(LevelEvent, self).__init__(time, name, device)
self.level = level
class StreamEvent(EventData):
"""Class for event with stream."""
def __init__(self, time, name, stream):
"""Initializes a StreamEvent.
@param time: A string for event time.
@param name: A string for event name.
@param stream: A string for stream id.
"""
super(StreamEvent, self).__init__(time, name)
self.stream = stream
self._class_name = 'stream'
class FetchStreamEvent(StreamEvent):
"""Class for stream fetch event."""
def __init__(self, time, name, stream):
"""Initializes a FetchStreamEvent.
@param time: A string for event time.
@param name: A string for event name.
@param stream: A string for stream id.
"""
super(FetchStreamEvent, self).__init__(time, name, stream)
self._text = 'Fetch %s' % self.stream
self._position = 'end'
self._class_name = 'fetch'
class StreamAddedEvent(StreamEvent):
"""Class for stream added event."""
def __init__(self, time, name, stream):
"""Initializes a StreamAddedEvent.
@param time: A string for event time.
@param name: A string for event name.
@param stream: A string for stream id.
"""
super(StreamAddedEvent, self).__init__(time, name, stream)
self._text = 'Add stream %s' % self.stream
self._position = 'middle'
class StreamRemovedEvent(StreamEvent):
"""Class for stream removed event."""
def __init__(self, time, name, stream):
"""Initializes a StreamRemovedEvent.
@param time: A string for event time.
@param name: A string for event name.
@param stream: A string for stream id.
"""
super(StreamRemovedEvent, self).__init__(time, name, stream)
self._text = 'Remove stream %s' % self.stream
self._position = 'middle'
class WakeEvent(EventData):
"""Class for wake event."""
def __init__(self, time, name, num_fds):
"""Initializes a WakeEvent.
@param time: A string for event time.
@param name: A string for event name.
        @param num_fds: A string for the number of fds that wake the audio thread up.
"""
super(WakeEvent, self).__init__(time, name)
self._position = 'middle'
self._class_name = 'wake'
if num_fds != '0':
self._text = 'num_fds %s' % num_fds
class C3LogWriter(object):
"""Class to handle event data and fill an HTML page using c3.js library"""
def __init__(self):
"""Initializes a C3LogWriter."""
self.times = []
self.buffer_levels = []
self.tags = []
self.max_y = 0
def AddEvent(self, event):
"""Digests an event.
Add a tag if this event needs to be shown on grid.
Add a buffer level data into buffer_levels if this event has buffer
level.
@param event: An EventData object.
"""
tag = event.GetTag()
if tag:
self.tags.append(tag)
if isinstance(event, LevelEvent):
self.times.append(event.time)
self.buffer_levels.append(str(event.level))
if event.level > self.max_y:
self.max_y = event.level
logging.debug('add data for a level event %s: %s',
event.time, event.level)
if (isinstance(event, DeviceAddedEvent) or
isinstance(event, DeviceRemovedEvent)):
self.times.append(event.time)
self.buffer_levels.append('null')
def _GetGrids(self):
"""Gets the content to be filled for grids.
@returns: A str for grid with format:
                  '{value: time1, text: "tag1", position: "position1"},
                   {value: time2, text: "tag2", position: "position2"}, ...'
"""
grids = []
for tag in self.tags:
content = ('{value: %s, text: "%s", position: "%s", '
'class: "%s"}') % (
tag.time, tag.text, tag.position, tag.class_name)
grids.append(content)
grids_joined = ', '.join(grids)
return grids_joined
def FillPage(self, page_template):
"""Fills in the page template with content.
@param page_template: A string for HTML page content with variables
to be filled.
@returns: A string for filled page.
"""
times = ', '.join(self.times)
buffer_levels = ', '.join(self.buffer_levels)
grids = self._GetGrids()
filled = page_template.safe_substitute(
times=times,
buffer_levels=buffer_levels,
grids=grids,
max_y=str(self.max_y))
return filled
class EventLogParser(object):
"""Class for event log parser."""
def __init__(self):
"""Initializes an EventLogParse."""
self.parsed_events = []
def AddEventLog(self, event_log):
"""Digests a line of event log.
@param event_log: A line for event log.
"""
event = self._ParseOneLine(event_log)
if event:
self.parsed_events.append(event)
def GetParsedEvents(self):
"""Gets the list of parsed events.
@returns: A list of parsed EventData.
"""
return self.parsed_events
def _ParseOneLine(self, line):
"""Parses one line of event log.
        Splits a line like
        2019-07-02T15:30:46.683829810 cras atlog WRITE_STREAMS_FETCH_STREAM id:1e0000 cbth:512 delay:1136
        into time, name, and props where
        time = '54946.683829810'
        name = 'WRITE_STREAMS_FETCH_STREAM'
        props = {
            'id': '1e0000',
            'cbth': '512',
            'delay': '1136'
        }
        @param line: A line of event log.
        @returns: An EventData object.
"""
line_split = line.split()
time, name = StrToTimestamp(line_split[0]), line_split[3]
logging.debug('time: %s, name: %s', time, name)
props = {}
        for index in range(4, len(line_split)):
key, value = line_split[index].split(':')[:2]
props[key] = value
logging.debug('props: %s', props)
return self._CreateEventData(time, name, props)
def _CreateEventData(self, time, name, props):
"""Creates an EventData based on event name.
@param time: A string for event time.
@param name: A string for event name.
@param props: A dict for event properties.
        @returns: An EventData object.
"""
if name == 'WRITE_STREAMS_FETCH_STREAM':
return FetchStreamEvent(time, name, stream=props['id'])
if name == 'STREAM_ADDED':
return StreamAddedEvent(time, name, stream=props['id'])
if name == 'STREAM_REMOVED':
return StreamRemovedEvent(time, name, stream=props['id'])
if name in ['FILL_AUDIO', 'SET_DEV_WAKE']:
return LevelEvent(
time, name, device=props['dev'],
level=int(props['hw_level']))
if name == 'DEV_ADDED':
return DeviceAddedEvent(time, name, device=props['dev'])
if name == 'DEV_REMOVED':
return DeviceRemovedEvent(time, name, device=props['dev'])
if name == 'WAKE':
return WakeEvent(time, name, num_fds=props['num_fds'])
return None
class AudioThreadLogParser(object):
"""Class for audio thread log parser."""
def __init__(self, path):
"""Initializes an AudioThreadLogParser.
@param path: The audio thread log file path.
"""
self.path = path
self.content = None
def Parse(self):
"""Prases the audio thread logs.
@returns: A list of event log lines.
"""
logging.debug('Using file: %s', self.path)
with open(self.path, 'r') as f:
self.content = f.read().splitlines()
        # Event logs start two lines after the 'Audio Thread Event Log:' line.
index_start = self.content.index('Audio Thread Event Log:') + 2
# If input is from audio_diagnostic result, use aplay -l line to find
# the end of audio thread event logs.
try:
index_end = self.content.index('=== aplay -l ===')
except ValueError:
logging.debug(
'Can not find aplay line. This is not from diagnostic')
index_end = len(self.content)
event_logs = self.content[index_start:index_end]
logging.info('Parsed %s log events', len(event_logs))
return event_logs
def FillLogs(self, page_template):
"""Fills the HTML page template with contents for audio thread logs.
@param page_template: A string for HTML page content with log variable
to be filled.
@returns: A string for filled page.
"""
logs = []
for s in self.content:
if 'atlog' in s:
time = StrToTimestamp(s.split()[0])
logs.append('<label id="{}">{}</label>'.format(time, s))
else:
logs.append(s)
logs = '\n'.join(logs)
return page_template.substitute(logs=logs)
def ParseArgs():
"""Parses the arguments.
@returns: The namespace containing parsed arguments.
"""
parser = argparse.ArgumentParser(
description='Draw time chart from audio thread log',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('FILE', type=str, help='The audio thread log file')
parser.add_argument('-o', type=str, dest='output',
default='view.html', help='The output HTML file')
parser.add_argument('-d', dest='debug', action='store_true',
default=False, help='Show debug message')
return parser.parse_args()
def Main():
"""The Main program."""
options = ParseArgs()
logging.basicConfig(
format='%(asctime)s:%(levelname)s:%(message)s',
level=logging.DEBUG if options.debug else logging.INFO)
# Gets lines of event logs.
audio_thread_log_parser = AudioThreadLogParser(options.FILE)
event_logs = audio_thread_log_parser.Parse()
# Parses event logs into events.
event_log_parser = EventLogParser()
for event_log in event_logs:
event_log_parser.AddEventLog(event_log)
events = event_log_parser.GetParsedEvents()
# Reads in events in preparation of filling HTML template.
c3_writer = C3LogWriter()
for event in events:
c3_writer.AddEvent(event)
# Fills in buffer level chart.
page_content_with_chart = c3_writer.FillPage(page_content)
# Fills in audio thread log into text box.
page_content_with_chart_and_logs = audio_thread_log_parser.FillLogs(
string.Template(page_content_with_chart))
with open(options.output, 'w') as f:
f.write(page_content_with_chart_and_logs)
if __name__ == '__main__':
Main()
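# Example invocation (illustrative; the script name and log path are placeholders):
#
#     python <this_script>.py /tmp/audio_thread_log -o view.html -d
#
# FILE should contain an 'Audio Thread Event Log:' section (e.g. an
# audio_diagnostics dump); the generated view.html plots buffer levels over
# time with c3.js and embeds the raw atlog lines for cross-reference.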
| 30.523967
| 116
| 0.586722
|
6ee75387f7debd95d17fe3c3b04f51af21a58b44
| 66
|
py
|
Python
|
tokenfe/task/tests.py
|
jameem-pixel/ticketproject
|
d8ae4c3a0dc924bc99d8ba50eaa722935710ef93
|
[
"Apache-2.0"
] | null | null | null |
tokenfe/task/tests.py
|
jameem-pixel/ticketproject
|
d8ae4c3a0dc924bc99d8ba50eaa722935710ef93
|
[
"Apache-2.0"
] | null | null | null |
tokenfe/task/tests.py
|
jameem-pixel/ticketproject
|
d8ae4c3a0dc924bc99d8ba50eaa722935710ef93
|
[
"Apache-2.0"
] | 1
|
2022-01-22T18:58:20.000Z
|
2022-01-22T18:58:20.000Z
|
from django.test import TestCase
# Create your tests here.
| 16.5
| 33
| 0.712121
|
ca880b599c44d8861e1b3e9e7a2aab49c55c473e
| 31,913
|
py
|
Python
|
time_series/Lib/site-packages/statsmodels/discrete/count_model.py
|
Evans-colon/Time_Series_Analysis_Forcasting
|
cf7229d8558013ffcff0780b5ba7a2e2b95d7253
|
[
"MIT"
] | null | null | null |
time_series/Lib/site-packages/statsmodels/discrete/count_model.py
|
Evans-colon/Time_Series_Analysis_Forcasting
|
cf7229d8558013ffcff0780b5ba7a2e2b95d7253
|
[
"MIT"
] | 5
|
2022-02-13T14:38:04.000Z
|
2022-02-15T00:13:07.000Z
|
time_series/Lib/site-packages/statsmodels/discrete/count_model.py
|
Evans-colon/Time_Series_Analysis_Forcasting
|
cf7229d8558013ffcff0780b5ba7a2e2b95d7253
|
[
"MIT"
] | 4
|
2022-02-04T22:58:27.000Z
|
2022-02-14T19:29:18.000Z
|
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
inflation='logit', exposure=None, missing='none', **kwargs):
super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
exposure=exposure,
missing=missing, **kwargs)
if exog_infl is None:
self.k_inflate = 1
self._no_exog_infl = True
self.exog_infl = np.ones((endog.size, self.k_inflate),
dtype=np.float64)
else:
self.exog_infl = exog_infl
self.k_inflate = exog_infl.shape[1]
self._no_exog_infl = False
if len(exog.shape) == 1:
self.k_exog = 1
else:
self.k_exog = exog.shape[1]
self.infl = inflation
if inflation == 'logit':
self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_logit
elif inflation == 'probit':
self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
self.exog_infl)
self._hessian_inflate = self._hessian_probit
else:
raise ValueError("inflation == %s, which is not handled"
% inflation)
self.inflation = inflation
self.k_extra = self.k_inflate
if len(self.exog) != len(self.exog_infl):
            raise ValueError('exog and exog_infl have different number of '
                             'observations. `missing` handling is not supported')
infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
self.exog_names[:] = infl_names + list(self.exog_names)
self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
self._init_keys.extend(['exog_infl', 'inflation'])
self._null_drop_keys = ['exog_infl']
def loglike(self, params):
"""
Loglikelihood of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : float
The log-likelihood function of the model evaluated at `params`.
See notes.
Notes
-----
.. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
\\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})
where P - pdf of main model, L - loglike function of main model.
"""
return np.sum(self.loglikeobs(params))
def loglikeobs(self, params):
"""
Loglikelihood for observations of Generic Zero Inflated model.
Parameters
----------
params : array_like
The parameters of the model.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`. See Notes for definition.
Notes
-----
        .. math:: \\ln L_{i}=\\ln(w_{i}+(1-w_{i})*P_{main\\_model}) \\text{ if } y_{i}=0
        .. math:: \\ln L_{i}=\\ln(1-w_{i})+L_{main\\_model} \\text{ if } y_{i}>0
        where P - pdf of main model, L - loglike function of main model,
        for observations :math:`i=1,...,n`.
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
llf_main = self.model_main.loglikeobs(params_main)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
llf = np.zeros_like(y, dtype=np.float64)
llf[zero_idx] = (np.log(w[zero_idx] +
(1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
return llf
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
full_output=1, disp=1, callback=None,
cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self._get_start_params()
if callback is None:
# work around perfect separation callback #3895
callback = lambda *x: x
mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
maxiter=maxiter, disp=disp, method=method,
full_output=full_output, callback=callback,
**kwargs)
zipfit = self.result_class(self, mlefit._results)
result = self.result_class_wrapper(zipfit)
if cov_kwds is None:
cov_kwds = {}
result._get_robustcov_results(cov_type=cov_type,
use_self=True, use_t=use_t, **cov_kwds)
return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
maxiter='defined_by_method', full_output=1, disp=1, callback=None,
alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
qc_tol=0.03, **kwargs):
_validate_l1_method(method)
if np.size(alpha) == 1 and alpha != 0:
k_params = self.k_exog + self.k_inflate
alpha = alpha * np.ones(k_params)
extra = self.k_extra - self.k_inflate
alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
and np.size(alpha) > 1) else alpha
if start_params is None:
offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
if np.size(offset) == 1 and offset == 0:
offset = None
start_params = self.model_main.fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=0, callback=callback,
alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
start_params = np.append(np.ones(self.k_inflate), start_params)
cntfit = super(CountModel, self).fit_regularized(
start_params=start_params, method=method, maxiter=maxiter,
full_output=full_output, disp=disp, callback=callback,
alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
discretefit = self.result_class_reg(self, cntfit)
return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
"""
Generic Zero Inflated model score (gradient) vector of the log-likelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
score : ndarray, 1-D
The score vector of the model, i.e. the first derivative of the
loglikelihood function, evaluated at `params`
"""
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
dldw = np.zeros_like(self.exog_infl, dtype=np.float64)
dldp[zero_idx,:] = (score_main[zero_idx].T *
(1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
dldp[nonzero_idx,:] = score_main[nonzero_idx]
if self.inflation == 'logit':
dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *
(1 - w[zero_idx]) *
(1 - np.exp(llf_main[zero_idx])) /
np.exp(llf[zero_idx])).T
dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *
w[nonzero_idx]).T
elif self.inflation == 'probit':
return approx_fprime(params, self.loglikeobs)
return np.hstack((dldw, dldp))
def score(self, params):
return self.score_obs(params).sum(0)
def _hessian_main(self, params):
pass
def _hessian_logit(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score_main = self.model_main.score_obs(params_main)
llf_main = self.model_main.loglikeobs(params_main)
llf = self.loglikeobs(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))
pmf = np.exp(llf)
#d2l/dw2
for i in range(self.k_inflate):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
(w[zero_idx] * (1 - w[zero_idx]) * ((1 -
np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
(1 - np.exp(llf_main[zero_idx]))**2) /
pmf[zero_idx]**2)).sum() -
(self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
w[nonzero_idx] * (1 - w[nonzero_idx])).sum())
#d2l/dpdw
for i in range(self.k_inflate):
for j in range(self.k_exog):
hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
w[zero_idx] * (1 - w[zero_idx]) *
self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()
return hess_arr
def _hessian_probit(self, params):
pass
def hessian(self, params):
"""
Generic Zero Inflated model Hessian matrix of the loglikelihood
Parameters
----------
params : array_like
The parameters of the model
Returns
-------
hess : ndarray, (k_vars, k_vars)
The Hessian, second derivative of loglikelihood function,
evaluated at `params`
Notes
-----
"""
hess_arr_main = self._hessian_main(params)
hess_arr_infl = self._hessian_inflate(params)
if hess_arr_main is None or hess_arr_infl is None:
return approx_hess(params, self.loglike)
dim = self.k_exog + self.k_inflate
hess_arr = np.zeros((dim, dim))
hess_arr[:self.k_inflate,:] = hess_arr_infl
hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main
tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
hess_arr[tri_idx] = hess_arr.T[tri_idx]
return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
offset=None, which='mean'):
"""
Predict response variable of a count model given exogenous variables.
Parameters
----------
params : array_like
The parameters of the model
        exog : ndarray, optional
            A reference to the exogenous design.
            If not assigned, the exog used for fitting is used.
        exog_infl : ndarray, optional
            A reference to the zero-inflated exogenous design.
            If not assigned, the exog_infl used for fitting is used.
offset : ndarray, optional
Offset is added to the linear prediction with coefficient equal to 1.
exposure : ndarray, optional
Log(exposure) is added to the linear prediction with coefficient
equal to 1. If exposure is specified, then it will be logged by the method.
The user does not need to log it first.
which : str, optional
Define values that will be predicted.
            'mean', 'mean-main', 'linear', 'mean-nonzero', 'prob-zero', 'prob', 'prob-main'
Default is 'mean'.
Notes
-----
"""
no_exog = False
if exog is None:
no_exog = True
exog = self.exog
if exog_infl is None:
if no_exog:
exog_infl = self.exog_infl
else:
if self._no_exog_infl:
exog_infl = np.ones((len(exog), 1))
else:
exog_infl = np.asarray(exog_infl)
if exog_infl.ndim == 1 and self.k_inflate == 1:
exog_infl = exog_infl[:, None]
if exposure is None:
if no_exog:
exposure = getattr(self, 'exposure', 0)
else:
exposure = 0
else:
exposure = np.log(exposure)
if offset is None:
if no_exog:
offset = getattr(self, 'offset', 0)
else:
offset = 0
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)
lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset
# Refactor: This is pretty hacky,
# there should be an appropriate predict method in model_main
# this is just prob(y=0 | model_main)
tmp_exog = self.model_main.exog
tmp_endog = self.model_main.endog
tmp_offset = getattr(self.model_main, 'offset', ['no'])
tmp_exposure = getattr(self.model_main, 'exposure', ['no'])
self.model_main.exog = exog
self.model_main.endog = np.zeros((exog.shape[0]))
self.model_main.offset = offset
self.model_main.exposure = exposure
llf = self.model_main.loglikeobs(params_main)
self.model_main.exog = tmp_exog
self.model_main.endog = tmp_endog
# tmp_offset might be an array with elementwise equality testing
if len(tmp_offset) == 1 and tmp_offset[0] == 'no':
del self.model_main.offset
else:
self.model_main.offset = tmp_offset
if len(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
del self.model_main.exposure
else:
self.model_main.exposure = tmp_exposure
# end hack
prob_zero = (1 - prob_main) + prob_main * np.exp(llf)
if which == 'mean':
return prob_main * np.exp(lin_pred)
elif which == 'mean-main':
return np.exp(lin_pred)
elif which == 'linear':
return lin_pred
elif which == 'mean-nonzero':
return prob_main * np.exp(lin_pred) / (1 - prob_zero)
elif which == 'prob-zero':
return prob_zero
elif which == 'prob-main':
return prob_main
elif which == 'prob':
return self._predict_prob(params, exog, exog_infl, exposure, offset)
else:
raise ValueError('which = %s is not available' % which)
class ZeroInflatedPoisson(GenericZeroInflated):
__doc__ = """
Poisson Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', missing='none', **kwargs):
super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = Poisson(self.endog, self.exog, offset=offset,
exposure=exposure)
self.distribution = zipoisson
self.result_class = ZeroInflatedPoissonResults
self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper
def _hessian_main(self, params):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
y = self.endog
w = self.model_infl.predict(params_infl)
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
score = self.score(params)
zero_idx = np.nonzero(y == 0)[0]
nonzero_idx = np.nonzero(y)[0]
mu = self.model_main.predict(params_main)
hess_arr = np.zeros((self.k_exog, self.k_exog))
coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))
#d2l/dp2
for i in range(self.k_exog):
for j in range(i, -1, -1):
hess_arr[i, j] = ((
self.exog[zero_idx, i] * self.exog[zero_idx, j] *
mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
self.exog[nonzero_idx, j]).sum())
return hess_arr
def _predict_prob(self, params, exog, exog_infl, exposure, offset):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
offset=offset)[:, None]
result = self.distribution.pmf(counts, mu, w)
return result[0] if transform else result
def _get_start_params(self):
start_params = self.model_main.fit(disp=0, method="nm").params
start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
return start_params
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Poisson Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZIGP regression.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the GeneralizedPoisson model. p=1 for
ZIGP-1 and p=2 for ZIGP-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = GeneralizedPoisson(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zigenpoisson
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedGeneralizedPoissonResults
self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization + 1
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w[w == 1.] = np.nextafter(1, 0)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(counts, mu, params_main[-1], p, w)
return result[0] if transform else result
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = ZeroInflatedPoisson(self.endog, self.exog,
exog_infl=self.exog_infl).fit(disp=0).params
start_params = np.append(start_params, 0.1)
return start_params
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
__doc__ = """
Zero Inflated Generalized Negative Binomial Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
p : scalar
P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
p=2 for ZINB-2. Default is p=2
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params +
"""p : float
dispersion power parameter for the NegativeBinomialP model. p=1 for
        ZINB-1 and p=2 for ZINB-2. Default is p=2
""" + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
inflation='logit', p=2, missing='none', **kwargs):
super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
offset=offset,
inflation=inflation,
exog_infl=exog_infl,
exposure=exposure,
missing=missing, **kwargs)
self.model_main = NegativeBinomialP(self.endog, self.exog,
offset=offset, exposure=exposure, p=p)
self.distribution = zinegbin
self.k_exog += 1
self.k_extra += 1
self.exog_names.append("alpha")
self.result_class = ZeroInflatedNegativeBinomialResults
self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper
def _get_init_kwds(self):
kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
kwds['p'] = self.model_main.parameterization
return kwds
def _predict_prob(self, params, exog, exog_infl, exposure, offset):
params_infl = params[:self.k_inflate]
params_main = params[self.k_inflate:]
p = self.model_main.parameterization
counts = np.arange(0, np.max(self.endog)+1)
if len(exog_infl.shape) < 2:
transform = True
w = np.atleast_2d(
self.model_infl.predict(params_infl, exog_infl))[:, None]
else:
transform = False
w = self.model_infl.predict(params_infl, exog_infl)[:, None]
w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
mu = self.model_main.predict(params_main, exog,
exposure=exposure, offset=offset)[:, None]
result = self.distribution.pmf(counts, mu, params_main[-1], p, w)
return result[0] if transform else result
def _get_start_params(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
start_params = self.model_main.fit(disp=0, method='nm').params
start_params = np.append(np.zeros(self.k_inflate), start_params)
return start_params
class ZeroInflatedPoissonResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
mu = self.predict(which='linear')
w = 1 - self.predict() / np.exp(self.predict(which='linear'))
return (1 + w * np.exp(mu))
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
pass
class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
ZeroInflatedPoissonResults)
class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Poisson",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return ((1 + alpha * mu**p)**2 + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
ZeroInflatedGeneralizedPoissonResults):
pass
class ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
ZeroInflatedGeneralizedPoissonResults)
class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(CountResults):
__doc__ = _discrete_results_docs % {
"one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
"extra_attr": ""}
@cache_readonly
def _dispersion_factor(self):
p = self.model.model_main.parameterization
alpha = self.params[self.model.k_inflate:][-1]
mu = np.exp(self.predict(which='linear'))
w = 1 - self.predict() / mu
return (1 + alpha * mu**(p-1) + w * mu)
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Not yet implemented for Zero Inflated Models
"""
raise NotImplementedError("not yet implemented for zero inflation")
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
ZeroInflatedNegativeBinomialResults):
pass
class ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
ZeroInflatedNegativeBinomialResults)
class L1ZeroInflatedNegativeBinomialResultsWrapper(
lm.RegressionResultsWrapper):
pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
L1ZeroInflatedNegativeBinomialResults)
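# Minimal usage sketch (illustrative addition, not part of the original module;
# the simulated data and coefficients below are arbitrary).
if __name__ == "__main__":
    np.random.seed(0)
    nobs = 500
    exog = np.column_stack((np.ones(nobs), np.random.rand(nobs)))
    endog = np.random.poisson(np.exp(np.dot(exog, [0.5, 1.0])))
    endog[np.random.rand(nobs) < 0.3] = 0  # inflate the zero counts
    res = ZeroInflatedPoisson(endog, exog).fit(disp=0)
    print(res.params)                          # inflation and count coefficients
    print(res.predict(which='prob-zero')[:5])  # P(y == 0) per observation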
| 38.264988
| 98
| 0.59681
|
07b529e7a720807fc3252b20de3680d9007dab6e
| 343
|
py
|
Python
|
test/word_store/test_construction.py
|
AndrewWasHere/passphrase_generator
|
8db3b833401036fc6db4ed2b9dc7980e5fec525c
|
[
"BSD-3-Clause"
] | null | null | null |
test/word_store/test_construction.py
|
AndrewWasHere/passphrase_generator
|
8db3b833401036fc6db4ed2b9dc7980e5fec525c
|
[
"BSD-3-Clause"
] | null | null | null |
test/word_store/test_construction.py
|
AndrewWasHere/passphrase_generator
|
8db3b833401036fc6db4ed2b9dc7980e5fec525c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright 2017, Andrew Lin
All rights reserved.
This software is licensed under the BSD 3-Clause License.
See LICENSE.txt at the root of the project or
https://opensource.org/licenses/BSD-3-Clause
"""
from passphrase.word_store import WordStore
def test_nominal():
words = WordStore()
assert len(list(words.iter_words())) == 0
| 21.4375
| 57
| 0.74344
|
7526daa6f0b263e555f30adc5e6af3be286474cf
| 7,647
|
py
|
Python
|
vis_sv/vis_sv_z1000_L1.py
|
danforthcenter/plantcv-dev-scripts
|
57ebc9a031b7141b8965c927c3b7b01ba6504dc1
|
[
"MIT"
] | null | null | null |
vis_sv/vis_sv_z1000_L1.py
|
danforthcenter/plantcv-dev-scripts
|
57ebc9a031b7141b8965c927c3b7b01ba6504dc1
|
[
"MIT"
] | null | null | null |
vis_sv/vis_sv_z1000_L1.py
|
danforthcenter/plantcv-dev-scripts
|
57ebc9a031b7141b8965c927c3b7b01ba6504dc1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys, traceback
import cv2
import numpy as np
import argparse
import string
import plantcv as pcv
### Parse command-line arguments
def options():
parser = argparse.ArgumentParser(description="Imaging processing with opencv")
parser.add_argument("-i", "--image", help="Input image file.", required=True)
parser.add_argument("-m", "--roi", help="Input region of interest file.", required=False)
parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=True)
parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action="store_true")
args = parser.parse_args()
return args
### Main pipeline
def main():
# Get options
args = options()
# Read image
img, path, filename = pcv.readimage(args.image)
#roi = cv2.imread(args.roi)
# Pipeline step
device = 0
# Convert RGB to HSV and extract the Saturation channel
device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug)
# Threshold the Saturation image
device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug)
# Median Filter
device, s_mblur = pcv.median_blur(s_thresh, 5, device, args.debug)
device, s_cnt = pcv.median_blur(s_thresh, 5, device, args.debug)
# Fill small objects
device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug)
# Convert RGB to LAB and extract the Blue channel
device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug)
# Threshold the blue image
device, b_thresh = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
device, b_cnt = pcv.binary_threshold(b, 138, 255, 'light', device, args.debug)
# Fill small objects
device, b_fill = pcv.fill(b_thresh, b_cnt, 150, device, args.debug)
# Join the thresholded saturation and blue-yellow images
device, bs = pcv.logical_and(s_fill, b_fill, device, args.debug)
# Apply Mask (for vis images, mask_color=white)
device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug)
# Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow channels
device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug)
device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug)
# Threshold the green-magenta and blue images
device, maskeda_thresh = pcv.binary_threshold(masked_a, 122, 255, 'dark', device, args.debug)
device, maskedb_thresh = pcv.binary_threshold(masked_b, 133, 255, 'light', device, args.debug)
# Join the thresholded saturation and blue-yellow images (OR)
device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug)
# Fill small objects
device, ab_fill = pcv.fill(ab, ab_cnt, 200, device, args.debug)
# Apply mask (for vis images, mask_color=white)
device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug)
# Select area with black bars and find overlapping plant material
device, roi1, roi_hierarchy1= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 0, 0,-1900,0)
device, id_objects1,obj_hierarchy1 = pcv.find_objects(masked2, ab_fill, device, args.debug)
device,roi_objects1, hierarchy1, kept_mask1, obj_area1 = pcv.roi_objects(masked2,'cutto',roi1,roi_hierarchy1,id_objects1,obj_hierarchy1,device, args.debug)
device, masked3 = pcv.apply_mask(masked2, kept_mask1, 'white', device, args.debug)
device, masked_a1 = pcv.rgb2gray_lab(masked3, 'a', device, args.debug)
device, masked_b1 = pcv.rgb2gray_lab(masked3, 'b', device, args.debug)
device, maskeda_thresh1 = pcv.binary_threshold(masked_a1, 122, 255, 'dark', device, args.debug)
device, maskedb_thresh1 = pcv.binary_threshold(masked_b1, 170, 255, 'light', device, args.debug)
device, ab1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
device, ab_cnt1 = pcv.logical_or(maskeda_thresh1, maskedb_thresh1, device, args.debug)
device, ab_fill1 = pcv.fill(ab1, ab_cnt1, 300, device, args.debug)
device, roi2, roi_hierarchy2= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 1900, 0,0,0)
device, id_objects2,obj_hierarchy2 = pcv.find_objects(masked2, ab_fill, device, args.debug)
device,roi_objects2, hierarchy2, kept_mask2, obj_area2 = pcv.roi_objects(masked2,'cutto',roi2,roi_hierarchy2,id_objects2,obj_hierarchy2,device, args.debug)
device, masked4 = pcv.apply_mask(masked2, kept_mask2, 'white', device, args.debug)
device, masked_a2 = pcv.rgb2gray_lab(masked4, 'a', device, args.debug)
device, masked_b2 = pcv.rgb2gray_lab(masked4, 'b', device, args.debug)
device, maskeda_thresh2 = pcv.binary_threshold(masked_a2, 122, 255, 'dark', device, args.debug)
device, maskedb_thresh2 = pcv.binary_threshold(masked_b2, 170, 255, 'light', device, args.debug)
device, ab2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
device, ab_cnt2 = pcv.logical_or(maskeda_thresh2, maskedb_thresh2, device, args.debug)
device, ab_fill2 = pcv.fill(ab2, ab_cnt2, 200, device, args.debug)
device, ab_cnt3 = pcv.logical_or(ab_fill1, ab_fill2, device, args.debug)
device, masked3 = pcv.apply_mask(masked2, ab_cnt3, 'white', device, args.debug)
# Identify objects
device, id_objects3,obj_hierarchy3 = pcv.find_objects(masked2, ab_fill, device, args.debug)
# Define ROI
device, roi3, roi_hierarchy3= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 500, 0,-450,-530)
# Decide which objects to keep and combine with objects overlapping with black bars
device,roi_objects3, hierarchy3, kept_mask3, obj_area1 = pcv.roi_objects(img,'cutto',roi3,roi_hierarchy3,id_objects3,obj_hierarchy3,device, args.debug)
device, kept_mask4_1 = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
device, kept_cnt = pcv.logical_or(ab_cnt3, kept_mask3, device, args.debug)
device, kept_mask4 = pcv.fill(kept_mask4_1, kept_cnt, 200, device, args.debug)
device, masked5 = pcv.apply_mask(masked2, kept_mask4, 'white', device, args.debug)
device, id_objects4,obj_hierarchy4 = pcv.find_objects(masked5, kept_mask4, device, args.debug)
device, roi4, roi_hierarchy4= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,False, 0, 0,0,0)
device,roi_objects4, hierarchy4, kept_mask4, obj_area = pcv.roi_objects(img,'partial',roi4,roi_hierarchy4,id_objects4,obj_hierarchy4,device, args.debug)
# Object combine kept objects
device, obj, mask = pcv.object_composition(img, roi_objects4, hierarchy4, device, args.debug)
############## Analysis ################
# Find shape properties, output shape image (optional)
device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,args.outdir+'/'+filename)
# Shape properties relative to user boundary line (optional)
device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 950, device,args.debug,args.outdir+'/'+filename)
# Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional)
device, color_header,color_data,norm_slice= pcv.analyze_color(img, args.image, kept_mask4, 256, device, args.debug,'all','rgb','v','img',300,args.outdir+'/'+filename)
# Output shape and color data
pcv.print_results(args.image, shape_header, shape_data)
pcv.print_results(args.image, color_header, color_data)
pcv.print_results(args.image, boundary_header, boundary_data)
if __name__ == '__main__':
  main()
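# Example invocation (illustrative; the image and output paths are placeholders):
#     python vis_sv_z1000_L1.py -i plant_vis_sv_z1000.png -o ./output -D
# -i and -o are required; -D prints the intermediate debug images for each step.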
| 52.737931
| 168
| 0.741729
|
7a5ed1592d9b05c8a304a27c939a38020c2b340f
| 2,114
|
py
|
Python
|
warpseq/model/instrument.py
|
simian-terminal/warpseq
|
f61e68d1e6a9ad15a5e0c899237be784bcff7093
|
[
"Apache-2.0"
] | 3
|
2021-01-22T01:20:20.000Z
|
2022-03-10T20:58:42.000Z
|
warpseq/model/instrument.py
|
simian-terminal/warpseq
|
f61e68d1e6a9ad15a5e0c899237be784bcff7093
|
[
"Apache-2.0"
] | 1
|
2020-08-11T07:21:05.000Z
|
2020-08-11T07:21:05.000Z
|
warpseq/model/instrument.py
|
simianterminal/warpseq
|
f61e68d1e6a9ad15a5e0c899237be784bcff7093
|
[
"Apache-2.0"
] | null | null | null |
# ------------------------------------------------------------------
# Warp Sequencer
# (C) 2020 Michael DeHaan <michael@michaeldehaan.net> & contributors
# Apache2 Licensed
# ------------------------------------------------------------------
# an instrument adds a channel number to a MIDI device and has some
# parameters around supported note ranges. It can also be muted.
from .base import NewReferenceObject
from .device import Device
class Instrument(NewReferenceObject):
__slots__ = [ 'name', 'channel', 'device', 'min_octave', 'base_octave', 'max_octave', 'default_velocity', 'muted' ]
def __init__(self, name=None, channel=None, device=None, min_octave=0, base_octave=3, max_octave=10, default_velocity=120, muted=False, obj_id=None):
self.name = name
self.channel = int(channel)
self.device = device
self.min_octave = min_octave
self.base_octave = base_octave
self.max_octave = max_octave
self.default_velocity = default_velocity
self.muted = muted
self.obj_id = obj_id
super(Instrument,self).__init__()
def to_dict(self):
result = dict(
obj_id = self.obj_id,
name = self.name,
channel = self.channel,
min_octave = self.min_octave,
base_octave = self.base_octave,
max_octave = self.max_octave,
default_velocity = self.default_velocity,
muted = self.muted
)
if self.device:
result['device'] = self.device.obj_id
else:
result['device'] = None
return result
@classmethod
def from_dict(cls, song, data):
return Instrument(
obj_id = data['obj_id'],
name = data['name'],
channel = int(data['channel']),
device = song.find_device(data['device']),
min_octave = data['min_octave'],
base_octave = data['base_octave'],
max_octave = data['max_octave'],
default_velocity = data['default_velocity'],
muted = data['muted']
)
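# Round-trip sketch (illustrative, not part of the original file): `my_device`
# and `song` are assumed to exist, and song.find_device() must resolve the
# stored device obj_id.
#
#     inst = Instrument(name='lead', channel=1, device=my_device)
#     data = inst.to_dict()                    # device is stored as its obj_id
#     copy = Instrument.from_dict(song, data)  # device resolved via song.find_device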
| 34.096774
| 153
| 0.569536
|
375630a1da1c56e360aae35470702f17b9ba31a7
| 2,806
|
py
|
Python
|
lib/exabgp/bgp/message/update/attribute/bgpls/prefix/srprefix.py
|
fser/exabgp
|
9a41b5f833a00a4d56b1a38f73858d62685065dd
|
[
"BSD-3-Clause"
] | 2
|
2018-02-07T14:49:11.000Z
|
2021-09-08T15:31:51.000Z
|
lib/exabgp/bgp/message/update/attribute/bgpls/prefix/srprefix.py
|
fser/exabgp
|
9a41b5f833a00a4d56b1a38f73858d62685065dd
|
[
"BSD-3-Clause"
] | null | null | null |
lib/exabgp/bgp/message/update/attribute/bgpls/prefix/srprefix.py
|
fser/exabgp
|
9a41b5f833a00a4d56b1a38f73858d62685065dd
|
[
"BSD-3-Clause"
] | 1
|
2018-12-19T18:02:59.000Z
|
2018-12-19T18:02:59.000Z
|
# encoding: utf-8
"""
srprefix.py
Created by Evelio Vila
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
import json
from struct import unpack
from exabgp.vendoring import six
from exabgp.vendoring.bitstring import BitArray
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LINKSTATE, LsGenericFlags
# draft-gredler-idr-bgp-ls-segment-routing-ext-03
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Flags | Algorithm | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | SID/Index/Label (variable) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@LINKSTATE.register()
class SrPrefix(object):
TLV = 1158
def __init__ (self, flags, sids, sr_algo):
self.flags = flags
self.sids = sids
self.sr_algo = sr_algo
def __repr__ (self):
return "prefix_flags: %s, sids: %s" % (self.flags, self.sids)
@classmethod
def unpack (cls,data,length):
# We only support IS-IS flags for now.
flags = LsGenericFlags.unpack(data[0:1],LsGenericFlags.ISIS_SR_FLAGS)
#
# Parse Algorithm
sr_algo = six.indexbytes(data, 1)
# Move pointer 4 bytes: Flags(1) + Algorithm(1) + Reserved(2)
data = data[4:]
# SID/Index/Label: according to the V and L flags, it contains
# either:
# * A 3 octet local label where the 20 rightmost bits are used for
# encoding the label value. In this case the V and L flags MUST
# be set.
#
# * A 4 octet index defining the offset in the SID/Label space
# advertised by this router using the encodings defined in
# Section 3.1. In this case V and L flags MUST be unset.
sids = []
while data:
if flags.flags['V'] and flags.flags['L']:
b = BitArray(bytes=data[:3])
sid = b.unpack('uintbe:24')[0]
data = data[3:]
elif (not flags.flags['V']) and \
(not flags.flags['L']):
if len(data) != 4:
# Cisco IOS XR Software, Version 6.1.1.19I is not
# correctly setting the flags
raise Notify(3,5, "SID/Label size doesn't match V and L flag state")
sid = unpack('!I', data[:4])[0]
data = data[4:]
sids.append(sid)
return cls(flags=flags, sids=sids, sr_algo=sr_algo)
def json (self,compact=None):
return ', '.join(['"sr-prefix-flags": {}'.format(self.flags.json()),
'"sids": {}'.format(json.dumps(self.sids)),
'"sr-algorithm": {}'.format(json.dumps(self.sr_algo))])
| 35.518987
| 89
| 0.556664
|
617b37d997a3cd28c041baae49d52cf26bc290f1
| 220
|
py
|
Python
|
arrays/first_not_repeating_char.py
|
nickaigi/automatic-dollop
|
eb8222475c7871c1d5710242c5aed8c70ea0d2c8
|
[
"Unlicense"
] | null | null | null |
arrays/first_not_repeating_char.py
|
nickaigi/automatic-dollop
|
eb8222475c7871c1d5710242c5aed8c70ea0d2c8
|
[
"Unlicense"
] | null | null | null |
arrays/first_not_repeating_char.py
|
nickaigi/automatic-dollop
|
eb8222475c7871c1d5710242c5aed8c70ea0d2c8
|
[
"Unlicense"
] | null | null | null |
def first_not_repeating_character(s):
for c in s:
if s.index(c) == s.rindex(c):
return c
return '_'
if __name__ == '__main__':
s = 'abacabad'
print(first_not_repeating_character(s))
| 20
| 43
| 0.604545
|
d86b1b6c0bb04edd6cec2877caf04b99efefba37
| 1,629
|
py
|
Python
|
tests/neox_args/test_neoxargs_usage.py
|
igor0/gpt-neox
|
3ad61952c290669d3741c01f767d41fdee5215c5
|
[
"Apache-2.0"
] | 1,871
|
2020-12-22T14:44:29.000Z
|
2022-03-31T14:21:40.000Z
|
tests/neox_args/test_neoxargs_usage.py
|
igor0/gpt-neox
|
3ad61952c290669d3741c01f767d41fdee5215c5
|
[
"Apache-2.0"
] | 300
|
2020-12-23T17:51:43.000Z
|
2022-03-30T17:34:42.000Z
|
tests/neox_args/test_neoxargs_usage.py
|
igor0/gpt-neox
|
3ad61952c290669d3741c01f767d41fdee5215c5
|
[
"Apache-2.0"
] | 235
|
2020-12-23T19:45:19.000Z
|
2022-03-31T20:33:47.000Z
|
"""
plausibility check for the usage of neox_args in the megatron codebase
"""
import pytest
import re
from ..common import get_root_directory
@pytest.mark.cpu
def test_neoxargs_usage():
""""
checks for code pieces of the pattern "args.*" and verifies that such used arg is defined in NeoXArgs
"""
from megatron.neox_arguments import NeoXArgs
declared_all = True
neox_args_attributes = set(NeoXArgs.__dataclass_fields__.keys())
    # we exclude a number of properties (implemented with the @property decorator) or functions that we know exist
exclude = set(['params_dtype', 'deepspeed_config', 'get', 'pop', 'get_deepspeed_main_args', 'optimizer["params"]', 'attention_config[layer_number]', 'adlr_autoresume_object', 'update_value', 'all_config', 'tensorboard_writer', 'tokenizer', 'train_batch_size]'])
# test file by file
for filename in (get_root_directory() / "megatron").glob('**/*.py'):
if filename.name in ["text_generation_utils.py", "train_tokenizer.py"]: continue
# load file
with open(filename, 'r') as f:
file_contents = f.read()
# find args matches
matches = list(re.findall(r"(?<=args\.).{2,}?(?=[\s\n(){}+-/*;:,=])", file_contents))
if len(matches) == 0: continue
# compare
for match in matches:
if match not in neox_args_attributes and match not in exclude:
print(f"(arguments used not found in neox args): {filename.name}: {match}", flush=True)
declared_all = False
assert declared_all, "all arguments used in code defined in NeoXArgs"
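# Regex behaviour sketch (illustrative): on a source line such as
# "hidden = args.hidden_size\n" the pattern above yields ['hidden_size'],
# which is then checked against NeoXArgs.__dataclass_fields__ and the exclude set.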
| 38.785714
| 265
| 0.666053
|
a2b9d5671ae53cbefb3e73e2c4ecc597b6ae1fc8
| 1,209
|
py
|
Python
|
13_async_development/sample_code/8_queued_no_waits.py
|
gdia/The-Complete-Python-Course
|
ed375b65242249bc749c3e292a6149f8528b9dcf
|
[
"MIT"
] | 29
|
2019-09-02T21:15:59.000Z
|
2022-01-14T02:20:05.000Z
|
13_async_development/sample_code/8_queued_no_waits.py
|
gdia/The-Complete-Python-Course
|
ed375b65242249bc749c3e292a6149f8528b9dcf
|
[
"MIT"
] | 2
|
2020-08-20T05:48:36.000Z
|
2021-06-02T03:16:31.000Z
|
13_async_development/sample_code/8_queued_no_waits.py
|
gdia/The-Complete-Python-Course
|
ed375b65242249bc749c3e292a6149f8528b9dcf
|
[
"MIT"
] | 38
|
2019-10-20T14:29:12.000Z
|
2022-03-27T19:50:05.000Z
|
import time
import random
import queue
from threading import Thread # still needed for daemon threads
from concurrent.futures import ThreadPoolExecutor
counter = 0
job_queue = queue.Queue()
counter_queue = queue.Queue()
def increment_manager():
global counter
while True:
        increment = counter_queue.get()  # blocks until an item is available
old_counter = counter
counter = old_counter + increment
job_queue.put((f'New counter value {counter}', '------------'))
        counter_queue.task_done()  # mark the item as processed so counter_queue.join() can return
# printer_manager and increment_manager run continuously because of the `daemon` flag.
Thread(target=increment_manager, daemon=True).start()
def printer_manager():
while True:
for line in job_queue.get():
print(line)
job_queue.task_done()
# printer_manager and increment_manager run continuously because of the `daemon` flag.
Thread(target=printer_manager, daemon=True).start()
def increment_counter():
counter_queue.put(1)
with ThreadPoolExecutor(max_workers=10) as pool:
[pool.submit(increment_counter) for x in range(10)]
counter_queue.join() # wait for counter_queue to be empty
job_queue.join() # wait for job_queue to be empty
| 26.866667
| 94
| 0.762614
|
85d195d594a710336938dcda953fb7fbeae1bb79
| 3,782
|
py
|
Python
|
prodmanager/settings.py
|
georgiawang5332/prodmanager
|
5dbff0789deb0c4f696149345ded3e338c6f59ec
|
[
"MIT"
] | null | null | null |
prodmanager/settings.py
|
georgiawang5332/prodmanager
|
5dbff0789deb0c4f696149345ded3e338c6f59ec
|
[
"MIT"
] | null | null | null |
prodmanager/settings.py
|
georgiawang5332/prodmanager
|
5dbff0789deb0c4f696149345ded3e338c6f59ec
|
[
"MIT"
] | null | null | null |
"""
Django settings for prodmanager project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g2f2j02l)q(*z31g@()&-neg2o4d_lg%fqvv@nhztk&twb5px9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dashboard',
'django_filters',
'milt_sear',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'prodmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [BASE_DIR / 'templates'],
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'prodmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'filt_db',
'USER': 'postgres',
'PASSWORD': 'postgres5',
'HOST': 'localhost',
'PORT': '5432',
'DISABLE_SERVER_SIDE_CURSORS': True,
        # Cursor issue: https://stackoverflow.com/questions/19069722/psycopg2-operationalerror-cursor-does-not-exist
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
# Optional: only needed to build a language selection menu
from django.utils.translation import gettext_lazy as _
LANGUAGE_CODE = 'zh-Hant'
LANGUAGES = (
('en', _('English')),
('zh-hant', _('Traditional Chinese')),
)
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# STATICFILES_DIRS = [ BASE_DIR / 'static']
MEDIA_ROOT = os.path.join(BASE_DIR, "static/dist/media_cdn")
MEDIA_URL = '/images/'
| 26.447552
| 104
| 0.724484
|
f7954dc876348518f9d557b91d1e863ed39cd3dc
| 2,682
|
py
|
Python
|
shop/shipping/modifiers.py
|
r4co0n/django-shop
|
69cbddba27066568920cce177511c95824bfe45b
|
[
"BSD-3-Clause"
] | null | null | null |
shop/shipping/modifiers.py
|
r4co0n/django-shop
|
69cbddba27066568920cce177511c95824bfe45b
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T16:04:13.000Z
|
2022-01-13T03:35:33.000Z
|
shop/shipping/modifiers.py
|
r4co0n/django-shop
|
69cbddba27066568920cce177511c95824bfe45b
|
[
"BSD-3-Clause"
] | 1
|
2020-07-06T05:06:05.000Z
|
2020-07-06T05:06:05.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from shop.modifiers.base import BaseCartModifier
class ShippingModifier(BaseCartModifier):
"""
Base class for all shipping modifiers.
"""
def get_choice(self):
"""
:returns: A tuple consisting of 'value, label' used by the shipping form dialog to render
the available shipping choices.
"""
        raise NotImplementedError("{} must implement method `get_choice()`.".format(self.__class__))
def is_active(self, shipping_modifier):
"""
:returns: ``True`` if this shipping modifier is active.
"""
return shipping_modifier == self.identifier
def is_disabled(self, cart):
"""
Hook method to be overridden by the concrete shipping modifier. Shall be used to
temporarily disable a shipping method, in case the cart does not fulfill certain criteria,
for instance an undeliverable destination address.
:returns: ``True`` if this shipping modifier is disabled for the current cart.
"""
return False
def update_render_context(self, context):
"""
Hook to update the rendering context with shipping specific data.
"""
from shop.models.cart import CartModel
if 'shipping_modifiers' not in context:
context['shipping_modifiers'] = {}
try:
cart = CartModel.objects.get_from_request(context['request'])
if self.is_active(cart.extra.get('shipping_modifier')):
cart.update(context['request'])
data = cart.extra_rows[self.identifier].data
data.update(modifier=self.identifier)
context['shipping_modifiers']['initial_row'] = data
except (KeyError, CartModel.DoesNotExist):
pass
def ship_the_goods(self, delivery):
"""
Hook to be overridden by the active shipping modifier. It should be used to perform the
shipping request.
"""
delivery.shipped_at = timezone.now()
class SelfCollectionModifier(ShippingModifier):
"""
This modifiers has not influence on the cart final. It can be used,
to enable the customer to pick up the products in the shop.
"""
identifier = 'self-collection'
def get_choice(self):
return (self.identifier, _("Self-collection"))
def ship_the_goods(self, delivery):
if not delivery.shipping_id:
delivery.shipping_id = str(delivery.id)
super(SelfCollectionModifier, self).ship_the_goods(delivery)
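# Illustrative sketch (not part of django-shop): a project-level modifier that
# offers parcel shipping and disables itself for empty carts. `cart.num_items`
# is assumed here; the real criterion depends on the project's cart model.
#
#     class ParcelShippingModifier(ShippingModifier):
#         identifier = 'parcel-shipping'
#         def get_choice(self):
#             return (self.identifier, _("Parcel shipping"))
#         def is_disabled(self, cart):
#             return cart.num_items == 0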
| 35.289474
| 98
| 0.65399
|
f77a7e41120a37ce5a2f05432c49db923c877116
| 1,486
|
py
|
Python
|
simpleh5/tests/test_simpleh5_search_util.py
|
ssolari/simpleh5
|
0c82395f5144634221419ff347db23ef4d34cd04
|
[
"MIT"
] | null | null | null |
simpleh5/tests/test_simpleh5_search_util.py
|
ssolari/simpleh5
|
0c82395f5144634221419ff347db23ef4d34cd04
|
[
"MIT"
] | null | null | null |
simpleh5/tests/test_simpleh5_search_util.py
|
ssolari/simpleh5
|
0c82395f5144634221419ff347db23ef4d34cd04
|
[
"MIT"
] | null | null | null |
import unittest
import os
from simpleh5.utilities.search_utilities import _build_search_string
class TestBuildString(unittest.TestCase):
def test_string_single(self):
query = ['strs', '==', 'abc']
match_string, uservars = _build_search_string(query)
self.assertEqual(match_string, "(n0==b'abc')")
self.assertDictEqual(uservars, {'n0': 'strs'})
def test_string_single_unicode(self):
query = (
('strs', '==', '£')
)
match_string, uservars = _build_search_string(query)
self.assertEqual(match_string, "(n0==b'\\xc2\\xa3')")
self.assertDictEqual(uservars, {'n0': 'strs'})
def test_string_compound(self):
query = [
[('strs', '==', 'abc'), ('strs', '==', '£')],
['nums', '>', 1.3]
]
match_string, uservars = _build_search_string(query)
self.assertEqual(match_string, "((n0==b'abc')|(n0==b'\\xc2\\xa3'))&(n1>1.3)")
self.assertDictEqual(uservars, {'n0': 'strs', 'n1': 'nums'})
def test_string_double_compound(self):
# meaningless logic
query = [
[('strs', '!=', 'abc'), ('strs', '!=', 'cba')],
('strs', '==', '£')
]
match_string, uservars = _build_search_string(query)
self.assertEqual(match_string, "((n0!=b'abc')|(n0!=b'cba'))&(n0==b'\\xc2\\xa3')")
self.assertDictEqual(uservars, {'n0': 'strs'})
if __name__ == "__main__":
unittest.main()
| 30.326531
| 89
| 0.560565
|
9cd5b7957823e02e9cd7b6b090f21f29c565b07a
| 802
|
py
|
Python
|
api/source/models/option.py
|
1pkg/ReRe
|
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
|
[
"MIT"
] | 1
|
2019-12-17T10:31:48.000Z
|
2019-12-17T10:31:48.000Z
|
api/source/models/option.py
|
c-pkg/ReRe
|
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
|
[
"MIT"
] | null | null | null |
api/source/models/option.py
|
c-pkg/ReRe
|
83f77d2cece0fb5f6d7b86a395fcca7d4e16459f
|
[
"MIT"
] | 1
|
2019-04-29T08:19:36.000Z
|
2019-04-29T08:19:36.000Z
|
from base import Alchemy
class Option(Alchemy.Model):
__tablename__ = 'option'
id = Alchemy.Column(
Alchemy.Integer,
nullable=False,
primary_key=True,
)
name = Alchemy.Column(
Alchemy.String,
nullable=False,
unique=True,
)
description = Alchemy.Column(
Alchemy.String,
nullable=False,
)
link = Alchemy.Column(
Alchemy.String,
nullable=False,
unique=True,
)
source = Alchemy.Column(
Alchemy.String,
nullable=False,
)
subjects = Alchemy.relationship(
'Subject',
backref='option',
passive_deletes=True,
)
answers = Alchemy.relationship(
'Answer',
backref='option',
passive_deletes=True,
)
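# --- Editor's illustrative sketch, not part of the original module ---
# Hedged usage example. It assumes `Alchemy` is a Flask-SQLAlchemy style handle
# exposing a `session`; the field values are invented for illustration.
def _example_create_option():
    option = Option(
        name='python',
        description='Questions about the Python language',
        link='https://example.com/python',
        source='manual',
    )
    Alchemy.session.add(option)
    Alchemy.session.commit()
    return option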
| 19.560976
| 36
| 0.562344
|
ebfa2f6b03db0b30d80056918a698118b714de25
| 253
|
py
|
Python
|
tests/exceptions/source/others/handler_formatting_with_context_manager.py
|
joshgordon/loguru
|
9777f4bec8b03ef074635269224baa3fd263fd09
|
[
"MIT"
] | 2
|
2020-01-20T10:03:24.000Z
|
2020-02-05T03:38:25.000Z
|
tests/exceptions/source/others/handler_formatting_with_context_manager.py
|
joshgordon/loguru
|
9777f4bec8b03ef074635269224baa3fd263fd09
|
[
"MIT"
] | 1
|
2021-01-31T07:28:51.000Z
|
2021-02-21T08:38:53.000Z
|
tests/exceptions/source/others/handler_formatting_with_context_manager.py
|
joshgordon/loguru
|
9777f4bec8b03ef074635269224baa3fd263fd09
|
[
"MIT"
] | 3
|
2020-02-05T03:38:28.000Z
|
2020-02-11T02:34:56.000Z
|
import sys
from loguru import logger
logger.remove()
logger.add(
sys.stderr,
format="{name} {file.name} {function} {line}",
diagnose=False,
backtrace=False,
colorize=False,
)
def a():
with logger.catch():
1 / 0
a()
| 12.047619
| 50
| 0.604743
|
cd7b6b0a2f2c2d1cd94a529ea300dcfba5c891e3
| 1,150
|
py
|
Python
|
dataxHWSp2021/HW2_CoreConcepts/student/tests/q3b.py
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 28
|
2020-06-15T23:53:36.000Z
|
2022-03-19T09:27:02.000Z
|
dataxHWSp2021/HW2_CoreConcepts/student/tests/q3b.py
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 4
|
2020-06-24T22:20:31.000Z
|
2022-02-28T01:37:36.000Z
|
dataxHWSp2021/HW2_CoreConcepts/student/tests/q3b.py
|
UCBerkeley-SCET/DataX-Berkeley
|
f912d22c838b511d3ada4ecfa3548afd80437b74
|
[
"Apache-2.0"
] | 78
|
2020-06-19T09:41:01.000Z
|
2022-02-05T00:13:29.000Z
|
test = { 'name': 'q3b',
'points': 3,
'suites': [ { 'cases': [ { 'code': '>>> '
'print(np.mean(train_scores,axis=1).round(3))\n'
'[0.954 0.97 0.979 0.985 0.988 '
'0.989 0.988 0.988 0.992 '
'0.993]\n',
'hidden': False,
'locked': False},
{ 'code': '>>> '
'print(np.mean(val_scores,axis=1).round(3))\n'
'[0.947 0.958 0.969 0.976 0.976 '
'0.974 0.969 0.971 0.965 '
'0.958]\n',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 54.761905
| 95
| 0.229565
|
1f69739cac447b4837daed3ea17b01ac125a4367
| 797
|
py
|
Python
|
devday/talk/migrations/0042_delete_attendee_feedback_with_attendee.py
|
jenslauterbach/devday_website
|
a827c9237e656842542eff07ec9fa7b39716a0ee
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 6
|
2018-09-30T20:18:01.000Z
|
2020-03-12T09:03:38.000Z
|
devday/talk/migrations/0042_delete_attendee_feedback_with_attendee.py
|
jenslauterbach/devday_website
|
a827c9237e656842542eff07ec9fa7b39716a0ee
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 260
|
2018-09-30T14:17:57.000Z
|
2022-03-04T13:48:34.000Z
|
devday/talk/migrations/0042_delete_attendee_feedback_with_attendee.py
|
jenslauterbach/devday_website
|
a827c9237e656842542eff07ec9fa7b39716a0ee
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 9
|
2018-09-30T13:17:21.000Z
|
2020-10-03T12:55:05.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-29 12:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("attendee", "0009_auto_20181020_0802"),
("event", "0007_event_voting_open"),
("talk", "0041_drop_unique_constraint_on_talkslot"),
]
operations = [
migrations.AlterField(
model_name="attendeefeedback",
name="attendee",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="attendee.Attendee",
verbose_name="Attendee",
),
)
]
| 26.566667
| 60
| 0.588457
|
f9cdee0ce7a58c8128d3676b25266565f52c51d3
| 537
|
py
|
Python
|
easyrules/support/reader/rule_definition_reader.py
|
wffzxyl/easyrules
|
8874718fec629435c69ce360cd43a281da162627
|
[
"MIT"
] | 1
|
2020-10-03T12:34:01.000Z
|
2020-10-03T12:34:01.000Z
|
easyrules/support/reader/rule_definition_reader.py
|
wffzxyl/easyrules
|
8874718fec629435c69ce360cd43a281da162627
|
[
"MIT"
] | null | null | null |
easyrules/support/reader/rule_definition_reader.py
|
wffzxyl/easyrules
|
8874718fec629435c69ce360cd43a281da162627
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Rule definition reader interface.
"""
from abc import ABCMeta, abstractmethod
from typing import List
from easyrules.utils import exception_handler
class RuleDefinitionReader(metaclass=ABCMeta):
@exception_handler
@abstractmethod
def read(self, file_path: str) -> List:
"""
Read a list of rule definitions from a structured file.
The file is expected to contain a collection of rule definitions even for a single rule.
:return: a list of rule definitions
"""
pass
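# --- Editor's illustrative sketch, not part of the original module ---
# A hedged example of a concrete reader; the JSON file format and the class
# name are assumptions, not something defined by the library.
class JsonRuleDefinitionReader(RuleDefinitionReader):
    def read(self, file_path: str) -> List:
        import json
        with open(file_path, encoding='utf-8') as f:
            definitions = json.load(f)
        # Always hand back a list, even when the file holds a single rule definition.
        return definitions if isinstance(definitions, list) else [definitions]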
| 25.571429
| 96
| 0.6946
|
f6247a0962f97c4e574a50a886fbfb77814fd561
| 15,474
|
py
|
Python
|
PythonNetwork/venv/Lib/site-packages/mysql/connector/django/introspection.py
|
Moldovandreii/RepetitionCount
|
b5715b0948b609fde0ce05d45023b7d4cfd635e7
|
[
"FTL"
] | 1
|
2021-04-09T15:23:40.000Z
|
2021-04-09T15:23:40.000Z
|
PythonNetwork/venv/Lib/site-packages/mysql/connector/django/introspection.py
|
Moldovandreii/RepetitionCount
|
b5715b0948b609fde0ce05d45023b7d4cfd635e7
|
[
"FTL"
] | null | null | null |
PythonNetwork/venv/Lib/site-packages/mysql/connector/django/introspection.py
|
Moldovandreii/RepetitionCount
|
b5715b0948b609fde0ce05d45023b7d4cfd635e7
|
[
"FTL"
] | 1
|
2021-03-28T18:09:09.000Z
|
2021-03-28T18:09:09.000Z
|
# Copyright (c) 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from collections import namedtuple
import sqlparse
from mysql.connector.constants import FieldType
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models import Index
from django.utils.datastructures import OrderedSet
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('extra', 'is_unsigned', 'has_json_constraint'))
InfoLine = namedtuple('InfoLine', 'col_name data_type max_len num_prec num_scale extra column_default is_unsigned')
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = {
FieldType.BLOB: 'TextField',
FieldType.DECIMAL: 'DecimalField',
FieldType.NEWDECIMAL: 'DecimalField',
FieldType.DATE: 'DateField',
FieldType.DATETIME: 'DateTimeField',
FieldType.DOUBLE: 'FloatField',
FieldType.FLOAT: 'FloatField',
FieldType.INT24: 'IntegerField',
FieldType.JSON: 'JSONField',
FieldType.LONG: 'IntegerField',
FieldType.LONGLONG: 'BigIntegerField',
FieldType.SHORT: 'SmallIntegerField',
FieldType.STRING: 'CharField',
FieldType.TIME: 'TimeField',
FieldType.TIMESTAMP: 'DateTimeField',
FieldType.TINY: 'IntegerField',
FieldType.TINY_BLOB: 'TextField',
FieldType.MEDIUM_BLOB: 'TextField',
FieldType.LONG_BLOB: 'TextField',
FieldType.VAR_STRING: 'CharField',
}
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if 'auto_increment' in description.extra:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
elif field_type == 'SmallIntegerField':
return 'SmallAutoField'
if description.is_unsigned:
if field_type == 'BigIntegerField':
return 'PositiveBigIntegerField'
elif field_type == 'IntegerField':
return 'PositiveIntegerField'
elif field_type == 'SmallIntegerField':
return 'PositiveSmallIntegerField'
# JSON data type is an alias for LONGTEXT in MariaDB, use check
# constraints clauses to introspect JSONField.
if description.has_json_constraint:
return 'JSONField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
        interface.
"""
json_constraints = {}
if self.connection.mysql_is_mariadb and self.connection.features.can_introspect_json_field:
# JSON data type is an alias for LONGTEXT in MariaDB, select
# JSON_VALID() constraints to introspect JSONField.
cursor.execute("""
SELECT c.constraint_name AS column_name
FROM information_schema.check_constraints AS c
WHERE
c.table_name = %s AND
LOWER(c.check_clause) = 'json_valid(`' + LOWER(c.constraint_name) + '`)' AND
c.constraint_schema = DATABASE()
""", [table_name])
json_constraints = {row[0] for row in cursor.fetchall()}
# information_schema database gives more accurate results for some figures:
# - varchar length returned by cursor.description is an internal length,
# not visible length (#5725)
# - precision and scale (for decimal fields) (#5014)
# - auto_increment is not available in cursor.description
cursor.execute("""
SELECT
column_name, data_type, character_maximum_length,
numeric_precision, numeric_scale, extra, column_default,
CASE
WHEN column_type LIKE '%% unsigned' THEN 1
ELSE 0
END AS is_unsigned
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()""", [table_name])
field_info = {line[0]: InfoLine(*line) for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
def to_int(i):
return int(i) if i is not None else i
fields = []
for line in cursor.description:
info = field_info[line[0]]
name, type_code, display_size = line[:3]
fields.append(FieldInfo(
name,
type_code,
display_size,
to_int(info.max_len) or line[3],
to_int(info.num_prec) or line[4],
to_int(info.num_scale) or line[5],
line[6],
info.column_default,
info.extra,
info.is_unsigned,
line[0] in json_constraints
))
return fields
def get_sequences(self, cursor, table_name, table_fields=()):
for field_info in self.get_table_description(cursor, table_name):
if 'auto_increment' in field_info.extra:
# MySQL allows only one auto-increment column per table.
return [{'table': table_name, 'column': field_info.name}]
return []
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
relations[my_fieldname] = (other_field, other_table)
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in the given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
cursor.execute("SHOW INDEX FROM {0}"
"".format(self.connection.ops.quote_name(table_name)))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
if row[4] not in indexes:
indexes[row[4]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in
# separate indexes.
if row[2] == 'PRIMARY':
indexes[row[4]]['primary_key'] = True
if not row[1]:
indexes[row[4]]['unique'] = True
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table
"""
for column in self.get_indexes(cursor, table_name).items():
if column[1]['primary_key']:
return column[0]
return None
def get_storage_engine(self, cursor, table_name):
"""
Retrieve the storage engine for a given table. Return the default
storage engine if the table doesn't exist.
"""
cursor.execute(
"SELECT engine "
"FROM information_schema.tables "
"WHERE table_name = %s", [table_name])
result = cursor.fetchone()
if not result:
return self.connection.features._mysql_storage_engine
return result[0]
def _parse_constraint_columns(self, check_clause, columns):
check_columns = OrderedSet()
statement = sqlparse.parse(check_clause)[0]
tokens = (token for token in statement.flatten() if not token.is_whitespace)
for token in tokens:
if (
token.ttype == sqlparse.tokens.Name and
self.connection.ops.quote_name(token.value) == token.value and
token.value[1:-1] in columns
):
check_columns.add(token.value[1:-1])
return check_columns
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = DATABASE() AND
kc.table_name = %s
ORDER BY kc.`ordinal_position`
"""
cursor.execute(name_query, [table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = DATABASE() AND
c.table_name = %s
"""
cursor.execute(type_query, [table_name])
for constraint, kind in cursor.fetchall():
if kind.lower() == "primary key":
constraints[constraint]['primary_key'] = True
constraints[constraint]['unique'] = True
elif kind.lower() == "unique":
constraints[constraint]['unique'] = True
# Add check constraints.
if self.connection.features.can_introspect_check_constraints:
unnamed_constraints_index = 0
columns = {info.name for info in self.get_table_description(cursor, table_name)}
if self.connection.mysql_is_mariadb:
type_query = """
SELECT c.constraint_name, c.check_clause
FROM information_schema.check_constraints AS c
WHERE
c.constraint_schema = DATABASE() AND
c.table_name = %s
"""
else:
type_query = """
SELECT cc.constraint_name, cc.check_clause
FROM
information_schema.check_constraints AS cc,
information_schema.table_constraints AS tc
WHERE
cc.constraint_schema = DATABASE() AND
tc.table_schema = cc.constraint_schema AND
cc.constraint_name = tc.constraint_name AND
tc.constraint_type = 'CHECK' AND
tc.table_name = %s
"""
cursor.execute(type_query, [table_name])
for constraint, check_clause in cursor.fetchall():
constraint_columns = self._parse_constraint_columns(check_clause, columns)
# Ensure uniqueness of unnamed constraints. Unnamed unique
# and check columns constraints have the same name as
# a column.
if set(constraint_columns) == {constraint}:
unnamed_constraints_index += 1
constraint = '__unnamed_constraint_%s__' % unnamed_constraints_index
constraints[constraint] = {
'columns': constraint_columns,
'primary_key': False,
'unique': False,
'index': False,
'check': True,
'foreign_key': None,
}
# Now add in the indexes
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
for table, non_unique, index, colseq, column, type_ in [x[:5] + (x[10],) for x in cursor.fetchall()]:
if index not in constraints:
constraints[index] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'check': False,
'foreign_key': None,
}
constraints[index]['index'] = True
constraints[index]['type'] = Index.suffix if type_ == 'BTREE' else type_.lower()
constraints[index]['columns'].add(column)
# Convert the sorted sets to lists
for constraint in constraints.values():
constraint['columns'] = list(constraint['columns'])
return constraints
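# --- Editor's illustrative usage sketch, not part of the original module ---
def _example_introspect(table_name='auth_user'):
    # Hedged sketch: in a configured Django project this backend is reached
    # through `connection.introspection`. The table name is illustrative.
    from django.db import connection

    with connection.cursor() as cursor:
        tables = connection.introspection.get_table_list(cursor)
        description = connection.introspection.get_table_description(cursor, table_name)
        constraints = connection.introspection.get_constraints(cursor, table_name)
    return tables, description, constraints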
| 43.711864
| 115
| 0.597131
|
0ccdb4b618383146e0de733c5d407498a806594a
| 114
|
py
|
Python
|
snake/snake/__init__.py
|
JoyPang123/RL-Explore-with-Own-made-Env
|
4f527236571ea8a8d700d2d56c1718f2126d2d8d
|
[
"MIT"
] | 1
|
2021-12-07T07:37:40.000Z
|
2021-12-07T07:37:40.000Z
|
snake/snake/__init__.py
|
JoyPang123/RL-Explore-with-Own-made-Env
|
4f527236571ea8a8d700d2d56c1718f2126d2d8d
|
[
"MIT"
] | null | null | null |
snake/snake/__init__.py
|
JoyPang123/RL-Explore-with-Own-made-Env
|
4f527236571ea8a8d700d2d56c1718f2126d2d8d
|
[
"MIT"
] | null | null | null |
from gym.envs.registration import register
register(
id='snake-v0',
entry_point='snake.envs:SnakeEnv',
)
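def _example_make_env():
    # Editor's hedged usage sketch, not part of the original package: importing
    # this package runs register() above, after which the environment is created
    # through the regular gym factory.
    import gym
    env = gym.make('snake-v0')
    observation = env.reset()
    return env, observation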
| 16.285714
| 42
| 0.719298
|
b91e2edf9a6bf573759a03f1b8cee7f96e3e60d7
| 1,863
|
py
|
Python
|
Visualization/styled_layer_descriptors.py
|
monocilindro/qgis-earthengine-examples
|
82aea8926d34ed3f4ad4a4a345ddbd225819d28f
|
[
"MIT"
] | 646
|
2019-12-03T06:09:03.000Z
|
2022-03-28T03:37:08.000Z
|
Visualization/styled_layer_descriptors.py
|
csaybar/qgis-earthengine-examples
|
ba8942683834d2847ff3246bdd1859b36e50fe44
|
[
"MIT"
] | 10
|
2019-12-30T03:42:44.000Z
|
2021-05-22T07:34:07.000Z
|
Visualization/styled_layer_descriptors.py
|
csaybar/qgis-earthengine-examples
|
ba8942683834d2847ff3246bdd1859b36e50fe44
|
[
"MIT"
] | 219
|
2019-12-06T02:20:53.000Z
|
2022-03-30T15:14:27.000Z
|
import ee
from ee_plugin import Map
cover = ee.Image('MODIS/051/MCD12Q1/2012_01_01').select('Land_Cover_Type_1')
# Define an SLD style of discrete intervals to apply to the image.
sld_intervals = \
'<RasterSymbolizer>' + \
' <ColorMap type="intervals" extended="false" >' + \
'<ColorMapEntry color="#aec3d4" quantity="0" label="Water"/>' + \
'<ColorMapEntry color="#152106" quantity="1" label="Evergreen Needleleaf Forest"/>' + \
'<ColorMapEntry color="#225129" quantity="2" label="Evergreen Broadleaf Forest"/>' + \
'<ColorMapEntry color="#369b47" quantity="3" label="Deciduous Needleleaf Forest"/>' + \
'<ColorMapEntry color="#30eb5b" quantity="4" label="Deciduous Broadleaf Forest"/>' + \
'<ColorMapEntry color="#387242" quantity="5" label="Mixed Deciduous Forest"/>' + \
'<ColorMapEntry color="#6a2325" quantity="6" label="Closed Shrubland"/>' + \
'<ColorMapEntry color="#c3aa69" quantity="7" label="Open Shrubland"/>' + \
'<ColorMapEntry color="#b76031" quantity="8" label="Woody Savanna"/>' + \
'<ColorMapEntry color="#d9903d" quantity="9" label="Savanna"/>' + \
'<ColorMapEntry color="#91af40" quantity="10" label="Grassland"/>' + \
'<ColorMapEntry color="#111149" quantity="11" label="Permanent Wetland"/>' + \
'<ColorMapEntry color="#cdb33b" quantity="12" label="Cropland"/>' + \
'<ColorMapEntry color="#cc0013" quantity="13" label="Urban"/>' + \
'<ColorMapEntry color="#33280d" quantity="14" label="Crop, Natural Veg. Mosaic"/>' + \
'<ColorMapEntry color="#d7cdcc" quantity="15" label="Permanent Snow, Ice"/>' + \
'<ColorMapEntry color="#f7e084" quantity="16" label="Barren, Desert"/>' + \
'<ColorMapEntry color="#6f6f6f" quantity="17" label="Tundra"/>' + \
'</ColorMap>' + \
'</RasterSymbolizer>'
Map.addLayer(cover.sldStyle(sld_intervals), {}, 'IGBP classification styled')
| 62.1
| 91
| 0.66774
|
ef5d7eb08a00d544a732b7adb80dadd78a39fbf0
| 2,954
|
py
|
Python
|
Testscripts/Linux/BVT-VERIFY-UUID-FSTAB.py
|
senugala/LISAv2
|
42edd3e40fb0ee847bdd915343c4c064256ede4c
|
[
"Apache-2.0"
] | null | null | null |
Testscripts/Linux/BVT-VERIFY-UUID-FSTAB.py
|
senugala/LISAv2
|
42edd3e40fb0ee847bdd915343c4c064256ede4c
|
[
"Apache-2.0"
] | null | null | null |
Testscripts/Linux/BVT-VERIFY-UUID-FSTAB.py
|
senugala/LISAv2
|
42edd3e40fb0ee847bdd915343c4c064256ede4c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache License.
from azuremodules import *
import sys
import time
import re
import linecache
def RunTest():
UpdateState("TestRunning")
uuid_from_demesg = 0
dmsg_dev_count = 0
output = JustRun("dmesg")
output = output.lower()
filter_condition_dmesg = r'.*root=UUID=(.*?) .*'
filter_condition_fstab = r'.*UUID=(.*?)[ \t]+\/[ \t]+'
    if (DetectDistro()[0] == 'opensuse' or DetectDistro()[0] == 'SUSE' or DetectDistro()[0] == 'sles'):
filter_condition_dmesg = r'.*root=/dev/disk/by-uuid/(.*?) .*'
filter_condition_fstab = r'.*/dev/disk/by-uuid/(.*?)[ \t]+\/[ \t]+'
        # str.count() does not treat its argument as a regular expression, so use
        # re to count kernel command lines that mount root by /dev/sd* name.
        dmsg_dev_count = len(re.findall(r'command line:.*root=/dev/sd', output))
outputlist = re.split("\n", output)
for line in outputlist:
matchObj = re.match(filter_condition_dmesg, line, re.M|re.I)
if matchObj:
uuid_from_demesg = matchObj.group(1)
uuid_from_fstab = 0
fstab_dev_count = 0
    output = JustRun("cat /etc/fstab")
fstab_dev_count = output.count('/dev/sd')
outputlist = re.split("\n", output)
for line in outputlist:
matchObj = re.match(filter_condition_fstab, line, re.M|re.I)
#matchObj = re.match( r'.*UUID=(.*?)[ \t]*/ .*', line, re.M|re.I)
if matchObj:
uuid_from_fstab = matchObj.group(1)
if(uuid_from_demesg and uuid_from_fstab and (uuid_from_demesg == uuid_from_fstab) and (dmsg_dev_count == 0) and (fstab_dev_count == 0)):
ResultLog.info('PASS')
#print "UUID are valid and matched"
elif (DetectDistro()[0] == 'coreos'):
output = JustRun("dmesg | grep root")
if ("root=LABEL" in output):
RunLog.info('CoreOS uses disk labels to specify drives.')
ResultLog.info('PASS')
else:
RunLog.info('root partition is not mounted using LABEL in dmesg.')
ResultLog.info('FAIL')
elif(DetectDistro()[0] == 'ubuntu' and fstab_dev_count == 1):
if (uuid_from_demesg != 0 and uuid_from_fstab != 0 and uuid_from_demesg == uuid_from_fstab and dmsg_dev_count == 0):
ResultLog.info('PASS')
else:
ResultLog.info('FAIL')
else:
if (uuid_from_demesg == 0):
RunLog.info('/ partition is not mounted using UUID in dmesg.')
if (uuid_from_fstab == 0):
RunLog.info('/ partition is not mounted using UUID in /etc/fstab.')
if (uuid_from_demesg != uuid_from_fstab):
RunLog.info(' UUID is not same in dmesg and /etc/fstab.')
if (dmsg_dev_count != 0):
RunLog.info('Found disks mounted without using UUID in dmesg.')
if (fstab_dev_count != 0):
RunLog.info('Found disks mounted without using UUID in /etc/fstab.')
ResultLog.info('FAIL')
UpdateState("TestCompleted")
RunTest()
| 38.363636
| 140
| 0.61239
|
69d6fdf642e999bacb659a1db08c335d0680dc96
| 437
|
py
|
Python
|
experiment/util.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
experiment/util.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
experiment/util.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
import lab as B
__all__ = ["with_err"]
def with_err(vals, and_lower=False, and_upper=False):
"""Print the mean value of a list of values with error."""
vals = B.to_numpy(vals)
mean = B.mean(vals)
err = 1.96 * B.std(vals) / B.sqrt(B.length(vals))
res = f"{mean:10.5f} +- {err:10.5f}"
if and_lower:
res += f" ({mean - err:10.5f})"
if and_upper:
res += f" ({mean + err:10.5f})"
return res
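def _example_with_err():
    # Editor's hedged usage sketch, not part of the original module: summarise a
    # batch of metric values; the sample data is invented for illustration.
    import numpy as np

    vals = np.random.randn(100)
    print(with_err(vals))                                  # mean +- 1.96 * standard error
    print(with_err(vals, and_lower=True, and_upper=True))  # also print the interval bounds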
| 25.705882
| 62
| 0.574371
|
a855f5aed2d28386615e30726f906c83f7f34de0
| 2,745
|
py
|
Python
|
sdk/python/pulumi_azure_native/domainregistration/v20180201/list_domain_recommendations.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/domainregistration/v20180201/list_domain_recommendations.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/domainregistration/v20180201/list_domain_recommendations.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListDomainRecommendationsResult',
'AwaitableListDomainRecommendationsResult',
'list_domain_recommendations',
]
@pulumi.output_type
class ListDomainRecommendationsResult:
"""
Collection of domain name identifiers.
"""
def __init__(__self__, next_link=None, value=None):
if next_link and not isinstance(next_link, str):
raise TypeError("Expected argument 'next_link' to be a str")
pulumi.set(__self__, "next_link", next_link)
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="nextLink")
def next_link(self) -> str:
"""
Link to next page of resources.
"""
return pulumi.get(self, "next_link")
@property
@pulumi.getter
def value(self) -> Sequence['outputs.NameIdentifierResponse']:
"""
Collection of resources.
"""
return pulumi.get(self, "value")
class AwaitableListDomainRecommendationsResult(ListDomainRecommendationsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDomainRecommendationsResult(
next_link=self.next_link,
value=self.value)
def list_domain_recommendations(keywords: Optional[str] = None,
max_domain_recommendations: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDomainRecommendationsResult:
"""
Collection of domain name identifiers.
:param str keywords: Keywords to be used for generating domain recommendations.
:param int max_domain_recommendations: Maximum number of recommendations.
"""
__args__ = dict()
__args__['keywords'] = keywords
__args__['maxDomainRecommendations'] = max_domain_recommendations
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:domainregistration/v20180201:listDomainRecommendations', __args__, opts=opts, typ=ListDomainRecommendationsResult).value
return AwaitableListDomainRecommendationsResult(
next_link=__ret__.next_link,
value=__ret__.value)
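def _example_list_recommendations():
    # Editor's hedged usage sketch, not part of the generated SDK: this invoke
    # must run inside a Pulumi program; the keyword values are illustrative.
    result = list_domain_recommendations(keywords='contoso', max_domain_recommendations=5)
    for identifier in result.value:
        # Each entry is a NameIdentifierResponse describing one suggested domain.
        print(identifier)
    return result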
| 34.3125
| 170
| 0.685246
|
b47da3782c873de9cc822c18a93afdfcfdb98d0f
| 40,775
|
py
|
Python
|
thonny/tktextext.py
|
Natureshadow/thonny
|
3dfc846829968ae8294b61bbdf154278850b29df
|
[
"MIT"
] | null | null | null |
thonny/tktextext.py
|
Natureshadow/thonny
|
3dfc846829968ae8294b61bbdf154278850b29df
|
[
"MIT"
] | null | null | null |
thonny/tktextext.py
|
Natureshadow/thonny
|
3dfc846829968ae8294b61bbdf154278850b29df
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""Extensions for tk.Text"""
import platform
import time
import tkinter as tk
import traceback
from logging import exception
from tkinter import TclError, messagebox
from tkinter import font as tkfont
from tkinter import ttk
class TweakableText(tk.Text):
"""Allows intercepting Text commands at Tcl-level"""
def __init__(self, master=None, cnf={}, read_only=False, **kw):
super().__init__(master=master, cnf=cnf, **kw)
self._read_only = read_only
self._suppress_events = False
self._original_widget_name = self._w + "_orig"
self.tk.call("rename", self._w, self._original_widget_name)
self.tk.createcommand(self._w, self._dispatch_tk_operation)
self._tk_proxies = {}
self._original_insert = self._register_tk_proxy_function("insert", self.intercept_insert)
self._original_delete = self._register_tk_proxy_function("delete", self.intercept_delete)
self._original_mark = self._register_tk_proxy_function("mark", self.intercept_mark)
def _register_tk_proxy_function(self, operation, function):
self._tk_proxies[operation] = function
setattr(self, operation, function)
def original_function(*args):
self.tk.call((self._original_widget_name, operation) + args)
return original_function
def _dispatch_tk_operation(self, operation, *args):
f = self._tk_proxies.get(operation)
try:
if f:
return f(*args)
else:
return self.tk.call((self._original_widget_name, operation) + args)
except TclError as e:
# Some Tk internal actions (eg. paste and cut) can cause this error
if (
str(e).lower() == '''text doesn't contain any characters tagged with "sel"'''
and operation in ["delete", "index", "get"]
and args in [("sel.first", "sel.last"), ("sel.first",)]
):
pass
else:
exception(
"[_dispatch_tk_operation] operation: " + operation + ", args:" + repr(args)
)
# traceback.print_exc()
return "" # Taken from idlelib.WidgetRedirector
def set_read_only(self, value):
self._read_only = value
def is_read_only(self):
return self._read_only
def set_content(self, chars):
self.direct_delete("1.0", tk.END)
self.direct_insert("1.0", chars)
def set_insertwidth(self, new_width):
"""Change cursor width
NB! Need to be careful with setting text["insertwidth"]!
        My first straightforward solution caused an unexplainable
        infinite loop of insertions and deletions in the text
(Repro: insert a line and a word, select that word and then do Ctrl-Z).
This solution seems safe but be careful!
"""
if self._suppress_events:
return
if self["insertwidth"] != new_width:
old_suppress = self._suppress_events
try:
self._suppress_events = True
self.config(insertwidth=new_width)
finally:
self._suppress_events = old_suppress
def intercept_mark(self, *args):
self.direct_mark(*args)
def intercept_insert(self, index, chars, tags=None, **kw):
assert isinstance(chars, str)
if chars >= "\uf704" and chars <= "\uf70d": # Function keys F1..F10 in Mac cause these
pass
elif self.is_read_only():
self.bell()
else:
self.direct_insert(index, chars, tags, **kw)
def intercept_delete(self, index1, index2=None, **kw):
if index1 == "sel.first" and index2 == "sel.last" and not self.has_selection():
return
if self.is_read_only():
self.bell()
elif self._is_erroneous_delete(index1, index2):
pass
else:
self.direct_delete(index1, index2, **kw)
def _is_erroneous_delete(self, index1, index2):
"""Paste can cause deletes where index1 is sel.start but text has no selection. This would cause errors"""
return index1.startswith("sel.") and not self.has_selection()
def direct_mark(self, *args):
self._original_mark(*args)
if args[:2] == ("set", "insert") and not self._suppress_events:
self.event_generate("<<CursorMove>>")
def index_sel_first(self):
# Tk will give error without this check
if self.tag_ranges("sel"):
return self.index("sel.first")
else:
return None
def index_sel_last(self):
if self.tag_ranges("sel"):
return self.index("sel.last")
else:
return None
def has_selection(self):
return len(self.tag_ranges("sel")) > 0
def get_selection_indices(self):
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
if self.has_selection():
return self.index("sel.first"), self.index("sel.last")
else:
return None, None
def direct_insert(self, index, chars, tags=None, **kw):
self._original_insert(index, chars, tags, **kw)
if not self._suppress_events:
self.event_generate("<<TextChange>>")
def direct_delete(self, index1, index2=None, **kw):
self._original_delete(index1, index2, **kw)
if not self._suppress_events:
self.event_generate("<<TextChange>>")
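def _example_tweakable_text():
    # Editor's hedged usage sketch, not part of the original module: a read-only
    # widget whose content can still be changed programmatically via the
    # direct_* methods (typing into it only rings the bell).
    root = tk.Tk()
    text = TweakableText(root, read_only=True)
    text.set_content("You cannot type here,\nbut set_content() still works.")
    text.pack(fill="both", expand=True)
    root.mainloop()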
class EnhancedText(TweakableText):
"""Text widget with extra navigation and editing aids.
Provides more comfortable deletion, indentation and deindentation,
and undo handling. Not specific to Python code.
Most of the code is adapted from idlelib.EditorWindow.
"""
def __init__(
self,
master=None,
style="Text",
tag_current_line=False,
indent_with_tabs=False,
replace_tabs=False,
cnf={},
**kw
):
# Parent class shouldn't autoseparate
# TODO: take client provided autoseparators value into account
kw["autoseparators"] = False
self._style = style
self._original_options = kw.copy()
super().__init__(master=master, cnf=cnf, **kw)
self.tabwidth = 8 # See comments in idlelib.editor.EditorWindow
self.indent_width = 4
self.indent_with_tabs = indent_with_tabs
self.replace_tabs = replace_tabs
self._last_event_kind = None
self._last_key_time = None
self._bind_editing_aids()
self._bind_movement_aids()
self._bind_selection_aids()
self._bind_undo_aids()
self._bind_mouse_aids()
self._ui_theme_change_binding = self.bind(
"<<ThemeChanged>>", self._reload_theme_options, True
)
self._initial_configuration = self.configure()
self._regular_insertwidth = self["insertwidth"]
self._reload_theme_options()
self._should_tag_current_line = tag_current_line
if tag_current_line:
self.bind("<<CursorMove>>", self._tag_current_line, True)
self.bind("<<TextChange>>", self._tag_current_line, True)
self._tag_current_line()
def _bind_mouse_aids(self):
if _running_on_mac():
self.bind("<Button-2>", self.on_secondary_click)
self.bind("<Control-Button-1>", self.on_secondary_click)
else:
self.bind("<Button-3>", self.on_secondary_click)
def _bind_editing_aids(self):
def if_not_readonly(fun):
def dispatch(event):
if not self.is_read_only():
return fun(event)
else:
return "break"
return dispatch
self.bind("<Control-BackSpace>", if_not_readonly(self.delete_word_left), True)
self.bind("<Control-Delete>", if_not_readonly(self.delete_word_right), True)
self.bind("<Control-d>", self._redirect_ctrld, True)
self.bind("<Control-t>", self._redirect_ctrlt, True)
self.bind("<BackSpace>", if_not_readonly(self.perform_smart_backspace), True)
self.bind("<Return>", if_not_readonly(self.perform_return), True)
self.bind("<KP_Enter>", if_not_readonly(self.perform_return), True)
self.bind("<Tab>", if_not_readonly(self.perform_tab), True)
try:
# Is needed on eg. Ubuntu with Estonian keyboard
self.bind("<ISO_Left_Tab>", if_not_readonly(self.perform_tab), True)
except Exception:
pass
if platform.system() == "Windows":
self.bind("<KeyPress>", self._insert_untypable_characters_on_windows, True)
def _bind_movement_aids(self):
self.bind("<Home>", self.perform_smart_home, True)
self.bind("<Left>", self.move_to_edge_if_selection(0), True)
self.bind("<Right>", self.move_to_edge_if_selection(1), True)
self.bind("<Next>", self.perform_page_down, True)
self.bind("<Prior>", self.perform_page_up, True)
def _bind_selection_aids(self):
self.bind("<Command-a>" if _running_on_mac() else "<Control-a>", self.select_all, True)
def _bind_undo_aids(self):
self.bind("<<Undo>>", self._on_undo, True)
self.bind("<<Redo>>", self._on_redo, True)
self.bind("<<Cut>>", self._on_cut, True)
self.bind("<<Copy>>", self._on_copy, True)
self.bind("<<Paste>>", self._on_paste, True)
self.bind("<FocusIn>", self._on_get_focus, True)
self.bind("<FocusOut>", self._on_lose_focus, True)
self.bind("<Key>", self._on_key_press, True)
self.bind("<1>", self._on_mouse_click, True)
self.bind("<2>", self._on_mouse_click, True)
self.bind("<3>", self._on_mouse_click, True)
def _redirect_ctrld(self, event):
# I want to disable the deletion effect of Ctrl-D in the text but still
# keep the event for other purposes
self.event_generate("<<CtrlDInText>>")
return "break"
def _redirect_ctrlt(self, event):
# I want to disable the swap effect of Ctrl-T in the text but still
# keep the event for other purposes
self.event_generate("<<CtrlTInText>>")
return "break"
def tag_reset(self, tag_name):
empty_conf = {key: "" for key in self.tag_configure(tag_name)}
self.tag_configure(empty_conf)
def select_lines(self, first_line, last_line):
self.tag_remove("sel", "1.0", tk.END)
self.tag_add("sel", "%s.0" % first_line, "%s.end" % last_line)
def delete_word_left(self, event):
self.event_generate("<Meta-Delete>")
self.edit_separator()
return "break"
def delete_word_right(self, event):
self.event_generate("<Meta-d>")
self.edit_separator()
return "break"
def perform_smart_backspace(self, event):
self._log_keypress_for_undo(event)
text = self
first, last = self.get_selection_indices()
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
return "break"
# Delete whitespace left, until hitting a real char or closest
# preceding virtual tab stop.
chars = text.get("insert linestart", "insert")
if chars == "":
if text.compare("insert", ">", "1.0"):
# easy: delete preceding newline
text.delete("insert-1c")
else:
text.bell() # at start of buffer
return "break"
if (
chars.strip() != ""
): # there are non-whitespace chars somewhere to the left of the cursor
# easy: delete preceding real char
text.delete("insert-1c")
self._log_keypress_for_undo(event)
return "break"
# Ick. It may require *inserting* spaces if we back up over a
# tab character! This is written to be clear, not fast.
have = len(chars.expandtabs(self.tabwidth))
assert have > 0
want = ((have - 1) // self.indent_width) * self.indent_width
# Debug prompt is multilined....
# if self.context_use_ps1:
# last_line_of_prompt = sys.ps1.split('\n')[-1]
# else:
last_line_of_prompt = ""
ncharsdeleted = 0
while 1:
if chars == last_line_of_prompt:
break
chars = chars[:-1]
ncharsdeleted = ncharsdeleted + 1
have = len(chars.expandtabs(self.tabwidth))
if have <= want or chars[-1] not in " \t":
break
text.delete("insert-%dc" % ncharsdeleted, "insert")
if have < want:
text.insert("insert", " " * (want - have))
return "break"
def perform_midline_tab(self, event=None):
"autocompleter can put its magic here"
# by default
return self.perform_smart_tab(event)
def perform_smart_tab(self, event=None):
self._log_keypress_for_undo(event)
# if intraline selection:
# delete it
# elif multiline selection:
# do indent-region
# else:
# indent one level
first, last = self.get_selection_indices()
if first and last:
if index2line(first) != index2line(last):
return self.indent_region(event)
self.delete(first, last)
self.mark_set("insert", first)
prefix = self.get("insert linestart", "insert")
raw, effective = classifyws(prefix, self.tabwidth)
if raw == len(prefix):
# only whitespace to the left
self._reindent_to(effective + self.indent_width)
else:
# tab to the next 'stop' within or to right of line's text:
if self.indent_with_tabs:
pad = "\t"
else:
effective = len(prefix.expandtabs(self.tabwidth))
n = self.indent_width
pad = " " * (n - effective % n)
self.insert("insert", pad)
self.see("insert")
return "break"
def get_cursor_position(self):
return map(int, self.index("insert").split("."))
def get_line_count(self):
return list(map(int, self.index("end-1c").split(".")))[0]
def perform_return(self, event):
self.insert("insert", "\n")
return "break"
def perform_page_down(self, event):
# if last line is visible then go to last line
# (by default it doesn't move then)
try:
last_visible_idx = self.index("@0,%d" % self.winfo_height())
row, _ = map(int, last_visible_idx.split("."))
line_count = self.get_line_count()
if row == line_count or row == line_count - 1: # otherwise tk doesn't show last line
self.mark_set("insert", "end")
except Exception:
traceback.print_exc()
def perform_page_up(self, event):
# if first line is visible then go there
# (by default it doesn't move then)
try:
first_visible_idx = self.index("@0,0")
row, _ = map(int, first_visible_idx.split("."))
if row == 1:
self.mark_set("insert", "1.0")
except Exception:
traceback.print_exc()
def compute_smart_home_destination_index(self):
"""Is overridden in shell"""
line = self.get("insert linestart", "insert lineend")
for insertpt in range(len(line)):
if line[insertpt] not in (" ", "\t"):
break
else:
insertpt = len(line)
lineat = int(self.index("insert").split(".")[1])
if insertpt == lineat:
insertpt = 0
return "insert linestart+" + str(insertpt) + "c"
def perform_smart_home(self, event):
if (event.state & 4) != 0 and event.keysym == "Home":
# state&4==Control. If <Control-Home>, use the Tk binding.
return None
dest = self.compute_smart_home_destination_index()
if (event.state & 1) == 0:
# shift was not pressed
self.tag_remove("sel", "1.0", "end")
else:
if not self.index_sel_first():
# there was no previous selection
self.mark_set("my_anchor", "insert")
else:
if self.compare(self.index_sel_first(), "<", self.index("insert")):
self.mark_set("my_anchor", "sel.first") # extend back
else:
self.mark_set("my_anchor", "sel.last") # extend forward
first = self.index(dest)
last = self.index("my_anchor")
if self.compare(first, ">", last):
first, last = last, first
self.tag_remove("sel", "1.0", "end")
self.tag_add("sel", first, last)
self.mark_set("insert", dest)
self.see("insert")
return "break"
def move_to_edge_if_selection(self, edge_index):
"""Cursor move begins at start or end of selection
When a left/right cursor key is pressed create and return to Tkinter a
function which causes a cursor move from the associated edge of the
selection.
"""
def move_at_edge(event):
if (
self.has_selection() and (event.state & 5) == 0
): # no shift(==1) or control(==4) pressed
try:
self.mark_set("insert", ("sel.first+1c", "sel.last-1c")[edge_index])
except tk.TclError:
pass
return move_at_edge
def perform_tab(self, event=None):
self._log_keypress_for_undo(event)
if event.state & 0x0001: # shift is pressed (http://stackoverflow.com/q/32426250/261181)
return self.dedent_region(event)
else:
# check whether there are letters before cursor on this line
index = self.index("insert")
left_text = self.get(index + " linestart", index)
if left_text.strip() == "" or self.has_selection():
return self.perform_smart_tab(event)
else:
return self.perform_midline_tab(event)
def indent_region(self, event=None):
return self._change_indentation(True)
def dedent_region(self, event=None):
return self._change_indentation(False)
def _change_indentation(self, increase=True):
head, tail, chars, lines = self._get_region()
# Text widget plays tricks if selection ends on last line
# and content doesn't end with empty line,
text_last_line = index2line(self.index("end-1c"))
sel_last_line = index2line(tail)
if sel_last_line >= text_last_line:
while not self.get(head, "end").endswith("\n\n"):
self.insert("end", "\n")
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
if increase:
effective = effective + self.indent_width
else:
effective = max(effective - self.indent_width, 0)
lines[pos] = self._make_blanks(effective) + line[raw:]
self._set_region(head, tail, chars, lines)
return "break"
def select_all(self, event):
self.tag_remove("sel", "1.0", tk.END)
self.tag_add("sel", "1.0", tk.END)
def set_read_only(self, value):
if value == self.is_read_only():
return
TweakableText.set_read_only(self, value)
self._reload_theme_options()
if self._should_tag_current_line:
self._tag_current_line()
def _reindent_to(self, column):
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
if self.compare("insert linestart", "!=", "insert"):
self.delete("insert linestart", "insert")
if column:
self.insert("insert", self._make_blanks(column))
def _get_region(self):
first, last = self.get_selection_indices()
if first and last:
head = self.index(first + " linestart")
tail = self.index(last + "-1c lineend +1c")
else:
head = self.index("insert linestart")
tail = self.index("insert lineend +1c")
chars = self.get(head, tail)
lines = chars.split("\n")
return head, tail, chars, lines
def _set_region(self, head, tail, chars, lines):
newchars = "\n".join(lines)
if newchars == chars:
self.bell()
return
self.tag_remove("sel", "1.0", "end")
self.mark_set("insert", head)
self.delete(head, tail)
self.insert(head, newchars)
self.tag_add("sel", head, "insert")
def _log_keypress_for_undo(self, e):
if e is None:
return
# NB! this may not execute if the event is cancelled in another handler
event_kind = self._get_event_kind(e)
if (
event_kind != self._last_event_kind
or e.char in ("\r", "\n", " ", "\t")
or e.keysym in ["Return", "KP_Enter"]
or time.time() - self._last_key_time > 2
):
self.edit_separator()
self._last_event_kind = event_kind
self._last_key_time = time.time()
def _get_event_kind(self, event):
if event.keysym in ("BackSpace", "Delete"):
return "delete"
elif event.char:
return "insert"
else:
# eg. e.keysym in ("Left", "Up", "Right", "Down", "Home", "End", "Prior", "Next"):
return "other_key"
def _make_blanks(self, n):
# Make string that displays as n leading blanks.
if self.indent_with_tabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return "\t" * ntabs + " " * nspaces
else:
return " " * n
def _on_undo(self, e):
self._last_event_kind = "undo"
def _on_redo(self, e):
self._last_event_kind = "redo"
def _on_cut(self, e):
self._last_event_kind = "cut"
self.edit_separator()
def _on_copy(self, e):
self._last_event_kind = "copy"
self.edit_separator()
def _on_paste(self, e):
if self.is_read_only():
return
try:
if self.has_selection():
self.direct_delete("sel.first", "sel.last")
except Exception:
pass
self._last_event_kind = "paste"
self.edit_separator()
self.see("insert")
self.after_idle(lambda: self.see("insert"))
def _on_get_focus(self, e):
self._last_event_kind = "get_focus"
self.edit_separator()
def _on_lose_focus(self, e):
self._last_event_kind = "lose_focus"
self.edit_separator()
def _on_key_press(self, e):
return self._log_keypress_for_undo(e)
def _on_mouse_click(self, event):
self.edit_separator()
def _tag_current_line(self, event=None):
self.tag_remove("current_line", "1.0", "end")
# Let's show current line only with readable text
# (this fits well with Thonny debugger,
# otherwise debugger focus box and current line interact in an ugly way)
if self._should_tag_current_line and not self.is_read_only():
# we may be on the same line as with prev event but tag needs extension
lineno = int(self.index("insert").split(".")[0])
self.tag_add("current_line", str(lineno) + ".0", str(lineno + 1) + ".0")
def on_secondary_click(self, event=None):
"Use this for invoking context menu"
self.focus_set()
if event:
self.mark_set("insert", "@%d,%d" % (event.x, event.y))
def _reload_theme_options(self, event=None):
style = ttk.Style()
states = []
if self.is_read_only():
states.append("readonly")
# Following crashes when a combobox is focused
# if self.focus_get() == self:
# states.append("focus")
if "background" not in self._initial_configuration:
background = style.lookup(self._style, "background", states)
if background:
self.configure(background=background)
if "foreground" not in self._initial_configuration:
foreground = style.lookup(self._style, "foreground", states)
if foreground:
self.configure(foreground=foreground)
self.configure(insertbackground=foreground)
def _insert_untypable_characters_on_windows(self, event):
if event.state == 131084: # AltGr or Ctrl+Alt
lang_id = get_keyboard_language()
char = _windows_altgr_chars_by_lang_id_and_keycode.get(lang_id, {}).get(
event.keycode, None
)
if char is not None:
self.insert("insert", char)
def destroy(self):
self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
super().destroy()
def direct_insert(self, index, chars, tags=None, **kw):
chars = self.check_convert_tabs_to_spaces(chars)
super().direct_insert(index, chars, tags, **kw)
def check_convert_tabs_to_spaces(self, chars):
if not self.replace_tabs:
return chars
tab_count = chars.count("\t")
if tab_count == 0:
return chars
else:
if messagebox.askyesno(
"Convert tabs to spaces?",
"Thonny (according to Python recommendation) uses spaces for indentation, "
+ "but the text you are about to insert/open contains %d tab characters. "
% tab_count
+ "To avoid confusion, it's better to convert them into spaces (unless you know they should be kept as tabs).\n\n"
+ "Do you want me to replace each tab with %d spaces?\n\n" % self.indent_width,
parent=tk._default_root,
):
return chars.expandtabs(self.indent_width)
else:
return chars
class TextFrame(ttk.Frame):
"Decorates text with scrollbars, line numbers and print margin"
def __init__(
self,
master,
line_numbers=False,
line_length_margin=0,
first_line_number=1,
text_class=EnhancedText,
horizontal_scrollbar=True,
vertical_scrollbar=True,
vertical_scrollbar_class=ttk.Scrollbar,
horizontal_scrollbar_class=ttk.Scrollbar,
vertical_scrollbar_style=None,
horizontal_scrollbar_style=None,
borderwidth=0,
relief="sunken",
gutter_background="#e0e0e0",
gutter_foreground="#999999",
**text_options
):
ttk.Frame.__init__(self, master=master, borderwidth=borderwidth, relief=relief)
final_text_options = {
"borderwidth": 0,
"insertwidth": 2,
"spacing1": 0,
"spacing3": 0,
"highlightthickness": 0,
"inactiveselectbackground": "gray",
"padx": 5,
"pady": 5,
}
final_text_options.update(text_options)
self.text = text_class(self, **final_text_options)
self.text.grid(row=0, column=2, sticky=tk.NSEW)
self._gutter = tk.Text(
self,
width=5,
padx=0,
pady=5,
highlightthickness=0,
bd=0,
takefocus=False,
font=self.text["font"],
background="#e0e0e0",
foreground=gutter_foreground,
selectbackground=gutter_background,
selectforeground=gutter_foreground,
cursor="arrow",
state="disabled",
undo=False,
wrap="none",
)
self._gutter_is_gridded = False
self._gutter.bind("<Double-Button-1>", self.on_gutter_double_click, True),
self._gutter.bind("<ButtonRelease-1>", self.on_gutter_click, True)
self._gutter.bind("<Button-1>", self.on_gutter_click, True)
self._gutter.bind("<Button1-Motion>", self.on_gutter_motion, True)
self._gutter["yscrollcommand"] = self._gutter_scroll
# need tags for justifying and rmargin
self._gutter.tag_configure("content", justify="right", rmargin=3)
# gutter will be gridded later
assert first_line_number is not None
self._first_line_number = first_line_number
self.set_gutter_visibility(line_numbers)
if vertical_scrollbar:
self._vbar = vertical_scrollbar_class(
self, orient=tk.VERTICAL, style=vertical_scrollbar_style
)
self._vbar.grid(row=0, column=3, sticky=tk.NSEW)
self._vbar["command"] = self._vertical_scroll
self.text["yscrollcommand"] = self._vertical_scrollbar_update
if horizontal_scrollbar:
self._hbar = horizontal_scrollbar_class(
self, orient=tk.HORIZONTAL, style=horizontal_scrollbar_style
)
self._hbar.grid(row=1, column=0, sticky=tk.NSEW, columnspan=3)
self._hbar["command"] = self._horizontal_scroll
self.text["xscrollcommand"] = self._horizontal_scrollbar_update
self.columnconfigure(2, weight=1)
self.rowconfigure(0, weight=1)
self._recommended_line_length = line_length_margin
margin_line_color = ttk.Style().lookup("Gutter", "background", default="LightGray")
self._margin_line = tk.Canvas(
self.text,
borderwidth=0,
width=1,
height=2000,
highlightthickness=0,
background=margin_line_color,
)
self.update_margin_line()
self.text.bind("<<TextChange>>", self._text_changed, True)
self.text.bind("<<CursorMove>>", self._cursor_moved, True)
self._ui_theme_change_binding = self.bind(
"<<ThemeChanged>>", self._reload_theme_options, True
)
self._reload_theme_options()
# TODO: add context menu?
def focus_set(self):
self.text.focus_set()
def set_gutter_visibility(self, value):
if value and not self._gutter_is_gridded:
self._gutter.grid(row=0, column=0, sticky=tk.NSEW)
self._gutter_is_gridded = True
elif not value and self._gutter_is_gridded:
self._gutter.grid_forget()
self._gutter_is_gridded = False
else:
return
"""
# insert first line number (NB! Without trailing linebreak. See update_gutter)
self._gutter.config(state="normal")
self._gutter.delete("1.0", "end")
for content, tags in self.compute_gutter_line(self._first_line_number):
self._gutter.insert("end", content, ("content",) + tags)
self._gutter.config(state="disabled")
"""
self.update_gutter(True)
def set_line_length_margin(self, value):
self._recommended_line_length = value
self.update_margin_line()
def _text_changed(self, event):
self.update_gutter()
def _cursor_moved(self, event):
self._update_gutter_active_line()
def _vertical_scrollbar_update(self, *args):
self._vbar.set(*args)
self._gutter.yview(tk.MOVETO, args[0])
self.text.event_generate("<<VerticalScroll>>")
def _gutter_scroll(self, *args):
try:
self._vbar.set(*args)
self.text.yview(tk.MOVETO, args[0])
except TclError:
pass
def _horizontal_scrollbar_update(self, *args):
self._hbar.set(*args)
self.update_margin_line()
def _vertical_scroll(self, *args):
self.text.yview(*args)
self._gutter.yview(*args)
self.text.event_generate("<<VerticalScroll>>")
def _horizontal_scroll(self, *args):
self.text.xview(*args)
self.update_margin_line()
def update_gutter(self, clean=False):
if clean:
self._gutter.config(state="normal")
self._gutter.delete("1.0", "end")
# need to add first item separately, because Text can't report 0 rows
for content, tags in self.compute_gutter_line(self._first_line_number):
self._gutter.insert("end-1c", content, tags + ("content",))
self._gutter.config(state="disabled")
text_line_count = int(self.text.index("end").split(".")[0])
gutter_line_count = int(self._gutter.index("end").split(".")[0])
if text_line_count != gutter_line_count:
self._gutter.config(state="normal")
# NB! Text acts weird with last symbol
# (don't really understand whether it automatically keeps a newline there or not)
# Following seems to ensure both Text-s have same height
if text_line_count > gutter_line_count:
delta = text_line_count - gutter_line_count
start = gutter_line_count + self._first_line_number - 1
if not clean and text_line_count > 10 and gutter_line_count < 3:
# probably initial load, do bulk insert
parts = []
for i in range(start, start + delta):
parts.append("\n")
for content, tags in self.compute_gutter_line(i, plain=True):
parts.append(content)
self._gutter.insert("end-1c", "".join(parts), ("content",) + tags)
else:
for i in range(start, start + delta):
self._gutter.insert("end-1c", "\n", ("content",))
for content, tags in self.compute_gutter_line(i):
self._gutter.insert("end-1c", content, ("content",) + tags)
else:
self._gutter.delete(line2index(text_line_count) + "-1c", "end-1c")
self._gutter.config(state="disabled")
# synchronize gutter scroll position with text
# https://mail.python.org/pipermail/tkinter-discuss/2010-March/002197.html
first, _ = self.text.yview()
self._gutter.yview_moveto(first)
self._update_gutter_active_line()
if text_line_count > 9998:
self._gutter.configure(width=7)
elif text_line_count > 998:
self._gutter.configure(width=6)
def _update_gutter_active_line(self):
self._gutter.tag_remove("active", "1.0", "end")
insert = self.text.index("insert")
self._gutter.tag_add("active", insert + " linestart", insert + " lineend")
def compute_gutter_line(self, lineno, plain=False):
yield str(lineno), ()
def update_margin_line(self):
if self._recommended_line_length == 0:
self._margin_line.place_forget()
else:
try:
self.text.update_idletasks()
# How far left has text been scrolled
first_visible_idx = self.text.index("@0,0")
first_visible_col = int(first_visible_idx.split(".")[1])
bbox = self.text.bbox(first_visible_idx)
first_visible_col_x = bbox[0]
margin_line_visible_col = self._recommended_line_length - first_visible_col
delta = first_visible_col_x
except Exception:
# fall back to ignoring scroll position
margin_line_visible_col = self._recommended_line_length
delta = 0
if margin_line_visible_col > -1:
x = (
get_text_font(self.text).measure((margin_line_visible_col - 1) * "M")
+ delta
+ self.text["padx"]
)
else:
x = -10
# print(first_visible_col, first_visible_col_x)
self._margin_line.place(y=-10, x=x)
def on_gutter_click(self, event=None):
try:
linepos = self._gutter.index("@%s,%s" % (event.x, event.y)).split(".")[0]
self.text.mark_set("insert", "%s.0" % linepos)
self._gutter.mark_set("gutter_selection_start", "%s.0" % linepos)
if (
event.type == "4"
): # In Python 3.6 you can use tk.EventType.ButtonPress instead of "4"
self.text.tag_remove("sel", "1.0", "end")
except tk.TclError:
exception("on_gutter_click")
def on_gutter_double_click(self, event=None):
try:
self._gutter.mark_unset("gutter_selection_start")
self.text.tag_remove("sel", "1.0", "end")
self._gutter.tag_remove("sel", "1.0", "end")
except tk.TclError:
exception("on_gutter_click")
def on_gutter_motion(self, event=None):
try:
if "gutter_selection_start" not in self._gutter.mark_names():
return
linepos = int(self._gutter.index("@%s,%s" % (event.x, event.y)).split(".")[0])
gutter_selection_start = int(self._gutter.index("gutter_selection_start").split(".")[0])
self.text.select_lines(
min(gutter_selection_start, linepos), max(gutter_selection_start - 1, linepos - 1)
)
self.text.mark_set("insert", "%s.0" % linepos)
except tk.TclError:
exception("on_gutter_motion")
def _reload_theme_options(self, event=None):
style = ttk.Style()
background = style.lookup("GUTTER", "background")
if background:
self._gutter.configure(background=background, selectbackground=background)
self._margin_line.configure(background=background)
foreground = style.lookup("GUTTER", "foreground")
if foreground:
self._gutter.configure(foreground=foreground, selectforeground=foreground)
def destroy(self):
self.unbind("<<ThemeChanged>>", self._ui_theme_change_binding)
super().destroy()
def get_text_font(text):
font = text["font"]
if isinstance(font, str):
return tkfont.nametofont(font)
else:
return font
def classifyws(s, tabwidth):
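    # Returns (raw, effective) for the leading whitespace of s: 'raw' is the
    # number of whitespace characters, 'effective' is the resulting column
    # width with each tab expanded to the next multiple of tabwidth.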
raw = effective = 0
for ch in s:
if ch == " ":
raw = raw + 1
effective = effective + 1
elif ch == "\t":
raw = raw + 1
effective = (effective // tabwidth + 1) * tabwidth
else:
break
return raw, effective
def index2line(index):
return int(float(index))
def line2index(line):
return str(float(line))
def fixwordbreaks(root):
# Adapted from idlelib.EditorWindow (Python 3.4.2)
# Modified to include non-ascii chars
# Make sure that Tk's double-click and next/previous word
# operations use our definition of a word (i.e. an identifier)
root.tk.call("tcl_wordBreakAfter", "a b", 0) # make sure word.tcl is loaded
root.tk.call("set", "tcl_wordchars", r"\w")
root.tk.call("set", "tcl_nonwordchars", r"\W")
def rebind_control_a(root):
# Tk 8.6 has <<SelectAll>> event but 8.5 doesn't
# http://stackoverflow.com/questions/22907200/remap-default-keybinding-in-tkinter
def control_a(event):
widget = event.widget
if isinstance(widget, tk.Text):
widget.tag_remove("sel", "1.0", "end")
widget.tag_add("sel", "1.0", "end")
root.bind_class("Text", "<Control-a>", control_a)
def _running_on_mac():
return tk._default_root.call("tk", "windowingsystem") == "aqua"
def get_keyboard_language():
# https://stackoverflow.com/a/42047820/261181
if platform.system() != "Windows":
raise NotImplementedError("Can provide keyboard language only on Windows")
import ctypes
user32 = ctypes.WinDLL("user32", use_last_error=True)
curr_window = user32.GetForegroundWindow()
thread_id = user32.GetWindowThreadProcessId(curr_window, 0)
# Made up of 0xAAABBBB, AAA = HKL (handle object) & BBBB = language ID
klid = user32.GetKeyboardLayout(thread_id)
# Language ID -> low 10 bits, Sub-language ID -> high 6 bits
# Extract language ID from KLID
lid = klid & (2 ** 16 - 1)
return lid
_windows_altgr_chars_by_lang_id_and_keycode = {
# https://docs.microsoft.com/en-us/windows/desktop/intl/language-identifier-constants-and-strings
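    # 0x0425 is the Windows language identifier for Estonian.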
0x0425: {191: "^"} # AltGr+Ä
}
| 36.14805
| 130
| 0.591637
|
78504e4682d31f1e857a409dd9b39c0ecdb6c924
| 7,299
|
py
|
Python
|
yt/visualization/volume_rendering/transfer_function_helper.py
|
themousepotato/yt
|
6befef2bc0427250fd62395962599be41b193e65
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/visualization/volume_rendering/transfer_function_helper.py
|
themousepotato/yt
|
6befef2bc0427250fd62395962599be41b193e65
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/visualization/volume_rendering/transfer_function_helper.py
|
themousepotato/yt
|
6befef2bc0427250fd62395962599be41b193e65
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import matplotlib
import numpy as np
from distutils.version import LooseVersion
from yt.funcs import mylog
from yt.data_objects.profiles import create_profile
from yt.visualization.volume_rendering.transfer_functions import \
ColorTransferFunction
from io import BytesIO
class TransferFunctionHelper:
r"""A transfer function helper.
    This attempts to help set up a good transfer function by finding
    bounds, handling linear/log options, and displaying the transfer
    function combined with 1D profiles of the rendered quantity.
Parameters
----------
ds: A Dataset instance
A static output that is currently being rendered. This is used to
help set up data bounds.
Notes
-----
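    A minimal usage sketch (added for illustration; the dataset path and the
    field name are placeholders, not part of the original docstring):
    >>> import yt
    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    >>> tfh = TransferFunctionHelper(ds)
    >>> tfh.set_field("density")
    >>> tfh.set_log(True)
    >>> tfh.set_bounds()
    >>> tfh.build_transfer_function()
    >>> tfh.setup_default()
    >>> tfh.plot("transfer_function.png")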
"""
profiles = None
def __init__(self, ds):
self.ds = ds
self.field = None
self.log = False
self.tf = None
self.bounds = None
self.grey_opacity = False
self.profiles = {}
def set_bounds(self, bounds=None):
"""
Set the bounds of the transfer function.
Parameters
----------
bounds: array-like, length 2, optional
A length 2 list/array in the form [min, max]. These should be the
raw values and not the logarithm of the min and max. If bounds is
None, the bounds of the data are calculated from all of the data
in the dataset. This can be slow for very large datasets.
"""
if bounds is None:
bounds = self.ds.h.all_data().quantities['Extrema'](self.field, non_zero=True)
bounds = [b.ndarray_view() for b in bounds]
self.bounds = bounds
# Do some error checking.
assert(len(self.bounds) == 2)
if self.log:
assert(self.bounds[0] > 0.0)
assert(self.bounds[1] > 0.0)
return
def set_field(self, field):
"""
Set the field to be rendered
Parameters
----------
field: string
The field to be rendered.
"""
if field != self.field:
self.log = self.ds._get_field_info(field).take_log
self.field = field
def set_log(self, log):
"""
        Set whether the transfer function should be in log or linear space.
        Note that this only updates the helper's own setting; the dataset's
        field_info take_log attribute is left unchanged.
Parameters
----------
log: boolean
Sets whether the transfer function should use log or linear space.
"""
self.log = log
def build_transfer_function(self):
"""
Builds the transfer function according to the current state of the
TransferFunctionHelper.
Parameters
----------
None
Returns
-------
A ColorTransferFunction object.
"""
if self.bounds is None:
mylog.info('Calculating data bounds. This may take a while.' +
' Set the TransferFunctionHelper.bounds to avoid this.')
self.set_bounds()
if self.log:
mi, ma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
else:
mi, ma = self.bounds
self.tf = ColorTransferFunction((mi, ma),
grey_opacity=self.grey_opacity,
nbins=512)
return self.tf
def setup_default(self):
"""Setup a default colormap
Creates a ColorTransferFunction including 10 gaussian layers whose
colors sample the 'spectral' colormap. Also attempts to scale the
transfer function to produce a natural contrast ratio.
"""
if LooseVersion(matplotlib.__version__) < LooseVersion('2.0.0'):
colormap_name = 'spectral'
else:
colormap_name = 'nipy_spectral'
self.tf.add_layers(10, colormap=colormap_name)
factor = self.tf.funcs[-1].y.size / self.tf.funcs[-1].y.sum()
self.tf.funcs[-1].y *= 2*factor
def plot(self, fn=None, profile_field=None, profile_weight=None):
"""
Save the current transfer function to a bitmap, or display
it inline.
Parameters
----------
fn: string, optional
            Filename to save the image to. If None, the image is returned to
            an IPython session.
Returns
-------
If fn is None, will return an image to an IPython notebook.
"""
from yt.visualization._mpl_imports import FigureCanvasAgg
from matplotlib.figure import Figure
if self.tf is None:
self.build_transfer_function()
self.setup_default()
tf = self.tf
if self.log:
xfunc = np.logspace
xmi, xma = np.log10(self.bounds[0]), np.log10(self.bounds[1])
else:
xfunc = np.linspace
# Need to strip units off of the bounds to avoid a recursion error
# in matplotlib 1.3.1
xmi, xma = [np.float64(b) for b in self.bounds]
x = xfunc(xmi, xma, tf.nbins)
y = tf.funcs[3].y
w = np.append(x[1:]-x[:-1], x[-1]-x[-2])
colors = np.array([tf.funcs[0].y, tf.funcs[1].y, tf.funcs[2].y,
np.ones_like(x)]).T
fig = Figure(figsize=[6, 3])
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.2, 0.2, 0.75, 0.75])
ax.bar(x, tf.funcs[3].y, w, edgecolor=[0.0, 0.0, 0.0, 0.0],
log=self.log, color=colors, bottom=[0])
if profile_field is not None:
try:
prof = self.profiles[self.field]
except KeyError:
self.setup_profile(profile_field, profile_weight)
prof = self.profiles[self.field]
try:
prof[profile_field]
except KeyError:
prof.add_fields([profile_field])
# Strip units, if any, for matplotlib 1.3.1
xplot = np.array(prof.x)
yplot = np.array(prof[profile_field]*tf.funcs[3].y.max() /
prof[profile_field].max())
ax.plot(xplot, yplot, color='w', linewidth=3)
ax.plot(xplot, yplot, color='k')
ax.set_xscale({True: 'log', False: 'linear'}[self.log])
ax.set_xlim(x.min(), x.max())
ax.set_xlabel(self.ds._get_field_info(self.field).get_label())
ax.set_ylabel(r'$\mathrm{alpha}$')
ax.set_ylim(y.max()*1.0e-3, y.max()*2)
if fn is None:
from IPython.core.display import Image
f = BytesIO()
canvas.print_figure(f)
f.seek(0)
img = f.read()
return Image(img)
else:
fig.savefig(fn)
def setup_profile(self, profile_field=None, profile_weight=None):
if profile_field is None:
profile_field = 'cell_volume'
prof = create_profile(self.ds.all_data(), self.field, profile_field,
n_bins=128, extrema={self.field: self.bounds},
weight_field=profile_weight,
logs = {self.field: self.log})
self.profiles[self.field] = prof
return
| 32.730942
| 90
| 0.561173
|
8462b49b868aa8141b29e19a0be853634906efa9
| 85
|
py
|
Python
|
vagas/apps.py
|
rodgeraraujo/cv-base
|
c6bbd19f4586a3c4d065f486c4de6a92e26c1c9b
|
[
"MIT"
] | 1
|
2019-08-21T20:10:21.000Z
|
2019-08-21T20:10:21.000Z
|
vagas/apps.py
|
rodgeraraujo/cv-base
|
c6bbd19f4586a3c4d065f486c4de6a92e26c1c9b
|
[
"MIT"
] | 8
|
2019-10-02T19:44:19.000Z
|
2022-02-10T08:30:05.000Z
|
vagas/apps.py
|
rodgeraraujo/cv-base
|
c6bbd19f4586a3c4d065f486c4de6a92e26c1c9b
|
[
"MIT"
] | 4
|
2019-09-17T01:00:55.000Z
|
2019-10-02T20:07:36.000Z
|
from django.apps import AppConfig
class VagasConfig(AppConfig):
name = "vagas"
| 14.166667
| 33
| 0.741176
|
27d49470797debb25f38cafee39cf075973b7b18
| 5,380
|
py
|
Python
|
python/benchmarks/extensions_25percent_1teams.py
|
tbvanderwoude/research-project
|
1c5cb09a424a0db83f18dae3278ecb7ad6866d2c
|
[
"Apache-2.0",
"MIT"
] | 3
|
2021-08-23T18:00:44.000Z
|
2022-02-09T06:00:50.000Z
|
python/benchmarks/extensions_25percent_1teams.py
|
tbvanderwoude/research-project
|
1c5cb09a424a0db83f18dae3278ecb7ad6866d2c
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
python/benchmarks/extensions_25percent_1teams.py
|
tbvanderwoude/research-project
|
1c5cb09a424a0db83f18dae3278ecb7ad6866d2c
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-06-15T11:38:45.000Z
|
2021-06-15T11:38:45.000Z
|
from multiprocessing import Pool
from typing import Optional
from mapfmclient import Problem
from tqdm import tqdm
from python.benchmarks.extensions_25percent_3teams import read_from_file
from python.benchmarks.graph_times import graph_results
from python.benchmarks.inmatch_vs_prematch_75percent_1teams import output_data
from python.benchmarks.map import MapGenerator
import pathlib
from python.benchmarks.parse_map import MapParser
from python.benchmarks.run_with_timeout import run_with_timeout
from python.mstar.rewrite import Config, MatchingStrategy
from python.mstar.rewrite.config import GigaByte
from python.solvers.configurable_mstar_solver import ConfigurableMStar
this_dir = pathlib.Path(__file__).parent.absolute()
name = "extensions_25percent_1teams_maps"
processes = 1
def generate_maps():
path = this_dir / name
try:
path.mkdir(parents=True)
except FileExistsError:
print("maps already generated")
return
for i in tqdm(range(1, 16)):
tqdm.write(f"generating {path}")
map_generator = MapGenerator(path)
map_generator.generate_even_batch(
200, # number of maps
20, 20, # size
i, # number of agents
1, # number of teams
prefix=name,
min_goal_distance=0,
open_factor=0.65,
max_neighbors=3,
)
def run(config: Config, bm_name: str):
batchdir = this_dir / name
parser = MapParser(batchdir)
fname = batchdir / f"results_{bm_name}.txt"
if fname.exists():
print(f"data exists for {bm_name}")
return fname, bm_name
# num agents : solutions
results: dict[int, list[Optional[float]]] = {}
all_problems = [[i[1] for i in parser.parse_batch(n.name)] for n in batchdir.iterdir() if n.is_dir()]
all_problems.sort(key=lambda i: len(i[0].goals))
with Pool(processes) as p:
for problems in tqdm(all_problems):
num_agents = len(problems[0].goals)
partname = pathlib.Path(str(fname) + f".{num_agents}agents")
if partname.exists():
print(f"found data for part {num_agents}")
results[num_agents] = read_from_file(partname, num_agents)
continue
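            # Only attempt this problem size if the previous size produced at
            # least one solution within the timeout; otherwise record every
            # instance of this size as unsolved (None).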
if num_agents <= 1 or sum(1 for i in results[num_agents - 1] if i is not None) != 0:
sols_inmatch = run_with_timeout(p, ConfigurableMStar(
config
), problems, 2 * 60)
tqdm.write(f"{bm_name} with {num_agents} agents: {sols_inmatch}")
results[num_agents] = sols_inmatch
else:
results[num_agents] = [None for i in range(len(problems))]
output_data(partname, results)
tqdm.write(str(results))
output_data(fname, results)
return fname, bm_name
def main():
batchdir = this_dir / name
generate_maps()
files: list[tuple[pathlib.Path, str]] = []
files.append(run(
Config(
operator_decomposition=False,
precompute_paths=False,
precompute_heuristic=False,
collision_avoidance_table=False,
recursive=False,
matching_strategy=MatchingStrategy.Prematch,
max_memory_usage=3 * GigaByte,
debug=False,
report_expansions=True,
),
"no extensions"
))
files.append(run(
Config(
operator_decomposition=False,
precompute_paths=False,
precompute_heuristic=False,
collision_avoidance_table=False,
recursive=False,
matching_strategy=MatchingStrategy.PruningPrematch,
max_memory_usage=3 * GigaByte,
debug=False,
report_expansions=True,
),
"pruning"
))
files.append(run(
Config(
operator_decomposition=False,
precompute_paths=False,
precompute_heuristic=False,
collision_avoidance_table=False,
recursive=False,
matching_strategy=MatchingStrategy.SortedPruningPrematch,
max_memory_usage=3 * GigaByte,
debug=False,
report_expansions=True,
),
"pruning and sorting"
))
files.append(run(
Config(
operator_decomposition=False,
precompute_paths=False,
precompute_heuristic=True,
collision_avoidance_table=False,
recursive=False,
matching_strategy=MatchingStrategy.SortedPruningPrematch,
max_memory_usage=3 * GigaByte,
debug=False,
report_expansions=True,
),
"precomputed heuristic"
))
files.append(run(
Config(
operator_decomposition=True,
precompute_paths=False,
precompute_heuristic=True,
collision_avoidance_table=False,
recursive=False,
matching_strategy=MatchingStrategy.SortedPruningPrematch,
max_memory_usage=3 * GigaByte,
debug=False,
report_expansions=True,
),
"operator decomposition"
))
graph_results(
*files,
batchdir / f"{name}",
save=True,
bounds=False,
legend=False,
)
if __name__ == '__main__':
main()
| 29.081081
| 105
| 0.6171
|
df3f328e28f8bca7b4d88d903cd87578fe2ff3a2
| 578
|
py
|
Python
|
code/digit_factorials/sol_34.py
|
bhavinjawade/project-euler-solutions
|
56bf6a282730ed4b9b875fa081cf4509d9939d98
|
[
"Apache-2.0"
] | 2
|
2020-07-16T08:16:32.000Z
|
2020-10-01T07:16:48.000Z
|
code/digit_factorials/sol_34.py
|
Psingh12354/project-euler-solutions
|
56bf6a282730ed4b9b875fa081cf4509d9939d98
|
[
"Apache-2.0"
] | null | null | null |
code/digit_factorials/sol_34.py
|
Psingh12354/project-euler-solutions
|
56bf6a282730ed4b9b875fa081cf4509d9939d98
|
[
"Apache-2.0"
] | 1
|
2021-05-07T18:06:08.000Z
|
2021-05-07T18:06:08.000Z
|
# -*- coding: utf-8 -*-
'''
File name: code\digit_factorials\sol_34.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #34 :: Digit factorials
#
# For more information see:
# https://projecteuler.net/problem=34
# Problem Statement
'''
145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
Find the sum of all numbers which are equal to the sum of the factorial of their digits.
Note: as 1! = 1 and 2! = 2 are not sums they are not included.
'''
# Solution
# Solution Approach
'''
'''
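# The Solution sections above are empty in the original file. Below is a
# hedged sketch of one possible approach (not the author's solution): cache
# 0!..9!, then test every candidate up to 7 * 9!, since beyond that bound the
# digit-factorial sum can no longer reach the number itself.
from math import factorial
FACTS = [factorial(d) for d in range(10)]  # 0! .. 9!
def curious_sum(limit=7 * factorial(9)):
    total = 0
    for n in range(10, limit + 1):  # single-digit 1 and 2 are excluded (not sums)
        if n == sum(FACTS[int(c)] for c in str(n)):
            total += n
    return total
if __name__ == '__main__':
    print(curious_sum())  # expected to print 40730 (145 + 40585)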
| 21.407407
| 88
| 0.653979
|
aed956091a2c754e19d546901f2a83bf1ddf4038
| 1,300
|
py
|
Python
|
doc/generate-doc.py
|
ponty/pyunpack
|
09184e1f35ce8b805c672b6f7ebf06974d4ba34c
|
[
"BSD-2-Clause"
] | 74
|
2015-03-19T20:32:04.000Z
|
2022-03-18T02:29:01.000Z
|
doc/generate-doc.py
|
ponty/pyunpack
|
09184e1f35ce8b805c672b6f7ebf06974d4ba34c
|
[
"BSD-2-Clause"
] | 11
|
2016-01-21T11:29:21.000Z
|
2022-01-24T19:26:50.000Z
|
doc/generate-doc.py
|
ponty/pyunpack
|
09184e1f35ce8b805c672b6f7ebf06974d4ba34c
|
[
"BSD-2-Clause"
] | 9
|
2015-11-30T02:24:27.000Z
|
2021-07-05T20:14:46.000Z
|
import glob
import logging
import os
from easyprocess import EasyProcess
from entrypoint2 import entrypoint
commands = """
python3 -m pyunpack.cli --help
"""
commands = commands.strip().splitlines()
def empty_dir(dir):
files = glob.glob(os.path.join(dir, "*"))
for f in files:
os.remove(f)
@entrypoint
def main():
gendir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "gen")
logging.info("gendir: %s", gendir)
os.makedirs(gendir, exist_ok=True)
empty_dir(gendir)
try:
os.chdir("gen")
for cmd in commands:
logging.info("cmd: %s", cmd)
fname_base = cmd.replace(" ", "_")
fname = fname_base + ".txt"
logging.info("cmd: %s", cmd)
print("file name: %s" % fname)
with open(fname, "w") as f:
f.write("$ " + cmd + "\n")
p = EasyProcess(cmd).call()
f.write(p.stdout)
if p.stderr and p.stdout:
f.write("\n")
f.write(p.stderr)
finally:
os.chdir("..")
embedme = EasyProcess(["npx", "embedme", "../README.md"])
embedme.call()
print(embedme.stdout)
assert embedme.return_code == 0
    assert "but file does not exist" not in embedme.stdout
| 25
| 76
| 0.555385
|
6fdc7319b512cca35163df8683c7cbb1bab4957f
| 1,215
|
py
|
Python
|
Walker.py
|
ey3lock3r/The-Nature-of-Code
|
cca3a0359a46570b1cf0b02315be8cee1728a01a
|
[
"MIT"
] | null | null | null |
Walker.py
|
ey3lock3r/The-Nature-of-Code
|
cca3a0359a46570b1cf0b02315be8cee1728a01a
|
[
"MIT"
] | null | null | null |
Walker.py
|
ey3lock3r/The-Nature-of-Code
|
cca3a0359a46570b1cf0b02315be8cee1728a01a
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import noise
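# Linear re-mapping of n from the range [start1, stop1] to [start2, stop2]
# (equivalent to Processing's map() helper).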
map = lambda n, start1, stop1, start2, stop2: ((n-start1)/(stop1-start1))*(stop2-start2)+start2
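# Walker: a point that wanders the canvas, driven by two independent 1D
# Perlin-noise streams (offsets n1 and n2 start far apart).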
class Walker():
def __init__(self):
self.fig, self.ax = plt.subplots(figsize=(8, 5), subplot_kw=dict(aspect="equal", adjustable='datalim', anchor='C'))
self.fig.set_dpi(100)
self.w = 320
self.h = 180
self.ax.set_xlim((-self.w,self.w))
self.ax.set_ylim((-self.h,self.h))
self.n1 = 0
self.n2 = 10000
def step(self, time):
n1 = noise.pnoise1(self.n1)
n2 = noise.pnoise1(self.n2)
x = map(n1, 0, 1, 0, self.w)
y = map(n2, 0, 1, 0, self.h)
# print('x={} y={}'.format(x,y))
self.point.center = (x, y)
self.n1 += 0.01
self.n2 += 0.01
return [self.point]
def display(self):
self.point = plt.Circle((0, 0), 10, color='red', alpha=1)
self.ax.add_patch(self.point)
ani = animation.FuncAnimation(self.fig, self.step, frames=500, interval=40, blit=True)
plt.show()
agent = Walker()
agent.display()
| 31.973684
| 124
| 0.561317
|
bf540c2d9004cbfba99d2117e9e8bfc7eed95f5c
| 75
|
py
|
Python
|
instance/config.py
|
cherucole/News.io
|
6a00132acf37b39649d2954d43433a2eb4a414ac
|
[
"Unlicense",
"MIT"
] | null | null | null |
instance/config.py
|
cherucole/News.io
|
6a00132acf37b39649d2954d43433a2eb4a414ac
|
[
"Unlicense",
"MIT"
] | null | null | null |
instance/config.py
|
cherucole/News.io
|
6a00132acf37b39649d2954d43433a2eb4a414ac
|
[
"Unlicense",
"MIT"
] | null | null | null |
NEWS_API_KEY='34c58192d7aa4ba89c5f8fcca69719a8'
SECRET_KEY = 'mySecretKey'
| 25
| 47
| 0.866667
|