| column | type | stats |
|---|---|---|
| hexsha | stringlengths | 40 .. 40 |
| size | int64 | 4 .. 1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 .. 209 |
| max_stars_repo_name | stringlengths | 5 .. 121 |
| max_stars_repo_head_hexsha | stringlengths | 40 .. 40 |
| max_stars_repo_licenses | listlengths | 1 .. 10 |
| max_stars_count | int64 | 1 .. 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 .. 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 .. 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 .. 209 |
| max_issues_repo_name | stringlengths | 5 .. 121 |
| max_issues_repo_head_hexsha | stringlengths | 40 .. 40 |
| max_issues_repo_licenses | listlengths | 1 .. 10 |
| max_issues_count | int64 | 1 .. 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 .. 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 .. 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 .. 209 |
| max_forks_repo_name | stringlengths | 5 .. 121 |
| max_forks_repo_head_hexsha | stringlengths | 40 .. 40 |
| max_forks_repo_licenses | listlengths | 1 .. 10 |
| max_forks_count | int64 | 1 .. 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 .. 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 .. 24 ⌀ |
| content | stringlengths | 4 .. 1.02M |
| avg_line_length | float64 | 1.07 .. 66.1k |
| max_line_length | int64 | 4 .. 266k |
| alphanum_fraction | float64 | 0.01 .. 1 |

(⌀ marks columns that may contain null values.)
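Each record below consists of this metadata, the `content` field (the file's source code), and three derived statistics. As a rough illustration of how such a dump could be loaded and inspected, here is a minimal sketch; the Parquet file name is a placeholder and pandas is an assumed tooling choice, neither comes from the dump itself.

```python
# Minimal sketch: load and inspect rows that follow the schema above.
# "code_rows.parquet" is a hypothetical export of these records.
import pandas as pd

df = pd.read_parquet("code_rows.parquet")

# A few schema-level sanity checks: 40-character hexshas, nullable star counts.
print(df[["hexsha", "size", "ext", "lang", "max_stars_count"]].head())
print("rows with null max_stars_count:", df["max_stars_count"].isna().sum())

# Look at one record's provenance and derived statistics.
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"])
print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
```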
| hexsha: 127342322ff0b09fda00cbb8a72a2a1cd9398ea0 | size: 9,244 | ext: py | lang: Python |
| max_stars_repo_path: tests/profiling/test_profiler.py | repo_name: p7g/dd-trace-py | head_hexsha: 141ac0ab6e9962e3b3bafc9de172076075289a19 | licenses: ["Apache-2.0", "BSD-3-Clause"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: tests/profiling/test_profiler.py | repo_name: p7g/dd-trace-py | head_hexsha: 141ac0ab6e9962e3b3bafc9de172076075289a19 | licenses: ["Apache-2.0", "BSD-3-Clause"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: tests/profiling/test_profiler.py | repo_name: p7g/dd-trace-py | head_hexsha: 141ac0ab6e9962e3b3bafc9de172076075289a19 | licenses: ["Apache-2.0", "BSD-3-Clause"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
| content: |
import logging
import time
import mock
import pytest
import ddtrace
from ddtrace.profiling import collector
from ddtrace.profiling import event
from ddtrace.profiling import exporter
from ddtrace.profiling import profiler
from ddtrace.profiling.collector import stack
from ddtrace.profiling.exporter import http
def test_status():
p = profiler.Profiler()
assert repr(p.status) == "<ServiceStatus.STOPPED: 'stopped'>"
p.start()
assert repr(p.status) == "<ServiceStatus.RUNNING: 'running'>"
p.stop(flush=False)
assert repr(p.status) == "<ServiceStatus.STOPPED: 'stopped'>"
def test_restart():
p = profiler.Profiler()
p.start()
p.stop(flush=False)
p.start()
p.stop(flush=False)
def test_multiple_stop():
"""Check that the profiler can be stopped twice."""
p = profiler.Profiler()
p.start()
p.stop(flush=False)
p.stop(flush=False)
@pytest.mark.parametrize(
"service_name_var",
("DD_SERVICE",),
)
def test_default_from_env(service_name_var, monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
monkeypatch.setenv(service_name_var, "foobar")
prof = profiler.Profiler()
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.service == "foobar"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_service_api(monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler(service="foobar")
assert prof.service == "foobar"
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.service == "foobar"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_tracer_api(monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler(tracer=ddtrace.tracer)
assert prof.tracer == ddtrace.tracer
for col in prof._profiler._collectors:
if isinstance(col, stack.StackCollector):
assert col.tracer == ddtrace.tracer
break
else:
pytest.fail("Unable to find stack collector")
def test_env_default(monkeypatch):
monkeypatch.setenv("DD_API_KEY", "foobar")
monkeypatch.setenv("DD_ENV", "staging")
monkeypatch.setenv("DD_VERSION", "123")
prof = profiler.Profiler()
assert prof.env == "staging"
assert prof.version == "123"
assert prof.url is None
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.env == "staging"
assert exp.version == "123"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_env_api():
prof = profiler.Profiler(env="staging", version="123")
assert prof.env == "staging"
assert prof.version == "123"
assert prof.url is None
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.env == "staging"
assert exp.version == "123"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_tags_api():
prof = profiler.Profiler(env="staging", version="123", tags={"foo": "bar"})
assert prof.env == "staging"
assert prof.version == "123"
assert prof.url is None
assert prof.tags["foo"] == "bar"
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.env == "staging"
assert exp.version == "123"
assert exp.tags["foo"] == b"bar"
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_env_agentless(monkeypatch):
monkeypatch.setenv("DD_PROFILING_AGENTLESS", "true")
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler()
_check_url(prof, "https://intake.profile.datadoghq.com", "foobar", endpoint_path="/v1/input")
def test_env_agentless_site(monkeypatch):
monkeypatch.setenv("DD_SITE", "datadoghq.eu")
monkeypatch.setenv("DD_PROFILING_AGENTLESS", "true")
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler()
_check_url(prof, "https://intake.profile.datadoghq.eu", "foobar", endpoint_path="/v1/input")
def test_env_no_agentless(monkeypatch):
monkeypatch.setenv("DD_PROFILING_AGENTLESS", "false")
monkeypatch.setenv("DD_API_KEY", "foobar")
prof = profiler.Profiler()
_check_url(prof, "http://localhost:8126", "foobar")
def test_url():
prof = profiler.Profiler(url="https://foobar:123")
_check_url(prof, "https://foobar:123")
def _check_url(prof, url, api_key=None, endpoint_path="profiling/v1/input"):
for exp in prof._profiler._scheduler.exporters:
if isinstance(exp, http.PprofHTTPExporter):
assert exp.api_key == api_key
assert exp.endpoint == url
assert exp.endpoint_path == endpoint_path
break
else:
pytest.fail("Unable to find HTTP exporter")
def test_default_tracer_and_url():
try:
ddtrace.tracer.configure(hostname="foobar")
prof = profiler.Profiler(url="https://foobaz:123")
_check_url(prof, "https://foobaz:123")
finally:
ddtrace.tracer.configure(hostname="localhost")
def test_tracer_and_url():
t = ddtrace.Tracer()
t.configure(hostname="foobar")
prof = profiler.Profiler(tracer=t, url="https://foobaz:123")
_check_url(prof, "https://foobaz:123")
def test_tracer_url():
t = ddtrace.Tracer()
t.configure(hostname="foobar")
prof = profiler.Profiler(tracer=t)
_check_url(prof, "http://foobar:8126")
def test_tracer_url_https():
t = ddtrace.Tracer()
t.configure(hostname="foobar", https=True)
prof = profiler.Profiler(tracer=t)
_check_url(prof, "https://foobar:8126")
def test_tracer_url_uds_hostname():
t = ddtrace.Tracer()
t.configure(hostname="foobar", uds_path="/foobar")
prof = profiler.Profiler(tracer=t)
_check_url(prof, "unix://foobar/foobar")
def test_tracer_url_uds():
t = ddtrace.Tracer()
t.configure(uds_path="/foobar")
prof = profiler.Profiler(tracer=t)
_check_url(prof, "unix:///foobar")
def test_env_no_api_key():
prof = profiler.Profiler()
_check_url(prof, "http://localhost:8126")
def test_env_endpoint_url(monkeypatch):
monkeypatch.setenv("DD_AGENT_HOST", "foobar")
monkeypatch.setenv("DD_TRACE_AGENT_PORT", "123")
t = ddtrace.Tracer()
prof = profiler.Profiler(tracer=t)
_check_url(prof, "http://foobar:123")
def test_env_endpoint_url_no_agent(monkeypatch):
monkeypatch.setenv("DD_SITE", "datadoghq.eu")
monkeypatch.setenv("DD_API_KEY", "123")
prof = profiler.Profiler()
_check_url(prof, "http://localhost:8126", "123")
def test_copy():
p = profiler._ProfilerInstance(env="123", version="dwq", service="foobar")
c = p.copy()
assert c == p
assert p.env == c.env
assert p.version == c.version
assert p.service == c.service
assert p.tracer == c.tracer
assert p.tags == c.tags
def test_snapshot(monkeypatch):
class SnapCollect(collector.Collector):
@staticmethod
def collect():
pass
@staticmethod
def snapshot():
return [[event.Event()]]
def _start_service(self):
pass
def _stop_service(self):
pass
all_events = {}
class Exporter(exporter.Exporter):
def export(self, events, *args, **kwargs):
all_events["EVENTS"] = events
class TestProfiler(profiler._ProfilerInstance):
def _build_default_exporters(self, *args, **kargs):
return [Exporter()]
monkeypatch.setenv("DD_PROFILING_UPLOAD_INTERVAL", "1")
p = TestProfiler()
p._collectors = [SnapCollect(p._recorder)]
p.start()
time.sleep(2)
p.stop()
assert len(all_events["EVENTS"][event.Event]) == 1
def test_failed_start_collector(caplog, monkeypatch):
class ErrCollect(collector.Collector):
def _start_service(self):
raise RuntimeError("could not import required module")
def _stop_service(self):
pass
@staticmethod
def collect():
pass
@staticmethod
def snapshot():
raise Exception("error!")
monkeypatch.setenv("DD_PROFILING_UPLOAD_INTERVAL", "1")
class Exporter(exporter.Exporter):
def export(self, events, *args, **kwargs):
pass
class TestProfiler(profiler._ProfilerInstance):
def _build_default_exporters(self, *args, **kargs):
return [Exporter()]
p = TestProfiler()
err_collector = mock.MagicMock(wraps=ErrCollect(p._recorder))
p._collectors = [err_collector]
p.start()
assert caplog.record_tuples == [
(("ddtrace.profiling.profiler", logging.ERROR, "Failed to start collector %r, disabling." % err_collector))
]
time.sleep(2)
p.stop()
assert err_collector.snapshot.call_count == 0
assert caplog.record_tuples == [
(("ddtrace.profiling.profiler", logging.ERROR, "Failed to start collector %r, disabling." % err_collector))
]
| avg_line_length: 29.346032 | max_line_length: 115 | alphanum_fraction: 0.658806 |
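The three trailing numbers above (and on every record) are derived from the `content` field. The dump does not state the exact formulas; the sketch below is a plausible reconstruction and should be read as an assumption, not the dataset's documented definition.

```python
# Plausible reconstruction of the derived columns; the exact formulas are an
# assumption, the dump itself only shows the resulting numbers.
def derived_stats(content: str):
    lines = content.splitlines()
    avg_line_length = sum(len(line) for line in lines) / len(lines) if lines else 0.0
    max_line_length = max((len(line) for line in lines), default=0)
    alphanum_fraction = (
        sum(ch.isalnum() for ch in content) / len(content) if content else 0.0
    )
    return avg_line_length, max_line_length, alphanum_fraction

# Tiny worked example on a two-line snippet.
print(derived_stats("import time\nx = 1"))  # -> (8.0, 11, 0.7058...)
```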
| hexsha: 21181bc95df67916d7160425fd7d119de6defab6 | size: 10,702 | ext: py | lang: Python |
| max_stars_repo_path: featuretools/synthesis/dfs.py | repo_name: chidauri/featuretools | head_hexsha: 1fd1df0765ab7c0af7c495496ea787345a9cab11 | licenses: ["BSD-3-Clause"] | max_stars_count: 3 | stars_event_min_datetime: 2019-09-29T15:10:13.000Z | stars_event_max_datetime: 2019-10-03T08:39:14.000Z |
| max_issues_repo_path: featuretools/synthesis/dfs.py | repo_name: chidauri/featuretools | head_hexsha: 1fd1df0765ab7c0af7c495496ea787345a9cab11 | licenses: ["BSD-3-Clause"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: featuretools/synthesis/dfs.py | repo_name: chidauri/featuretools | head_hexsha: 1fd1df0765ab7c0af7c495496ea787345a9cab11 | licenses: ["BSD-3-Clause"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
| content: |
import pandas as pd
from featuretools.computational_backends import calculate_feature_matrix
from featuretools.entityset import EntitySet
from featuretools.synthesis.deep_feature_synthesis import DeepFeatureSynthesis
from featuretools.utils import entry_point
@entry_point('featuretools_dfs')
def dfs(entities=None,
relationships=None,
entityset=None,
target_entity=None,
cutoff_time=None,
instance_ids=None,
agg_primitives=None,
trans_primitives=None,
groupby_trans_primitives=None,
allowed_paths=None,
max_depth=2,
ignore_entities=None,
ignore_variables=None,
seed_features=None,
drop_contains=None,
drop_exact=None,
where_primitives=None,
max_features=-1,
cutoff_time_in_index=False,
save_progress=None,
features_only=False,
training_window=None,
approximate=None,
chunk_size=None,
n_jobs=1,
dask_kwargs=None,
verbose=False,
return_variable_types=None):
'''Calculates a feature matrix and features given a dictionary of entities
and a list of relationships.
Args:
entities (dict[str -> tuple(pd.DataFrame, str, str)]): Dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_column))}.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
entityset (EntitySet): An already initialized entityset. Required if
entities and relationships are not defined.
target_entity (str): Entity id of entity on which to make predictions.
cutoff_time (pd.DataFrame or Datetime): Specifies times at which to
calculate each instance. The resulting feature matrix will use data
up to and including the cutoff_time. Can either be a DataFrame with
'instance_id' and 'time' columns, a DataFrame with the name of the
index variable in the target entity and a time column, a
list of values, or a single
value to calculate for all instances. If the dataframe has more than
two columns, any additional columns will be added to the resulting
feature matrix.
instance_ids (list): List of instances on which to calculate features. Only
used if cutoff_time is a single datetime.
agg_primitives (list[str or AggregationPrimitive], optional): List of Aggregation
Feature types to apply.
Default: ["sum", "std", "max", "skew", "min", "mean", "count", "percent_true", "num_unique", "mode"]
trans_primitives (list[str or TransformPrimitive], optional):
List of Transform Feature functions to apply.
Default: ["day", "year", "month", "weekday", "haversine", "num_words", "num_characters"]
groupby_trans_primitives (list[str or :class:`.primitives.TransformPrimitive`], optional):
list of Transform primitives to make GroupByTransformFeatures with
allowed_paths (list[list[str]]): Allowed entity paths on which to make
features.
max_depth (int) : Maximum allowed depth of features.
ignore_entities (list[str], optional): List of entities to
blacklist when creating features.
ignore_variables (dict[str -> list[str]], optional): List of specific
variables within each entity to blacklist when creating features.
seed_features (list[:class:`.FeatureBase`]): List of manually defined
features to use.
drop_contains (list[str], optional): Drop features
that contains these strings in name.
drop_exact (list[str], optional): Drop features that
exactly match these strings in name.
where_primitives (list[str or PrimitiveBase], optional):
List of Primitive names (or types) to apply with where clauses.
Default:
["count"]
max_features (int, optional) : Cap the number of generated features to
this number. If -1, no limit.
features_only (bool, optional): If True, returns the list of
features without calculating the feature matrix.
cutoff_time_in_index (bool): If True, return a DataFrame with a MultiIndex
where the second index is the cutoff time (first is instance id).
DataFrame will be sorted by (time, instance_id).
training_window (Timedelta or str, optional):
Window defining how much time before the cutoff time data
can be used when calculating features. If ``None`` , all data before cutoff time is used.
Defaults to ``None``.
approximate (Timedelta): Bucket size to group instances with similar
cutoff times by for features with costly calculations. For example,
if bucket is 24 hours, all instances with cutoff times on the same
day will use the same calculation for expensive features.
save_progress (str, optional): Path to save intermediate computational results.
n_jobs (int, optional): number of parallel processes to use when
calculating feature matrix
chunk_size (int or float or None or "cutoff time", optional): Number
of rows of output feature matrix to calculate at a time. If passed an
integer greater than 0, will try to use that many rows per chunk.
If passed a float value between 0 and 1, sets the chunk size to that
percentage of all instances. If passed the string "cutoff time",
rows are split per cutoff time.
dask_kwargs (dict, optional): Dictionary of keyword arguments to be
passed when creating the dask client and scheduler. Even if n_jobs
is not set, using `dask_kwargs` will enable multiprocessing.
Main parameters:
cluster (str or dask.distributed.LocalCluster):
cluster or address of cluster to send tasks to. If unspecified,
a cluster will be created.
diagnostics port (int):
port number to use for web dashboard. If left unspecified, web
interface will not be enabled.
Valid keyword arguments for LocalCluster will also be accepted.
return_variable_types (list[Variable] or str, optional): Types of
variables to return. If None, default to
Numeric, Discrete, and Boolean. If given as
the string 'all', use all available variable types.
Examples:
.. code-block:: python
from featuretools.primitives import Mean
# cutoff times per instance
entities = {
"sessions" : (session_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("sessions", "id", "transactions", "session_id")]
feature_matrix, features = dfs(entities=entities,
relationships=relationships,
target_entity="transactions",
cutoff_time=cutoff_times)
feature_matrix
features = dfs(entities=entities,
relationships=relationships,
target_entity="transactions",
features_only=True)
'''
if not isinstance(entityset, EntitySet):
entityset = EntitySet("dfs", entities, relationships)
dfs_object = DeepFeatureSynthesis(target_entity, entityset,
agg_primitives=agg_primitives,
trans_primitives=trans_primitives,
groupby_trans_primitives=groupby_trans_primitives,
max_depth=max_depth,
where_primitives=where_primitives,
allowed_paths=allowed_paths,
drop_exact=drop_exact,
drop_contains=drop_contains,
ignore_entities=ignore_entities,
ignore_variables=ignore_variables,
max_features=max_features,
seed_features=seed_features)
features = dfs_object.build_features(
verbose=verbose, return_variable_types=return_variable_types)
if features_only:
return features
if isinstance(cutoff_time, pd.DataFrame):
feature_matrix = calculate_feature_matrix(features,
entityset=entityset,
cutoff_time=cutoff_time,
training_window=training_window,
approximate=approximate,
cutoff_time_in_index=cutoff_time_in_index,
save_progress=save_progress,
chunk_size=chunk_size,
n_jobs=n_jobs,
dask_kwargs=dask_kwargs,
verbose=verbose)
else:
feature_matrix = calculate_feature_matrix(features,
entityset=entityset,
cutoff_time=cutoff_time,
instance_ids=instance_ids,
training_window=training_window,
approximate=approximate,
cutoff_time_in_index=cutoff_time_in_index,
save_progress=save_progress,
chunk_size=chunk_size,
n_jobs=n_jobs,
dask_kwargs=dask_kwargs,
verbose=verbose)
return feature_matrix, features
| avg_line_length: 46.530435 | max_line_length: 116 | alphanum_fraction: 0.572229 |
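For readers skimming the dfs() source above, here is a hedged, self-contained usage sketch. The dataframes, column names and values are invented for illustration; only the call pattern follows the function's own docstring example.

```python
# Hedged usage sketch for featuretools' dfs() as defined above.
# The data below is made up; the entities/relationships format follows the
# docstring: {entity id: (dataframe, index column, time column)} and
# (parent entity, parent variable, child entity, child variable).
import pandas as pd
import featuretools as ft

sessions_df = pd.DataFrame({"id": [1, 2]})
transactions_df = pd.DataFrame({
    "id": [10, 11, 12],
    "session_id": [1, 1, 2],
    "amount": [5.0, 2.5, 7.0],
    "transaction_time": pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03"]),
})

entities = {
    "sessions": (sessions_df, "id"),
    "transactions": (transactions_df, "id", "transaction_time"),
}
relationships = [("sessions", "id", "transactions", "session_id")]

# Returns the feature matrix plus the feature definitions.
feature_matrix, features = ft.dfs(entities=entities,
                                  relationships=relationships,
                                  target_entity="sessions",
                                  max_depth=1)
print(feature_matrix.head())
```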
| hexsha: e77503f3ae9948ee66bc3fa6b2d7824107a871c8 | size: 8,727 | ext: py | lang: Python |
| max_stars_repo_path: gesso/gesso/util/util.py | repo_name: machineeeee/builder-python | head_hexsha: a01415ef8675e5a11afaa0fe33f794f8ab2a98dc | licenses: ["Apache-2.0"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: gesso/gesso/util/util.py | repo_name: machineeeee/builder-python | head_hexsha: a01415ef8675e5a11afaa0fe33f794f8ab2a98dc | licenses: ["Apache-2.0"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: gesso/gesso/util/util.py | repo_name: machineeeee/builder-python | head_hexsha: a01415ef8675e5a11afaa0fe33f794f8ab2a98dc | licenses: ["Apache-2.0"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
| content: |
import os, sys
import psutil
import socket
import json, yaml
import time
import netifaces
import logging
import pkg_resources
from tinydb import TinyDB, Query
# -----------------------------------------------------------------------------
# Process Management
# -----------------------------------------------------------------------------
def kill_proc_tree(pid, including_parent=True):
parent = psutil.Process(pid)
children = parent.children(recursive=True)
for child in children:
child.kill()
gone, still_alive = psutil.wait_procs(children, timeout=5)
if including_parent:
parent.kill()
parent.wait(5)
# -----------------------------------------------------------------------------
# Network Management
# -----------------------------------------------------------------------------
def get_inet_addresses():
addresses = []
for iface_name in netifaces.interfaces():
#iface_addresses = [i['addr'] for i in netifaces.ifaddresses(ifaceName).setdefault(netifaces.AF_INET, [{'addr': 'No IP addr'}])]
#iface_addresses = [i['addr'] for i in netifaces.ifaddresses(iface_name).setdefault(netifaces.AF_INET, [{'addr': None}])]
for i in netifaces.ifaddresses(iface_name).setdefault(netifaces.AF_INET, [{'addr': None}]):
if i['addr'] is not None:
addresses.append(i['addr'])
return addresses
# -----------------------------------------------------------------------------
# File I/O
# -----------------------------------------------------------------------------
def load_yaml_file(path):
# TODO: load .yaml file with model for particular (id, version) and populate this model... create __init__ to do that... multiple constructors!
#component_model = 'model-component-gesso.yaml'
yaml_object = None
with open(path, 'r') as file:
yaml_string = file.read()
yaml_object = yaml.load(yaml_string)
#print component_model
return yaml_object
return None
# -----------------------------------------------------------------------------
# File System Management
# -----------------------------------------------------------------------------
def get_file_list(path=os.getcwdu()):
file_names = os.listdir(path)
return file_names
# Checks if the current directory contains the specified file
# If so, returns the path containing the file. If not, returns None.
def contains_file(path=os.getcwdu(), filename=None):
# TODO: rename to locate_file
file_path = os.path.join(path, filename)
if os.path.exists(file_path):
return path
else:
return None
# Checks if the current directory or parent directory contains the specified file, recursively, starting with the specified path.
# If so, returns the path containing the file.
def parent_contains(filename, path=os.getcwdu(), recursive=True):
# TODO: rename to locate_file
file_path = os.path.join(path, filename)
if os.path.exists(file_path):
return path
if recursive:
parent_dir_path = os.path.abspath(os.path.join(path, os.pardir))
is_root_dir = parent_dir_path == path
if not is_root_dir:
return parent_contains(filename, parent_dir_path)
else:
return parent_contains(filename, parent_dir_path, recursive=False)
else:
return None
def get_gesso_root(path=os.getcwdu()):
return parent_contains('.gesso')
def is_gesso_root(path=os.getcwdu()):
return path == get_gesso_root(path)
def is_gesso_tree(path=os.getcwdu()):
if parent_contains('.gesso'):
return True
else:
return False
def init_workspace_path(path=get_gesso_root()):
# make "./.gesso/vagrant" folder if doesn't exist
# make "./.gesso/vagrant/<vm-name>" folder for generated name
# generate ./.gesso/vagrant/<vm-name>" Vagrantfile
# - modify to set the name of the VM
# - add bootstrap.sh
# - run "gesso init <vm-name>" on VM
gesso_path = os.path.join(path, '.gesso')
if not os.path.exists(gesso_path):
print 'mkdir %s' % gesso_path
os.makedirs(gesso_path)
components_path = os.path.join(path, '.gesso', 'components')
if not os.path.exists(components_path):
print 'mkdir %s' % components_path
os.makedirs(components_path)
def init_machine_path(name, path=get_gesso_root()):
init_workspace_path(path)
# Example filesystem:
#
# .gesso
# /components
# /fuzzy-koala
# /vagrant
# .Vagrantfile
machine_path = os.path.join(path, '.gesso', 'components', name)
if not os.path.exists(machine_path):
print 'mkdir %s' % machine_path
os.makedirs(machine_path)
#if virtual:
#vagrant_path = os.path.join(path, '.gesso', 'components', name, 'vagrant')
#if not os.path.exists(vagrant_path):
# print 'mkdir %s' % vagrant_path
# os.makedirs(vagrant_path)
return machine_path
def get_machine_path(name, path=get_gesso_root()):
return os.path.join(path, '.gesso', 'components', name)
# TODO: Add to Device/Database class (serves as data access interface/map to component)
def get_machine_address(name):
gesso_db_path = get_database_path()
db = TinyDB(gesso_db_path, default_table='gesso')
component_table = db.table('component')
component = None
Device = Query()
component_element = component_table.search(Device.name == name)
if len(component_element) > 0:
component = component_element[0]
return component['address']['ip4']
# Get machines from database
def get_machines():
gesso_db_path = get_database_path()
db = TinyDB(gesso_db_path, default_table='gesso')
component_table = db.table('component')
#component = None
#Device = Query()
#component_element = component_table.search(Device.name == name)
#if len(component_element) > 0:
#component = component_element[0]
#return component['address']['ip4']
return component_table.all()
def logger(log_name, exclude_prefix=False):
"""
Returns a logger for the log located at '.gesso/logs/<log_name>'.
If the log doesn't exist, creates it in the '.gesso/logs' directory.
exclude_prefix : If set to `True`, this will remove per-line prefixes
from the output written to the file.
"""
gesso_root = get_gesso_root()
gesso_folder = os.path.join(gesso_root, '.gesso')
if not os.path.exists(gesso_folder):
print 'mkdir %s' % gesso_folder
os.makedirs(gesso_folder)
log_folder = os.path.join(gesso_root, '.gesso', 'logs')
if not os.path.exists(log_folder):
print 'mkdir %s' % log_folder
os.makedirs(log_folder)
logfile_name = '%s.log' % log_name
logfile_path = os.path.join(log_folder, logfile_name)
#logging.basicConfig(filename=logfile_path, level=logging.DEBUG, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
logger = logging.getLogger(log_name)
logger.setLevel(logging.DEBUG)
# Create file handler that stores the logged messages
fh = logging.FileHandler(logfile_path)
fh.setLevel(logging.DEBUG)
# Create formatter and add it to handlers
if not exclude_prefix:
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
#return logfile_path
return logger
def get_data_dir():
data_dir = pkg_resources.resource_filename('gesso', 'data')
if not os.path.exists(data_dir):
return None
return data_dir
def get_data_filename(filename):
if filename == '' or filename is None:
return pkg_resources.resource_filename('gesso', 'data')
else:
return pkg_resources.resource_filename('gesso', 'data/%s' % filename)
def setup_gesso_dir():
return None
def get_current_dir():
return os.getcwdu()
def get_parent_dir():
return os.path.abspath(os.path.join(get_current_dir(), os.pardir))
def get_gesso_dir():
return os.path.join(get_current_dir(), '.gesso')
# Load copy of file
def get_file(name):
return open(get_data_filename(name)).read()
# Load copy of Vagrantfile
def get_vagrant_file(name):
vagrantfiledata = get_data_filename('Vagrantfile')
vagrantfiledata = open(vagrantfiledata).read().replace('%NAME%', name)
return vagrantfiledata
# Load the Builderfile into a dictionary
def load_gessofile(path=get_gesso_root()):
gesso_config_path = os.path.join(path, '.gesso', 'config')
# Load the record from database
gesso_config = {}
with open(gesso_config_path, 'r') as file:
gesso_config = json.loads(file.read())
#gessofile_json = json.dumps(gessofile, indent=4, sort_keys=False)
return gesso_config
# TODO: class Database:
# Get the path to the TinyDB database file
def get_database_path(path=get_gesso_root()):
gesso_db_path = os.path.join(path, '.gesso', 'database')
return gesso_db_path
# Write updated database
def store_gessofile(gessofile, path=os.getcwdu()):
gessofile_json = json.dumps(gessofile, indent=4, sort_keys=False)
with open(path, 'w') as file:
file.write(gessofile_json)
#logger.info('---\n%s\n---' % db_dict_json)
| avg_line_length: 31.167857 | max_line_length: 147 | alphanum_fraction: 0.674115 |
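The util module above repeatedly walks up the directory tree looking for a `.gesso` marker (parent_contains, get_gesso_root). A Python 3 sketch of that pattern follows; it is illustrative only and not part of the gesso package (the function name and the `.git` example are placeholders).

```python
# Illustrative Python 3 version of the "walk up until a marker is found"
# pattern used by parent_contains()/get_gesso_root() above.
import os

def find_marker_root(marker=".gesso", path=None):
    """Return the closest ancestor directory containing `marker`, else None."""
    path = os.path.abspath(path or os.getcwd())
    while True:
        if os.path.exists(os.path.join(path, marker)):
            return path
        parent = os.path.abspath(os.path.join(path, os.pardir))
        if parent == path:  # reached the filesystem root without finding it
            return None
        path = parent

# Example: locate the enclosing git checkout, if any.
print(find_marker_root(".git"))
```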
| hexsha: 19c2d0246a600d9130a2bd6a5df5269287abf29a | size: 3,394 | ext: py | lang: Python |
| max_stars_repo_path: sphinx_toolbox/__init__.py | repo_name: arisp99/sphinx-toolbox | head_hexsha: 2987080e2d65c0dd2d392dcf7f1f5a904a9231f5 | licenses: ["MIT"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: sphinx_toolbox/__init__.py | repo_name: arisp99/sphinx-toolbox | head_hexsha: 2987080e2d65c0dd2d392dcf7f1f5a904a9231f5 | licenses: ["MIT"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: sphinx_toolbox/__init__.py | repo_name: arisp99/sphinx-toolbox | head_hexsha: 2987080e2d65c0dd2d392dcf7f1f5a904a9231f5 | licenses: ["MIT"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
| content: |
#!/usr/bin/env python3
#
# __init__.py
"""
Box of handy tools for Sphinx.
"""
#
# Copyright © 2020-2021 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# stdlib
import sys
# This all has to be up here so it's triggered first.
if sys.version_info >= (3, 10):
# stdlib
import types
types.Union = types.UnionType
# 3rd party
from sphinx.application import Sphinx
# this package
from sphinx_toolbox import ( # noqa: F401
assets,
code,
config,
confval,
installation,
issues,
rest_example,
shields,
source,
utils,
wikipedia
)
from sphinx_toolbox.cache import cache # noqa: F401
__author__: str = "Dominic Davis-Foster"
__copyright__: str = "2020 Dominic Davis-Foster"
__license__: str = "MIT License"
__version__: str = "2.18.0"
__email__: str = "dominic@davis-foster.co.uk"
__all__ = ["setup"]
def setup(app: Sphinx) -> "utils.SphinxExtMetadata":
"""
Setup :mod:`sphinx_toolbox`.
:param app: The Sphinx application.
"""
# Ensure dependencies are set up
app.setup_extension("sphinx.ext.viewcode")
app.setup_extension("sphinx_toolbox.github")
app.connect("config-inited", config.validate_config, priority=850)
# Setup standalone extensions
app.setup_extension("sphinx_toolbox.assets")
app.setup_extension("sphinx_toolbox.changeset")
app.setup_extension("sphinx_toolbox.code")
app.setup_extension("sphinx_toolbox.collapse")
app.setup_extension("sphinx_toolbox.confval")
app.setup_extension("sphinx_toolbox.decorators")
app.setup_extension("sphinx_toolbox.formatting")
app.setup_extension("sphinx_toolbox.installation")
app.setup_extension("sphinx_toolbox.issues")
app.setup_extension("sphinx_toolbox.latex")
app.setup_extension("sphinx_toolbox.rest_example")
app.setup_extension("sphinx_toolbox.shields")
app.setup_extension("sphinx_toolbox.sidebar_links")
app.setup_extension("sphinx_toolbox.source")
app.setup_extension("sphinx_toolbox.wikipedia")
app.setup_extension("sphinx_toolbox.more_autodoc.autoprotocol")
app.setup_extension("sphinx_toolbox.more_autodoc.autotypeddict")
app.setup_extension("sphinx_toolbox.more_autodoc.autonamedtuple")
# Hack to get the docutils tab size, as there doesn't appear to be any other way
app.setup_extension("sphinx_toolbox.tweaks.tabsize")
return {
"version": __version__,
"parallel_read_safe": True,
}
| avg_line_length: 32.018868 | max_line_length: 81 | alphanum_fraction: 0.767531 |
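The __init__.py above exposes a single setup() entry point that registers all of the bundled sub-extensions. In a project's Sphinx conf.py, enabling it typically looks like the sketch below; the project name and GitHub values are placeholders, not taken from the file above.

```python
# conf.py sketch (placeholder metadata). Adding "sphinx_toolbox" to `extensions`
# causes Sphinx to call the setup() shown above, which in turn registers the
# bundled sub-extensions via app.setup_extension().
project = "example-project"          # placeholder
extensions = [
    "sphinx.ext.autodoc",
    "sphinx_toolbox",                # assets, code, confval, shields, ...
]

# sphinx_toolbox.github (set up by setup() above) expects these config values;
# the account and repository names here are placeholders.
github_username = "example-user"
github_repository = "example-repo"
```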
| hexsha: 2ae75d843c7941aaafebce473fd22cdf76871531 | size: 7,992 | ext: py | lang: Python |
| max_stars_repo_path: backtest/strategy.py | repo_name: Christakou/backtest | head_hexsha: fa97f50b36a1d56fe667250169ed50a8d9121c3c | licenses: ["MIT"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: backtest/strategy.py | repo_name: Christakou/backtest | head_hexsha: fa97f50b36a1d56fe667250169ed50a8d9121c3c | licenses: ["MIT"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: backtest/strategy.py | repo_name: Christakou/backtest | head_hexsha: fa97f50b36a1d56fe667250169ed50a8d9121c3c | licenses: ["MIT"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
| content: |
import datetime
from backtest.marketdata import MarketData
from dataclasses import dataclass, field
import math
@dataclass
class TradeAction:
"""
Data class to store trades
"""
symbol: str
date: str
type: str
quantity: int
price: float
value: float = field(init=False)
def __post_init__(self):
self.value = self.price * self.quantity
def __str__(self):
return f'|{self.type} {self.quantity} of {self.symbol} at {self.price} on {self.date:%Y-%m-%d} for {self.value}|'
def __repr__(self):
return self.__str__()
class Strategy:
"""
A template class that can be used to define new strategies through inheritance
"""
def __init__(self, relevant_symbols=(), initial_capital=1000000, start_date=None, end_date=None, market_data=MarketData(), name = None):
self.name = name if name is not None else type(self).__name__
self.symbols = relevant_symbols
self.market_data = market_data
self.start_date = start_date if start_date else self.market_data.start_date
self.end_date = end_date if end_date else self.market_data.end_date
self.holdings = {}
self.cash_holdings = {}
self.trades = []
self.initial_capital = self.cash = initial_capital
def execute(self):
"""
To be implemented by child class, this should
iterate through dates and output actions based on signals
"""
pass
def buy(self, instrument, date, quantity):
"""
Defines a 'BUY' operation, handles cash and updates the holdings
"""
price = instrument.prices.loc[date].close
self.trades.append(TradeAction(instrument.symbol, date, 'BUY', quantity, price))
if self.holdings.get(instrument.symbol):
self.holdings[instrument.symbol] += quantity
else:
self.holdings.update({instrument.symbol: quantity})
self.cash -= price*quantity
self._update_cash_holdings(date)
def sell(self, instrument, date, quantity):
"""
Defines a 'SELL' operation, handles cash and updates the holdings
"""
# should handle shorts?
if self.holdings.get(instrument.symbol) is not None and self.holdings.get(instrument.symbol) > 0:
price = instrument.prices.loc[date].close
self.trades.append(TradeAction(instrument.symbol,date, 'SELL', quantity, price))
if self.holdings.get(instrument.symbol):
self.holdings[instrument.symbol] -= quantity
else:
self.holdings.update({instrument.symbol: -quantity})
self.cash += price * quantity
self._update_cash_holdings(date)
else:
pass
def _update_cash_holdings(self, date):
self.cash_holdings.update({date: self.cash})
def evaluate(self, date):
"""
Prints a summary of the strategy's performance (initial capital, gross value, net profit and per-symbol contributions) as of the given date
"""
print(f'_____________________ EVALUATION: {self.name} as of {date} ____________________')
print('')
print(f'Initial investment = {self.initial_capital:.2f}')
print(f'Portfolio Gross value at {date}: {self.portfolio_gross_value_at(date):.2f}')
print(f'Total Net Profit: {self.portfolio_gross_value_at(date) - self.initial_capital + self.cash :.2f}')
print(f'Fractional contributions: {self.fractional_portfolio_gross_value_at(date)}')
def fractional_portfolio_gross_value_at(self, date):
"""
Returns a dictionary representing the portfolio gross value contributions of each instrument held
"""
holdings_to_date = self._holdings_at(date)
pgv = {}
for symbol, quantity in holdings_to_date.items():
pgv.update({symbol:self.market_data.close_at(symbol,date)*quantity})
return pgv
def portfolio_gross_value_at(self, date):
"""
Returns the total portfolio gross value at a given date
"""
pgv_dict = self.fractional_portfolio_gross_value_at(date)
cash_holdings_at = self._cash_holdings_at(date)
return sum(pgv_dict.values())+cash_holdings_at
def _plot_PGV(self):
"""
Returns data for plotting
"""
pgv_over_time = []
dates = self.market_data.dates
for date in dates:
pgv_over_time.append(self.portfolio_gross_value_at(date))
x = dates
y = pgv_over_time
return x, y
def _holdings_at(self, date):
'''
Reads trade records to return the positions held at a given date
'''
holdings_at = {}
if type(date) == str:
date = datetime.date.fromisoformat(date)
relevant_trades = [trade_record for trade_record in self.trades if trade_record.date <= date]
for trade in relevant_trades:
if holdings_at.get(trade.symbol) is None:
holdings_at.update({trade.symbol:trade.quantity})
else:
holdings_at[trade.symbol] += trade.quantity
return holdings_at
def _cash_holdings_at(self,date):
"""
Reads the strategy's cash_holdings variable and outputs the amount of cash at a given time
"""
if type(date) == str:
date = datetime.date.fromisoformat(date)
cash_holdings_dates_to_date = [cash_record for cash_record in self.cash_holdings if cash_record <= date]
if cash_holdings_dates_to_date:
return self.cash_holdings.get(cash_holdings_dates_to_date[-1])
else:
return 0
class BuyAndHoldEqualAllocation(Strategy):
"""
Dummy strategy that splits exposure equally across all symbols and buys
"""
def execute(self):
relevant_instruments = [instrument for symbol, instrument in self.market_data.instruments.items() if
symbol in self.symbols]
for date in self.market_data.dates:
cash_to_allocate = round(self.cash / len(relevant_instruments))
for instrument in relevant_instruments:
quantity_to_buy = math.floor(cash_to_allocate / instrument.prices.loc[date].close)
if quantity_to_buy <= 0:
break
self.buy(instrument, date, quantity_to_buy)
else:
continue
break
class BuyOnTheUpSellOnTheDown(Strategy):
"""
Dummy strategy that buys stocks that have gone up over the trailing date_gap days and sells stocks that have gone down
"""
def __init__(self,date_gap, *args, **kwargs):
super().__init__(*args, **kwargs)
self.date_gap = date_gap
def execute(self):
relevant_instruments = [instrument for symbol, instrument in self.market_data.instruments.items() if
symbol in self.symbols]
for date in self.market_data.dates:
for instrument in relevant_instruments:
visible_instrument_prices = instrument.prices[date:]
if len(visible_instrument_prices) <self.date_gap:
continue
is_slope_positive = visible_instrument_prices.iloc[0] - visible_instrument_prices.iloc[-self.date_gap] > 0
if is_slope_positive['close']:
if self.cash > 10000:
quantity_to_buy = math.floor((self.cash//100) / instrument.prices.loc[date].close)
if quantity_to_buy <= 0:
break
self.buy(instrument, date, quantity_to_buy)
else:
quantity_to_sell = self.holdings.get(instrument.symbol)
self.sell(instrument, date, quantity_to_sell)
else:
continue
break
| avg_line_length: 39.564356 | max_line_length: 140 | alphanum_fraction: 0.613864 |
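TradeAction above uses the dataclass idiom of declaring a derived field with field(init=False) and filling it in __post_init__. A standalone sketch of that pattern (the class and field names are illustrative, not from the backtest package):

```python
# Standalone illustration of the field(init=False) + __post_init__ pattern
# used by TradeAction above; the Fill class is invented for this example.
from dataclasses import dataclass, field

@dataclass
class Fill:
    quantity: int
    price: float
    value: float = field(init=False)  # derived, never passed by the caller

    def __post_init__(self):
        # Computed once from the constructor arguments, like TradeAction.value.
        self.value = self.price * self.quantity

print(Fill(quantity=10, price=2.5))  # Fill(quantity=10, price=2.5, value=25.0)
```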
| hexsha: b385d4627af10b48784d69a9abab1deaf6bd60a1 | size: 24,950 | ext: py | lang: Python |
| max_stars_repo_path: bigplanet/bigplanet/bigplot.py | repo_name: decaelus/vplanet | head_hexsha: f59bd59027f725cc12a2115e8d5df58784c53477 | licenses: ["MIT"] | max_stars_count: null | stars_event_min_datetime: null | stars_event_max_datetime: null |
| max_issues_repo_path: bigplanet/bigplanet/bigplot.py | repo_name: decaelus/vplanet | head_hexsha: f59bd59027f725cc12a2115e8d5df58784c53477 | licenses: ["MIT"] | max_issues_count: null | issues_event_min_datetime: null | issues_event_max_datetime: null |
| max_forks_repo_path: bigplanet/bigplanet/bigplot.py | repo_name: decaelus/vplanet | head_hexsha: f59bd59027f725cc12a2115e8d5df58784c53477 | licenses: ["MIT"] | max_forks_count: null | forks_event_min_datetime: null | forks_event_max_datetime: null |
| content: |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import data_extraction as de
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import itertools
"""
ROUTINES FOR REGULARLY GRIDDED DATA
The below functions only work/make sense for some N-dimensional
simulation grid.
Note: For regularly gridded data, no simulations can be thrown away, i.e.
when running data_extraction, you CANNOT have excluded halted simulations.
If you did that, then the data will have been created from a grid, but the grid
will be likely missing chunks and hence the dimensionality reduction algorithms
will fail.
"""
# Tell module what it's allowed to import
__all__ = ["plot_red_dim",
"plot_red_dim_contour",
"red_dim_grid",
"multiscatter"]
def plot_red_dim(x, y, z, shape, fig, ax, labels=None, dims = (-1,),
reduce_func = np.nanmean, nan_value = 0.0, bReduce=True,
interp="gaussian",cmap="viridis",colorbar=True,aspect="auto",
origin="lower",vmin=None,vmax=None,**kwargs):
"""
Workhorse plotting function. Plots a simple compression of multidimensional
gridded data down to a 3D (2 dims + color dim) space for imshow plots.
Parameters
----------
x : numpy array
y : numpy array
z : numpy array
Color variable
shape : tuple
Dimensionality of data, e.g. (5, 4) for x,y,z of len 20
fig : matplotlib figure object
ax : matplotlib axis object
labels : list of str
Axis labels
dims : tuple
Dimensions to compress, e.g. for a (5, 4, 3) dimensional data set that we
wish to compress to (5, 4), dims == (-1)
reduce_func : function
Function used to compress color dimension, typically numpy mean
nan_value : float
Value to set nans to for coloring purposes
bReduce : bool
Whether or not to reduce the data
interp : str
imshow interpolation method
cmap : str
Matplotlib colormap
colorbar : bool
Whether or not to plot a colorbar
aspect : str
See matplotlib imshow docs
origin : str
See matplotlib imshow docs
vmin, vmax : float
min, max of colorbar range, respectively
Returns
-------
None
Example usage:
# Init axes labels, data to plot
labels = ["Binary Semimajor Axis [AU]", "Binary Eccentricity", "Initial CBP Eccentricty"]
x = df["secondary"]["SemiMajorAxis"]
y = df["secondary"]["Eccentricity"]
z = df["cbp"]["Ecce"]
# Shape of the data
shape = (5, 4, 3)
fig, ax = plt.subplots()
plot_red_dim(x, y, z, shape, ax, labels=labels)
"""
# Reduce the dimensionality of the data?
if bReduce:
x, y, z = de.reduce_dimensions(x, y, z, shape=shape, dims=dims,
reduce_func = reduce_func)
# Filter our nans
z[np.isnan(z)] = nan_value
# Set colorbar ranges if not given
if vmin == None or vmax == None:
vmin=np.min(z)
vmax=np.max(z)
# Plot
cax = ax.imshow(z, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin, aspect=aspect,
extent=[x.min(), x.max(), y.min(), y.max()],interpolation=interp,**kwargs)
# Make it square and set limits
ax.set_adjustable('box-forced')
ax.set_xlim(x.min(),x.max())
ax.set_ylim(y.min(),y.max())
# Format plot
if labels is not None and labels != []:
assert len(labels) == 3
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
if colorbar:
# Create colorbar
cbar = fig.colorbar(cax)
cbar.set_label(labels[2], rotation=270, labelpad=25)
cax.set_clim(vmin,vmax)
return None
# End function
def plot_red_dim_contour(x, y, z, shape, fig, ax, labels=None, dims = (-1,),
reduce_func = np.nanmean, nan_value = 0.0, bReduce=True,
interp="gaussian",cmap="viridis",levels=20,clines=False,
colorbar=False,origin="lower",**kwargs):
"""
Workhorse plotting function. Plots a simple compression of multidimensional
gridded data down to a 3D (2 dims + color dim) space for filled-contour plots.
Parameters
----------
x : numpy array
y : numpy array
z : numpy array
Color variable
shape : tuple
Dimensionality of data, e.g. (5, 4) for x,y,z of len 20
fig : matplotlib figure object
ax : matplotlib axis object
labels : list of str
Axis labels
dims : tuple
Dimensions to compress, e.g. for a (5, 4, 3) dimensional data set that we
wish to compress to (5, 4), dims == (-1)
reduce_func : function
Function used to compress color dimension, typically numpy mean
nan_value : float
Value to set nans to for coloring purposes
bReduce : bool
Whether or not to reduce the data
interp : str
imshow interpolation method
cmap : str
Matplotlib colormap
levels : int
Number of contour levels to plot
clines : bool
Whether or not to plot contour lines
colorbar : bool
Whether or not to plot a colorbar
origin : str
See matplotlib imshow docs
Returns
-------
None
Example usage:
# Init axes labels, data to plot
labels = ["Binary Semimajor Axis [AU]", "Binary Eccentricity", "Initial CBP Eccentricty"]
x = df["secondary"]["SemiMajorAxis"]
y = df["secondary"]["Eccentricity"]
z = df["cbp"]["Ecce"]
# Shape of the data
shape = (5, 4, 3)
levels = 15
fig, ax = plt.subplots()
plot_red_dim_contour(x, y, z, shape, ax, labels=labels, levels=levels)
"""
# Reduce the dimensionality of the data?
if bReduce:
x, y, z = de.reduce_dimensions(x, y, z, shape=shape, dims=dims,
reduce_func = reduce_func)
# Filter our nans
z[np.isnan(z)] = nan_value
# Set colorbar ranges
vmin=np.min(z)
vmax=np.max(z)
# Plot
cax = ax.contourf(z, levels, cmap=cmap,vmin=vmin,vmax=vmax,origin=origin,
extent=[x.min(), x.max(), y.min(), y.max()],**kwargs)
# Plot/label contour lines?
if clines:
# Plot contour lines
ax.contour(cax, levels=cax.levels[1::2],
colors="white",
origin="lower",
hold='on')
# Label contours
ax.clabel(cax, cax.levels[::2], # label every second level
inline=0,
fmt='%1.1f',
fontsize=15,
colors="white")
# Format plot
if labels is not None and labels != []:
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
if colorbar:
# Create colorbar
cbar = fig.colorbar(cax)
cbar.set_label(labels[2], rotation=270, labelpad=25)
cax.set_clim(vmin,vmax)
return None
# End function
def red_dim_grid(df, shape, dims, color_by="cbp_DampTime", left_color_func = np.nanmean, nan_value = 0.0,
bReduce=True,interp="nearest",lcmap="viridis_r",rcmap="magma_r",levels=None,clines=False,
origin="lower",save=False,right_color_func = np.std,**kwargs):
"""
Produce a grid of plots where each subplot shows 2 variables versus each other colored by a 3rd variable.
The lower left triangle plots the 3rd variable marginalized (averaged) over all other dimensions while
the upper right triangle plots some measure of the variance of the marginalized 3rd variable.
Parameters
----------
df: dataframe produced by the "aggrategate_data" fn
Data frame containing simulation initial values for variables specified by user and
any additional aggregate values. 1 row == 1 simulation.
shape : dict of dict
Dictionary containing body and variable and dimensional length of that variable, e.g.
shape = {"secondary" : {"Eccentricity" : 5}} indicates that this simulation suite was
run for 5 values of the secondary eccentricity.
color_by : str
Name of body_var to color output by
left_color_func : function
function used to marginalize. typically numpy.nanmean or some similar variant
right_color_func : function
function used to plot something variance-like. typically numpy.nanstd or some similar variant
nan_value: float
set NaNs to this value
bReduce : bool
whether or not to marginalize over higher dimensions. Should be true and mainly is here
for backwards compatibility.
interp : str
imshow interpolation method. see matplotlib imshow docs for more info
lcmap : str
matplotlib colormap for lefthand triangle
rcmap : str
matplotlib colormap for righthand triangle
levels : int
number of contours if making a contour plot. Currently not implemented.
clines : int
number of contour lines if making a contour plot. Currently not implemented.
origin : str
matplotlib imshow parameter
save : bool
whether or not to save the plot as a image
Returns
-------
fig : matplotlib figure object
axes : array
array of fig's axes
Example
-------
>>> # Define the shape
>>> shape = {"secondary" : {"Eccentricity" : 5, "SemimajorAxis" : 5}, "cbp" : {"Eccentricity" : 5,
>>> "SemimajorAxis" : 5}}
>>> # Define dims
>>> dims = {"secondary" : {"Eccentricity" : 0, "SemimajorAxis" : 1}, "cbp": {"Eccentricity" : 2,
>>> "SemimajorAxis" : 3}}
>>> fig, axes = red_dim_grid(df, shape, color_by="cbp_DampTime", interp="nearest",
>>> lcmap="viridis_r",rcmap="plasma_r",save=True, right_color_func = np.std)
(plot saved to confusogram.pdf)
"""
# Using df, shape, color_name, make a list containing all the axes
# combinations
combos = create_axes_combos(df,color_by=color_by)
# Length of size of plot square == number of variables varied in simulation suite
size = int(np.sqrt(len(combos)))
# If size isn't at least 3, this function isn't for you
assert size >= 3,"Too few dimensions. Use imshow or contourf instead!"
# Make a figure/axes to plot on
fig, axes = plt.subplots(size, size, figsize=(size*9,size*8))
# Get colorbar ranges
# Is there something smarter I can do here?
lvmin = np.min(df[color_by])
lvmax = np.max(df[color_by])
rvmin = 0.0
rvmax = np.std(df[color_by])
# Iterate over combos going left->right->down->repeat
# along the axes
# Loop over a rows
for i in range(size):
# Loop over cols
for j in range(size):
# Along the diagonal? skip
if i == j:
fig.delaxes(axes[i,j])
continue
# Make subplot axis labels
label = [combos[i*size + j][1].split("_")[0] + " " + combos[i*size + j][1].split("_")[1],
combos[i*size + j][0].split("_")[0] + " " + combos[i*size + j][0].split("_")[1],
""]
# LEFT of the diagonal? Marginalize (via mean)
if j < i:
combo = combos[i*size + j]
xcombo = combo[1]
ycombo = combo[0]
x = df[xcombo].values
y = df[ycombo].values
z = df[color_by].values
# Get shape of data
tmp_shape = axes_to_shape(combo, shape)
# Get dimensions to marginalize over
xkey, xvar = xcombo.split("_")
ykey, yvar = ycombo.split("_")
tmp_dims = get_dims(dims, xkey, xvar, ykey, yvar)
plot_red_dim(x, y, z, tmp_shape, fig, axes[i,j], labels=label, dims=tmp_dims,
reduce_func = left_color_func, nan_value = nan_value, bReduce=bReduce,
interp=interp,cmap=lcmap,colorbar=False,origin=origin,vmin=lvmin,
vmax=lvmax)
# RIGHT of the diagonal? Some measure of the STD/Variance
elif j > i:
# Note: here x, y are switched for readability
combo = combos[i*size + j]
ycombo = combo[1]
xcombo = combo[0]
# Exchange labels
tmp_lab = label[0]
label[0] = label[1]
label[1] = tmp_lab
x = df[ycombo].values
y = df[xcombo].values
z = df[color_by].values
# Get shape of data
tmp_shape = axes_to_shape(combo, shape)
# Get dimensions to marginalize over
xkey, xvar = xcombo.split("_")
ykey, yvar = ycombo.split("_")
tmp_dims = get_dims(dims, xkey, xvar, ykey, yvar)
plot_red_dim(x, y, z, tmp_shape, fig, axes[i,j], labels=label, dims=tmp_dims,
reduce_func = right_color_func, nan_value = nan_value, bReduce=bReduce,
interp=interp,cmap=rcmap,colorbar=False,origin=origin,vmin=rvmin,
vmax=rvmax)
# Add colorbars for both triangles
# Left side...STRONG SIDE
lcax = fig.add_axes([0.05, 0.1, 0.02, 0.6])
lnorm = mpl.colors.Normalize(vmin=lvmin, vmax=lvmax)
lcb = mpl.colorbar.ColorbarBase(lcax, cmap=lcmap,spacing='proportional',
norm=lnorm)
lcb.set_label('DampTime')
lcb.ax.yaxis.set_ticks_position('left')
lcb.ax.yaxis.set_label_position('left')
# Right Side
rcax = fig.add_axes([0.93, 0.3, 0.02, 0.6])
rnorm = mpl.colors.Normalize(vmin=rvmin, vmax=rvmax)
rcb = mpl.colorbar.ColorbarBase(rcax, cmap=rcmap,spacing='proportional',
norm=rnorm)
rcb.set_label('DampTime std',rotation=270,labelpad=30)
# Save the figure?
if save:
fig.savefig("confusogram.pdf",bbox_extra_artists=(rcax,lcax),bbox_inches='tight')
return fig, axes
# end function
###################################################################
#
# Regular Grid Plotting utilities functions.
#
###################################################################
def create_axes_combos(df,color_by="cbp_DampTime"):
"""
Build every ordered pair of simulation parameter columns in the dataframe
(the Cartesian product of the columns with themselves), excluding the
color_by column. Each off-diagonal pair defines the x/y axes of one subplot
in red_dim_grid, with the color_by column reserved for the color dimension.
Parameters
----------
df: dict of dataframes produced by the "aggrategate_data" fn
1 dataframe for each body
Data frame containing simulation initial values for variables specified by user and
any additional aggregate values. 1 row == 1 simulation.
color_by : str
column in one of df's dataframes that will be the variable that describes (gives a color
to) each simulation. It is ignored from the combinations since we wouldn't color by a
variable that also is plotted along an axis.
Returns
-------
list : list
list of every combination of length 2 of simulation parameters
stored in the dataframe
Example
-------
>>> create_axes_combos(df)
[('cbp_Ecce', 'secondary_Eccentricity'),
('cbp_Ecce', 'secondary_SemiMajorAxis'),
('secondary_Eccentricity', 'secondary_SemiMajorAxis')]
"""
# Get columns
cols = list(df.columns)
# Remove color_by
cols.remove(color_by)
# Return permutations
return list(itertools.product(cols,cols))
# Old code
'''
variables = []
for key in df.keys():
# Get variables associated with said body
tmp = list(df[key].columns.values)
# If color variable in list, remove it
if color_by in tmp:
tmp.remove(color_by)
# Add body's name to variable for clarity
for i in range(0,len(tmp)):
tmp[i] = key + "_" + tmp[i]
variables = variables + tmp
# Get only unique combinations
return list(itertools.product(variables,variables))
'''
# end function
def get_shape(shape, key1, var1, key2, var2):
"""
Parse a shape nested dictionary to derive a shape tuple where the user defined
parameters are the first two values of the tuple. This function is used to create
shape tuples needed for plotting functions that plot a 2D figure with a color that
must marginalize over all higher dimensions. The first 2 values in the tuple are the
plotted variables while the others are the ones to be marginalized over.
Parameters
----------
shape : dict of dict
Dictionary containing body and variable and dimensional length of that variable, e.g.
shape = {"secondary" : {"Eccentricity" : 5}} indicates that this simulation suite was
run for 5 values of the secondary eccentricity.
key1, key2 : str
Names of bodies
var1, var2 : str
Names of variables associated with given body
Returns
-------
shape : tuple
Shape of your data, e.g. (5,4,3)
Usage:
>>> holder = get_shape(shape, "secondary","Eccentricity", "cbp", "Ecce")
>>> print(holder)
>>> (5, 3, 4) # for a 60 simulation suite
"""
holder = []
# Loop over bodies
for key in shape.keys():
# Loop over variables for each body
for var in shape[key].keys():
# If key-var pair not a user-defined one, append it
# These are the dimensions to marginalize over!
if (var != var1 or key != key1) and (var != var2 or key != key2):
holder.append(shape[key][var])
# Insert front 2 dimensions
holder.insert(0,shape[key2][var2])
holder.insert(0,shape[key1][var1])
return list(holder)
# end function
def axes_to_shape(combo, shape):
"""
Create a shape tuple from
Parameters
----------
combo : tuple
combination of length 2 of simulation parameters
stored in the dataframe
shape : dict of dict
Dictionary containing body and variable and dimensional length of that variable, e.g.
shape = {"secondary" : {"Eccentricity" : 5}} indicates that this simulation suite was
run for 5 values of the secondary eccentricity.
Returns
-------
shape : tuple
Shape of your data, e.g. (5,4,3)
Example
-------
>>> combo = create_axes_combos(df)[0] # Pick 1st combo to test
>>> print(axes_to_shape(combo, shape))
(5, 3, 4)
"""
# Split combo into body_var
word1 = combo[0]
word2 = combo[1]
# Isolate body name, variable name
key1, var1 = word1.split("_")
key2, var2 = word2.split("_")
# Return shape
return get_shape(shape, key1, var1, key2, var2)
# end function
def get_dims(dims, key1, var1, key2, var2):
"""
Parse a shape nested dictionary to derive a dimension tuple for dimensions to
marginalize over. This function is used to create
dimension tuples needed for plotting functions that plot a 2D figure with a color that
must marginalize over all higher dimensions. The first 2 values in the tuple are the
plotted variables while the others are the ones to be marginalized over.
Parameters
----------
dims : dict of dict
Dictionary containing body and variable and dimensional position of that variable, e.g.
shape = {"secondary" : {"Eccentricity" : 0}} indicates that this simulation suite was
run where secondary eccentricity was the first varied variable (1st line in the
vspace input file).
key1, key2 : str
Names of bodies
var1, var2 : str
Names of variables associated with given body
Returns
-------
dims : tuple
dims of your data, e.g. (1,3)
Example
-------
>>> dims = {"secondary" : {"Eccentricity" : 0, "SemimajorAxis" : 1}, "cbp": {"Eccentricity" : 2,
"SemimajorAxis" : 3}}
>>> get_dims(dims, "cbp", "Eccentricity", "secondary", "SemimajorAxis")
(3, 0)
"""
holder = []
# Loop over bodies
for key in dims.keys():
# Loop over variables for each body
for var in dims[key].keys():
# If key-var pair not a user-defined one, append it
# These are the dimensions to marginalize over!
if (var != var1 or key != key1) and (var != var2 or key != key2):
holder.append(dims[key][var])
return list(holder)
# end function
"""
FOR IRREGULAR/RANDOM DATA (or also gridded!)
"""
def multiscatter(df,z_var="cbp_DampTime",size_var=None,color_var=None,cmap="magma_r",
alpha=0.5):
"""
Plot a series of scatter plots of 1 dataframe column (z_var) vs every other column
with the option to have one parameter size points and another parameter color the points.
Parameters
----------
df : Pandas dataframe
z_var : string
column name of dependent variable
size_var : string (optional)
column name that determines size of points
color_var : string (optional)
column name that determines color of points
cmap : string (optional)
matplotlib colormap name
alpha : float (optional)
alpha (shade) parameter which ranges from [0,1]
Returns
-------
fig : matplotlib figure object
axes : array of matplotlib axis objects
Example
-------
>>> fig, axes = multiscatter(df,z_var="cbp_DampTime",size_var="secondary_Eccentricity",
color_var="secondary_SemimajorAxis")
(plot)
"""
# Can't color/size points by the dependent variable!
assert(z_var != size_var and z_var != color_var), "Can't color/size points by the dependent variable!"
# Set default color if none given
if color_var is not None:
color = df[color_var]
else:
color = "black"
# Set default size if none given
if size_var is not None:
# Compute point sizes by normalizing data
s = 10. + 240.*(df[size_var] - df[size_var].min())/np.ptp(df[size_var])
else:
s = 50
# Get size for square grid of plots, leave space blank
size = len(df.columns)
grid_size = int(np.sqrt(size))
# Make subplots
fig, axes = plt.subplots(grid_size, grid_size, figsize=(grid_size*9,grid_size*8))
axes = axes.flatten()
# dependent var (y coor for all scatter plots)
z = df[z_var]
# Iterate over all things, scatter plot "z" value vs sim variables
ii = 0
# Loop over columns == simulation variables
for col in df.columns.values:
if col != z_var:
im = axes[ii].scatter(df[col],z,c=color,s=s,alpha=alpha,cmap=cmap)
# Format axes with labels, limits
axes[ii].set_ylabel(z_var.split("_")[0] + " " + z_var.split("_")[1])
axes[ii].set_xlabel(col.split("_")[0] + " " + col.split("_")[1])
            axes[ii].set_ylim(z.min() - 0.05*np.abs(z.min()), z.max() + 0.05*np.abs(z.max()))
# Increment ii
ii = ii + 1
else:
pass
# Add colorbar?
if color_var is not None:
cbar = fig.colorbar(im, ax=axes.ravel().tolist())
cbar.ax.set_ylabel((color_var.split("_")[0] + " " + color_var.split("_")[1]), rotation=270,
labelpad = 25)
# Add legend that displays typical point sizes?
if size_var is not None:
# Dummy plots for range of points
sizes = s.max()
s_inv = df[size_var].max()
l1 = plt.scatter([],[], s=sizes/4., color="black")
l2 = plt.scatter([],[], s=2.*sizes/4., color="black")
l3 = plt.scatter([],[], s=3.*sizes/4., color="black")
l4 = plt.scatter([],[], s=sizes, color="black")
# Labels for points
labels = ["%.2f" % (s_inv/4.), "%.2f" % (s_inv/2.),
"%.2f" % (3.*s_inv/4.), "%.2f" % s_inv]
# Fancy legend relative to last axis
leg = axes[-1].legend([l1, l2, l3, l4], labels, ncol=4, frameon=False, fontsize=15, loc = 8, borderpad = 1.0,
handletextpad=1, scatterpoints = 1, bbox_to_anchor=(-.1, -0.3),
title = size_var.split("_")[0] + " " + size_var.split("_")[1])
return fig, axes
# End Function
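# --- Illustrative usage sketch (assumes pandas is imported as pd) -----------
# A hypothetical call to multiscatter() on a small synthetic dataframe. The
# column names are made up, but follow the "body_Variable" pattern that the
# axis labelling above expects.
#
#   df = pd.DataFrame({"secondary_Eccentricity": np.random.rand(50),
#                      "secondary_SemimajorAxis": np.random.rand(50),
#                      "cbp_Eccentricity": np.random.rand(50),
#                      "cbp_DampTime": np.random.rand(50)})
#   fig, axes = multiscatter(df, z_var="cbp_DampTime",
#                            size_var="secondary_Eccentricity",
#                            color_var="secondary_SemimajorAxis")
#   fig.savefig("multiscatter_example.png", bbox_inches="tight")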
"""
Misc
"""
| 33.178191
| 117
| 0.593788
|
a9631319f119cc6367821445a0f1dd9249689e05
| 15,631
|
py
|
Python
|
python/GafferSceneUI/CameraUI.py
|
pier-robot/gaffer
|
9267f2ba3822b14430d8a283c745261110b0f570
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferSceneUI/CameraUI.py
|
pier-robot/gaffer
|
9267f2ba3822b14430d8a283c745261110b0f570
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferSceneUI/CameraUI.py
|
pier-robot/gaffer
|
9267f2ba3822b14430d8a283c745261110b0f570
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import functools
import imath
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Metadata
##########################################################################
plugsMetadata = {
"sets" : [
"layout:divider", True,
],
"projection" : [
"description",
"""
The base camera type.
Supports two standard projections: orthographic and
perspective. For less standard projections that require
renderer-specific implementations, such as spherical, you
will need to use a downstream CameraTweaks node to adjust
this camera's parameters.
""",
"preset:Perspective", "perspective",
"preset:Orthographic", "orthographic",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"layout:divider", True,
],
"perspectiveMode" : [
"description",
"""
The input values to use in defining the perspective
projection. They can be either a horizontal field of view
(`fieldOfView`), or a film back/sensor (`aperture`) and
focal length (`focalLength`). The latter two can take the
exact measurements from a real camera and lens setup. With
either perspective mode, perspective is stored as
`aperture` and `focalLength` parameters on the camera.
""",
"preset:Field Of View", GafferScene.Camera.PerspectiveMode.FieldOfView,
"preset:Aperture and Focal Length", GafferScene.Camera.PerspectiveMode.ApertureFocalLength,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"layout:visibilityActivator", "perspective",
],
"fieldOfView" : [
"description",
"""
The horizontal field of view, in degrees.
In the camera's parameters, projection is always stored as
`aperture` and `focalLength`. When using the _Field of
View_ perspective mode, the aperture has the fixed
dimensions of `1, 1`, and this plug drives the
`focalLength` parameter.
""",
"layout:visibilityActivator", "perspectiveModeFOV",
],
"apertureAspectRatio" : [
"description",
"""
The vertical field of view, according to the ratio
`(horizontal FOV) / (vertical FOV)`. A value of 1 would
result in a square aperture, while a value of 1.778 would
result in a 16:9 aperture.
"Aperture" in this sense is equivalent to film back/sensor.
The final projection of a render using this camera will
depend on these settings in combination with the
`resolution` and `filmFit` render settings.
""",
"layout:visibilityActivator", "perspectiveModeFOV",
	],
"aperture" : [
"description",
"""
The width and height of the aperture when using the
_Aperture and Focal Length_ perspective mode. Use this in
conjunction with a focal length to define the camera's
equivalent field of view.
"Aperture" here is equivalent to the film back/sensor on a
real camera. A handful of default camera presets are
provided, including Full Frame 35mm and several popular
Alexa and RED bodies. Once the aperture is set, the focal
length can then be adjusted on its own to control the field
of view, just like on a real camera.
When setting the aperture manually, the `x` and `y`
dimensions can be measured in any unit of length, so long
as they use the same unit as the focal length. You can
safely follow convention and use millimeters for both.
The final field of view of a render will depend on these
settings in combination with the `resolution` and `filmFit`
render options.
""",
"layout:visibilityActivator", "perspectiveModeFocalLength",
"preset:Academy 35mm 21.946 × 16.000", imath.V2f( 21.946, 16 ),
"preset:Super 35mm 24.892 × 18.669", imath.V2f( 24.892, 18.669 ),
"preset:Micro Four Thirds 17.30 × 13.00", imath.V2f( 17.3, 13 ),
"preset:APS-C 22.30 × 14.90", imath.V2f( 22.3, 14.9 ),
"preset:Full Frame 35mm 36.00 × 24.00", imath.V2f( 36, 24 ),
"preset:Alexa SXT 4:3 2.8k 23.76 × 17.82", imath.V2f( 23.76, 17.82 ),
"preset:Alexa SXT Open Gate 3.4k 28.25 × 18.17", imath.V2f( 28.25, 18.17 ),
"preset:Alexa 65 16:9 5.1k 42.24 × 23.76", imath.V2f( 42.24, 23.76 ),
"preset:Alexa 65 Open Gate 6.5k 54.12 × 25.58", imath.V2f( 54.12, 25.58 ),
"preset:RED EPIC-W 5K S35 30.72 × 18.00", imath.V2f( 30.72, 18 ),
"preset:RED EPIC-W 8K S35 29.90 × 15.77", imath.V2f( 29.9, 15.77 ),
"presetsPlugValueWidget:allowCustom", True,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"focalLength" : [
"description",
"""
The focal length portion of the _Aperture and Focal Length_
perspective mode. This is equivalent to the lens's focal
length in a real camera setup. Use this in conjunction with
the aperture to set the camera's equivalent field of view.
Like on a real camera, the aperture is typically constant,
and the focal length is then adjusted to control the field
of view.
This can be a distance in any unit of length, as long as
you use the same unit for the aperture. You can safely
follow convention and use millimeters for both.
The final field of view of a render using this camera will
depend on these settings in combination with the
`resolution` and `filmFit` render options.
""",
"layout:visibilityActivator", "perspectiveModeFocalLength",
],
"orthographicAperture" : [
"description",
"""
The width and height of the orthographic camera's aperture,
in world space units.
""",
"layout:visibilityActivator", "orthographic",
"layout:divider", True,
],
"apertureOffset" : [
"description",
"""
Offsets the aperture parallel to the image plane, to
achieve a skewed viewing frustum. The scale of the offset
depends on the projection and perspective mode:
- Perspective projection:
- _Field Of View_ mode: 1 offset = 1 horizontal field
of view.
- _Aperture and Focal Length_ mode: 1 offset = 1
aperture unit of measure (for example, 1mm).
- Orthographic projection: 1 offset = 1 world space unit.
For use in special cases, such as simulating a tilt-shift
lens, rendering tiles for a large panorama, or matching a
plate that has been asymmetrically cropped.
""",
],
"fStop" : [
"description",
"""
The setting equivalent to the f-number on a camera, which ultimately determines the strength of the depth of field blur. A lower value produces more blur. As in a real camera, `fStop` is defined as `focalLength / lens aperture`.
To enable depth of field blur (if your renderer supports it), give this plug a value greater than 0, and, on a downstream StandardOptions node, enable the _Depth Of Field_ plug and turn it on.
""",
"layout:section", "Depth of Field",
],
"focalLengthWorldScale" : [
"description",
"""
The scale to convert from focal length units to world space
units. Combined with f-stop to calculate the lens aperture.
Set this to scale the lens units into scene units, to
ensure the depth of field blur correctly scales to the
scene. Once this plug is set, the `fStop` plug can be
adjusted to match a real-world lens setting.
For example, given a lens with a focal length in mm, and a
scene that uses decimeters for its world space units, the
_Millimeters to Decimeters_ preset would provide the proper
conversion.
The default value of 0.1 scales millimeter (default focal
length unit) to centimeter (default world space unit of
Alembic and USD scene formats). Other default presets for
scaling to decimeter or meter are also available.
If using _Field Of View_ projection mode, you won't have a
focal length plug to work with, and the aperture size will
be (1,1). To compensate, select _Custom_ and then input a
value that scales the scene unit of measure to a realistic
aperture size. For example, `3.5` would convert 1
centimeter (Alembic/USD default) to 35mm, which would
simulate a 35mm lens.
""",
"preset:No Conversion ( 1.0 )", 1.0,
"preset:Millimeters to Centimeters ( 0.1 )", 0.1,
"preset:Millimeters to Decimeters ( 0.01 )", 0.01,
"preset:Millimeters to Meters ( 0.001 )", 0.001,
"presetsPlugValueWidget:allowCustom", True,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"layout:activator", "dof",
"layout:section", "Depth of Field",
],
"focusDistance" : [
"description",
"""
The distance from the camera at which objects are in
perfect focus, in world space units.
""",
"layout:activator", "dof",
"layout:section", "Depth of Field",
],
"clippingPlanes" : [
"description",
"""
The near and far clipping planes, defining a region of
forward depth within which objects are visible to this
camera.
""",
],
"renderSettingOverrides" : [
"description",
"""
Render settings specified here will override their
corresponding global render options.
""",
"layout:section", "Render Overrides",
"compoundDataPlugValueWidget:editable", False,
],
"renderSettingOverrides.*" : [
"nameValuePlugPlugValueWidget:ignoreNamePlug", True,
],
"visualiserAttributes" : [
"description",
"""
Attributes that affect the visualisation of this camera in the Viewer.
""",
"layout:section", "Visualisation",
"compoundDataPlugValueWidget:editable", False,
],
"visualiserAttributes.*" : [
"nameValuePlugPlugValueWidget:ignoreNamePlug", True,
],
"visualiserAttributes.scale" : [
"description",
"""
Scales non-geometric visualisations in the viewport to make them
easier to work with.
""",
],
"visualiserAttributes.frustum" : [
"description",
"""
Controls whether the camera draws a visualisation of its frustum.
"""
],
"visualiserAttributes.frustum.value" : [
"preset:Off", "off",
"preset:When Selected", "whenSelected",
"preset:On", "on",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget"
]
}
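# --- Illustrative sketch (not part of Gaffer's API) ----------------------
# The "aperture"/"focalLength" descriptions above follow simple pinhole-camera
# geometry. A minimal helper, assuming the aperture width and focal length are
# expressed in the same unit (for example millimetres) :
def __exampleHorizontalFieldOfView( apertureWidth, focalLength ) :

	# Horizontal field of view in degrees; e.g. a 36mm wide aperture with a
	# 50mm focal length gives roughly 39.6 degrees.
	return 2.0 * math.degrees( math.atan( 0.5 * apertureWidth / focalLength ) )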
__sourceMetadata = GafferSceneUI.StandardOptionsUI.plugsMetadata
## Start with a special entry: the filmFit option presets are reused
# without modification
__overrideMetadata = {
"renderSettingOverrides.filmFit.value": __sourceMetadata["options.filmFit.value"]
}
## The plug names from StandardOptionsUI that the Camera node actually
# overrides; not all of their names match, so we need to provide
# the replacement names too
__plugsToOverride = {
"options.filmFit": "renderSettingOverrides.filmFit",
"options.shutter": "renderSettingOverrides.shutter",
"options.renderResolution": "renderSettingOverrides.resolution",
"options.pixelAspectRatio": "renderSettingOverrides.pixelAspectRatio",
"options.resolutionMultiplier": "renderSettingOverrides.resolutionMultiplier",
"options.overscan": "renderSettingOverrides.overscan",
"options.overscanLeft": "renderSettingOverrides.overscanLeft",
"options.overscanRight": "renderSettingOverrides.overscanRight",
"options.overscanTop": "renderSettingOverrides.overscanTop",
"options.overscanBottom": "renderSettingOverrides.overscanBottom",
"options.renderCropWindow": "renderSettingOverrides.cropWindow",
"options.depthOfField": "renderSettingOverrides.depthOfField",
}
## Use the key names from the override dict, but reuse the plug
# description text from the source dict
for sourcePlug, overridePlug in __plugsToOverride.items() :
plugMetadata = __sourceMetadata[ sourcePlug ]
__overrideMetadata[ overridePlug ] = [
plugMetadata[0],
"Overrides the `{option}` render option:\n\n{description}".format(
option = sourcePlug.replace( "options.", ""),
# We assume the second element is the plug description
description = plugMetadata[1]
)
]
plugsMetadata.update( __overrideMetadata )
Gaffer.Metadata.registerNode(
GafferScene.Camera,
"description",
"""
Produces scenes containing a camera. To choose which camera is
used for rendering, use a StandardOptions node.
""",
"layout:activator:perspective", lambda node : node["projection"].getValue() == "perspective",
"layout:activator:perspectiveModeFOV", lambda node : node["perspectiveMode"].getValue() == GafferScene.Camera.PerspectiveMode.FieldOfView and node["projection"].getValue() == "perspective",
"layout:activator:perspectiveModeFocalLength", lambda node : node["perspectiveMode"].getValue() == GafferScene.Camera.PerspectiveMode.ApertureFocalLength and node["projection"].getValue() == "perspective",
"layout:activator:orthographic", lambda node : node["projection"].getValue() == "orthographic",
"layout:activator:dof", lambda node : node["fStop"].getValue() != 0,
plugs = plugsMetadata
)
##########################################################################
# NodeEditor tool menu
##########################################################################
def __copyCamera( node, transform ) :
with Gaffer.UndoScope( node.scriptNode() ) :
s, h, r, t = imath.V3f(), imath.V3f(), imath.V3f(), imath.V3f()
transform.extractSHRT( s, h, r, t )
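		# extractSHRT() yields the rotation in radians; Gaffer's transform plug
		# expects degrees, hence the 180 / pi conversion when setting "rotate" below.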
node["transform"]["translate"].setValue( t )
node["transform"]["rotate"].setValue( r * 180.0 / math.pi )
node["transform"]["scale"].setValue( s )
def __nodeEditorToolMenu( nodeEditor, node, menuDefinition ) :
if not isinstance( node, GafferScene.Camera ) :
return
layout = nodeEditor.ancestor( GafferUI.CompoundEditor )
if layout is None :
return
viewers = [ v for v in layout.editors( GafferUI.Viewer ) if isinstance( v.view(), GafferSceneUI.SceneView ) ]
if not viewers :
return
for viewer in viewers :
menuDefinition.append(
"/Copy From Viewer" + ( "/" + viewer.getTitle() if len( viewers ) > 1 else "" ),
{
"command" : functools.partial( __copyCamera, node, viewer.view().viewportGadget().getCameraTransform() ),
"active" : not Gaffer.MetadataAlgo.readOnly( node["transform"] ),
}
)
GafferUI.NodeEditor.toolMenuSignal().connect( __nodeEditorToolMenu, scoped = False )
| 31.324649
| 230
| 0.700915
|
34a40a9086bfeace560adc4a09e74bd072e8db2f
| 26,048
|
py
|
Python
|
kuryr_kubernetes/controller/handlers/lbaas.py
|
dulek/kuryr-kubernetes
|
d76a9dad18320ecd57b7735aed34806aa07f4091
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/controller/handlers/lbaas.py
|
dulek/kuryr-kubernetes
|
d76a9dad18320ecd57b7735aed34806aa07f4091
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/controller/handlers/lbaas.py
|
dulek/kuryr-kubernetes
|
d76a9dad18320ecd57b7735aed34806aa07f4091
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kuryr.lib._i18n import _
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes.controller.drivers import base as drv_base
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.handlers import k8s_base
from kuryr_kubernetes.objects import lbaas as obj_lbaas
from kuryr_kubernetes import utils
LOG = logging.getLogger(__name__)
SUPPORTED_SERVICE_TYPES = ('ClusterIP', 'LoadBalancer')
class LBaaSSpecHandler(k8s_base.ResourceEventHandler):
"""LBaaSSpecHandler handles K8s Service events.
LBaaSSpecHandler handles K8s Service events and updates related Endpoints
with LBaaSServiceSpec when necessary.
"""
OBJECT_KIND = k_const.K8S_OBJ_SERVICE
OBJECT_WATCH_PATH = "%s/%s" % (k_const.K8S_API_BASE, "services")
def __init__(self):
super(LBaaSSpecHandler, self).__init__()
self._drv_project = drv_base.ServiceProjectDriver.get_instance()
self._drv_subnets = drv_base.ServiceSubnetsDriver.get_instance()
self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
def on_present(self, service):
lbaas_spec = utils.get_lbaas_spec(service)
if self._should_ignore(service):
LOG.debug("Skipping Kubernetes service %s of an unsupported kind "
"or without a selector as Kubernetes does not create "
"an endpoint object for it.",
service['metadata']['name'])
return
if self._has_lbaas_spec_changes(service, lbaas_spec):
lbaas_spec = self._generate_lbaas_spec(service)
utils.set_lbaas_spec(service, lbaas_spec)
def _is_supported_type(self, service):
spec = service['spec']
return spec.get('type') in SUPPORTED_SERVICE_TYPES
def _get_service_ip(self, service):
if self._is_supported_type(service):
return service['spec'].get('clusterIP')
return None
def _should_ignore(self, service):
return (not(self._has_selector(service)) or
not(self._has_clusterip(service)) or
not(self._is_supported_type(service)))
def _has_selector(self, service):
return service['spec'].get('selector')
def _has_clusterip(self, service):
# ignore headless service, clusterIP is None
return service['spec'].get('clusterIP') != 'None'
def _get_subnet_id(self, service, project_id, ip):
subnets_mapping = self._drv_subnets.get_subnets(service, project_id)
subnet_ids = {
subnet_id
for subnet_id, network in subnets_mapping.items()
for subnet in network.subnets.objects
if ip in subnet.cidr}
if len(subnet_ids) != 1:
raise k_exc.IntegrityError(_(
"Found %(num)s subnets for service %(link)s IP %(ip)s") % {
'link': service['metadata']['selfLink'],
'ip': ip,
'num': len(subnet_ids)})
return subnet_ids.pop()
def _generate_lbaas_spec(self, service):
project_id = self._drv_project.get_project(service)
ip = self._get_service_ip(service)
subnet_id = self._get_subnet_id(service, project_id, ip)
ports = self._generate_lbaas_port_specs(service)
sg_ids = self._drv_sg.get_security_groups(service, project_id)
spec_type = service['spec'].get('type')
spec_lb_ip = service['spec'].get('loadBalancerIP')
return obj_lbaas.LBaaSServiceSpec(ip=ip,
project_id=project_id,
subnet_id=subnet_id,
ports=ports,
security_groups_ids=sg_ids,
type=spec_type,
lb_ip=spec_lb_ip)
def _has_lbaas_spec_changes(self, service, lbaas_spec):
return (self._has_ip_changes(service, lbaas_spec) or
utils.has_port_changes(service, lbaas_spec))
def _has_ip_changes(self, service, lbaas_spec):
link = service['metadata']['selfLink']
svc_ip = self._get_service_ip(service)
if not lbaas_spec:
if svc_ip:
LOG.debug("LBaaS spec is missing for %(link)s"
% {'link': link})
return True
elif str(lbaas_spec.ip) != svc_ip:
LOG.debug("LBaaS spec IP %(spec_ip)s != %(svc_ip)s for %(link)s"
% {'spec_ip': lbaas_spec.ip,
'svc_ip': svc_ip,
'link': link})
return True
return False
def _generate_lbaas_port_specs(self, service):
return [obj_lbaas.LBaaSPortSpec(**port)
for port in utils.get_service_ports(service)]
class LoadBalancerHandler(k8s_base.ResourceEventHandler):
"""LoadBalancerHandler handles K8s Endpoints events.
LoadBalancerHandler handles K8s Endpoints events and tracks changes in
    LBaaSServiceSpec to update Neutron LBaaS accordingly and to reflect its
actual state in LBaaSState.
"""
OBJECT_KIND = k_const.K8S_OBJ_ENDPOINTS
OBJECT_WATCH_PATH = "%s/%s" % (k_const.K8S_API_BASE, "endpoints")
def __init__(self):
super(LoadBalancerHandler, self).__init__()
self._drv_lbaas = drv_base.LBaaSDriver.get_instance()
self._drv_pod_project = drv_base.PodProjectDriver.get_instance()
self._drv_pod_subnets = drv_base.PodSubnetsDriver.get_instance()
self._drv_service_pub_ip = drv_base.ServicePubIpDriver.get_instance()
# Note(yboaron) LBaaS driver supports 'provider' parameter in
# Load Balancer creation flow.
# We need to set the requested load balancer provider
# according to 'endpoints_driver_octavia_provider' configuration.
self._lb_provider = None
if (config.CONF.kubernetes.endpoints_driver_octavia_provider
!= 'default'):
self._lb_provider = (
config.CONF.kubernetes.endpoints_driver_octavia_provider)
def on_present(self, endpoints):
lbaas_spec = self._get_lbaas_spec(endpoints)
if self._should_ignore(endpoints, lbaas_spec):
LOG.debug("Ignoring Kubernetes endpoints %s",
endpoints['metadata']['name'])
return
lbaas_state = utils.get_lbaas_state(endpoints)
if not lbaas_state:
lbaas_state = obj_lbaas.LBaaSState()
if self._sync_lbaas_members(endpoints, lbaas_state, lbaas_spec):
# Note(yboaron) For LoadBalancer services, we should allocate FIP,
# associate it to LB VIP and update K8S service status
if lbaas_state.service_pub_ip_info is None:
service_pub_ip_info = (
self._drv_service_pub_ip.acquire_service_pub_ip_info(
lbaas_spec.type,
lbaas_spec.lb_ip,
lbaas_spec.project_id,
lbaas_state.loadbalancer.port_id))
if service_pub_ip_info:
self._drv_service_pub_ip.associate_pub_ip(
service_pub_ip_info, lbaas_state.loadbalancer.port_id)
lbaas_state.service_pub_ip_info = service_pub_ip_info
self._update_lb_status(
endpoints,
lbaas_state.service_pub_ip_info.ip_addr)
# REVISIT(ivc): since _sync_lbaas_members is responsible for
# creating all lbaas components (i.e. load balancer, listeners,
# pools, members), it is currently possible for it to fail (due
# to invalid Kuryr/K8s/Neutron configuration, e.g. Members' IPs
# not belonging to configured Neutron subnet or Service IP being
# in use by gateway or VMs) leaving some Neutron entities without
# properly updating annotation. Some sort of failsafe mechanism is
# required to deal with such situations (e.g. cleanup, or skip
# failing items, or validate configuration) to prevent annotation
# being out of sync with the actual Neutron state.
try:
utils.set_lbaas_state(endpoints, lbaas_state)
except k_exc.K8sResourceNotFound:
# Note(yboaron) It's impossible to store neutron resources
# in K8S object since object was deleted. In that case
# we should rollback all neutron resources.
LOG.debug("LoadBalancerHandler failed to store Openstack "
"resources in K8S object (not found)")
self.on_deleted(endpoints, lbaas_state)
def on_deleted(self, endpoints, lbaas_state=None):
if lbaas_state is None:
lbaas_state = utils.get_lbaas_state(endpoints)
if not lbaas_state:
return
# NOTE(ivc): deleting pool deletes its members
self._drv_lbaas.release_loadbalancer(
loadbalancer=lbaas_state.loadbalancer)
if lbaas_state.service_pub_ip_info:
self._drv_service_pub_ip.release_pub_ip(
lbaas_state.service_pub_ip_info)
def _should_ignore(self, endpoints, lbaas_spec):
# NOTE(ltomasbo): we must wait until service handler has annotated the
# endpoints to process them. Thus, if annotations are not updated to
# match the endpoints information, we should skip the event
return not(lbaas_spec and
self._has_pods(endpoints) and
self._svc_handler_annotations_updated(endpoints,
lbaas_spec))
def _svc_handler_annotations_updated(self, endpoints, lbaas_spec):
svc_link = self._get_service_link(endpoints)
k8s = clients.get_kubernetes_client()
service = k8s.get(svc_link)
if utils.has_port_changes(service, lbaas_spec):
# NOTE(ltomasbo): Ensuring lbaas_spec annotated on the endpoints
# is in sync with the service status, i.e., upon a service
# modification it will ensure endpoint modifications are not
# handled until the service handler has performed its annotations
return False
return True
def _has_pods(self, endpoints):
ep_subsets = endpoints.get('subsets', [])
if not ep_subsets:
return False
return any(True
for subset in ep_subsets
for address in subset.get('addresses', [])
if address.get('targetRef', {}).get('kind') == 'Pod')
def _sync_lbaas_members(self, endpoints, lbaas_state, lbaas_spec):
changed = False
if (self._has_pods(endpoints) and
self._remove_unused_members(endpoints, lbaas_state,
lbaas_spec)):
changed = True
if self._sync_lbaas_pools(endpoints, lbaas_state, lbaas_spec):
changed = True
if (self._has_pods(endpoints) and
self._add_new_members(endpoints, lbaas_state, lbaas_spec)):
changed = True
return changed
def _sync_lbaas_sgs(self, endpoints, lbaas_state, lbaas_spec):
# NOTE (maysams) Need to retrieve the LBaaS Spec again due to
# the possibility of it being updated after the LBaaS creation
# process has started.
svc_link = self._get_service_link(endpoints)
k8s = clients.get_kubernetes_client()
service = k8s.get(svc_link)
lbaas_spec = utils.get_lbaas_spec(service)
lb = lbaas_state.loadbalancer
default_sgs = config.CONF.neutron_defaults.pod_security_groups
lbaas_spec_sgs = lbaas_spec.security_groups_ids
if lb.security_groups and lb.security_groups != lbaas_spec_sgs:
sgs = [lb_sg for lb_sg in lb.security_groups
if lb_sg not in default_sgs]
if lbaas_spec_sgs != default_sgs:
sgs.extend(lbaas_spec_sgs)
lb.security_groups = sgs
def _add_new_members(self, endpoints, lbaas_state, lbaas_spec):
changed = False
self._sync_lbaas_sgs(endpoints, lbaas_state, lbaas_spec)
lsnr_by_id = {l.id: l for l in lbaas_state.listeners}
pool_by_lsnr_port = {(lsnr_by_id[p.listener_id].protocol,
lsnr_by_id[p.listener_id].port): p
for p in lbaas_state.pools}
# NOTE(yboaron): Since LBaaSv2 doesn't support UDP load balancing,
# the LBaaS driver will return 'None' in case of UDP port
# listener creation.
# we should consider the case in which
# 'pool_by_lsnr_port[p.protocol, p.port]' is missing
pool_by_tgt_name = {}
for p in lbaas_spec.ports:
try:
pool_by_tgt_name[p.name] = pool_by_lsnr_port[p.protocol,
p.port]
except KeyError:
continue
current_targets = {(str(m.ip), m.port, m.pool_id)
for m in lbaas_state.members}
for subset in endpoints.get('subsets', []):
subset_ports = subset.get('ports', [])
for subset_address in subset.get('addresses', []):
try:
target_ip = subset_address['ip']
target_ref = subset_address['targetRef']
if target_ref['kind'] != k_const.K8S_OBJ_POD:
continue
except KeyError:
continue
if not pool_by_tgt_name:
continue
for subset_port in subset_ports:
target_port = subset_port['port']
port_name = subset_port.get('name')
try:
pool = pool_by_tgt_name[port_name]
except KeyError:
LOG.debug("No pool found for port: %r", port_name)
continue
if (target_ip, target_port, pool.id) in current_targets:
continue
# TODO(apuimedo): Do not pass subnet_id at all when in
# L3 mode once old neutron-lbaasv2 is not supported, as
# octavia does not require it
if (config.CONF.octavia_defaults.member_mode ==
k_const.OCTAVIA_L2_MEMBER_MODE):
member_subnet_id = self._get_pod_subnet(target_ref,
target_ip)
else:
# We use the service subnet id so that the connectivity
# from VIP to pods happens in layer 3 mode, i.e.,
# routed.
member_subnet_id = lbaas_state.loadbalancer.subnet_id
first_member_of_the_pool = True
for member in lbaas_state.members:
if pool.id == member.pool_id:
first_member_of_the_pool = False
break
if first_member_of_the_pool:
listener_port = lsnr_by_id[pool.listener_id].port
else:
listener_port = None
member = self._drv_lbaas.ensure_member(
loadbalancer=lbaas_state.loadbalancer,
pool=pool,
subnet_id=member_subnet_id,
ip=target_ip,
port=target_port,
target_ref_namespace=target_ref['namespace'],
target_ref_name=target_ref['name'],
listener_port=listener_port)
lbaas_state.members.append(member)
changed = True
return changed
def _get_pod_subnet(self, target_ref, ip):
# REVISIT(ivc): consider using true pod object instead
pod = {'kind': target_ref['kind'],
'metadata': {'name': target_ref['name'],
'namespace': target_ref['namespace']}}
project_id = self._drv_pod_project.get_project(pod)
subnets_map = self._drv_pod_subnets.get_subnets(pod, project_id)
# FIXME(ivc): potentially unsafe [0] index
return [subnet_id for subnet_id, network in subnets_map.items()
for subnet in network.subnets.objects
if ip in subnet.cidr][0]
def _get_port_in_pool(self, pool, lbaas_state, lbaas_spec):
for l in lbaas_state.listeners:
if l.id != pool.listener_id:
continue
for port in lbaas_spec.ports:
if l.port == port.port and l.protocol == port.protocol:
return port
return None
def _remove_unused_members(self, endpoints, lbaas_state, lbaas_spec):
spec_ports = {}
for pool in lbaas_state.pools:
port = self._get_port_in_pool(pool, lbaas_state, lbaas_spec)
if port:
spec_ports[port.name] = pool.id
current_targets = {(a['ip'], p['port'],
spec_ports.get(p.get('name')))
for s in endpoints['subsets']
for a in s['addresses']
for p in s['ports']
if p.get('name') in spec_ports}
removed_ids = set()
for member in lbaas_state.members:
if ((str(member.ip), member.port, member.pool_id) in
current_targets):
continue
self._drv_lbaas.release_member(lbaas_state.loadbalancer,
member)
removed_ids.add(member.id)
if removed_ids:
lbaas_state.members = [m for m in lbaas_state.members
if m.id not in removed_ids]
return bool(removed_ids)
def _sync_lbaas_pools(self, endpoints, lbaas_state, lbaas_spec):
changed = False
if self._remove_unused_pools(lbaas_state, lbaas_spec):
changed = True
if self._sync_lbaas_listeners(endpoints, lbaas_state, lbaas_spec):
changed = True
if self._add_new_pools(lbaas_state, lbaas_spec):
changed = True
return changed
def _add_new_pools(self, lbaas_state, lbaas_spec):
changed = False
current_listeners_ids = {pool.listener_id
for pool in lbaas_state.pools}
for listener in lbaas_state.listeners:
if listener.id in current_listeners_ids:
continue
pool = self._drv_lbaas.ensure_pool(lbaas_state.loadbalancer,
listener)
lbaas_state.pools.append(pool)
changed = True
return changed
def _is_pool_in_spec(self, pool, lbaas_state, lbaas_spec):
# NOTE(yboaron): in order to check if a specific pool is in lbaas_spec
# we should:
# 1. get the listener that pool is attached to
# 2. check if listener's attributes appear in lbaas_spec.
for l in lbaas_state.listeners:
if l.id != pool.listener_id:
continue
for port in lbaas_spec.ports:
if l.port == port.port and l.protocol == port.protocol:
return True
return False
def _remove_unused_pools(self, lbaas_state, lbaas_spec):
removed_ids = set()
for pool in lbaas_state.pools:
if self._is_pool_in_spec(pool, lbaas_state, lbaas_spec):
continue
self._drv_lbaas.release_pool(lbaas_state.loadbalancer,
pool)
removed_ids.add(pool.id)
if removed_ids:
lbaas_state.pools = [p for p in lbaas_state.pools
if p.id not in removed_ids]
lbaas_state.members = [m for m in lbaas_state.members
if m.pool_id not in removed_ids]
return bool(removed_ids)
def _sync_lbaas_listeners(self, endpoints, lbaas_state, lbaas_spec):
changed = False
if self._remove_unused_listeners(endpoints, lbaas_state, lbaas_spec):
changed = True
if self._sync_lbaas_loadbalancer(endpoints, lbaas_state, lbaas_spec):
changed = True
if self._add_new_listeners(endpoints, lbaas_spec, lbaas_state):
changed = True
return changed
def _add_new_listeners(self, endpoints, lbaas_spec, lbaas_state):
changed = False
current_port_tuples = {(listener.protocol, listener.port)
for listener in lbaas_state.listeners}
for port_spec in lbaas_spec.ports:
protocol = port_spec.protocol
port = port_spec.port
if (protocol, port) in current_port_tuples:
continue
listener = self._drv_lbaas.ensure_listener(
loadbalancer=lbaas_state.loadbalancer,
protocol=protocol,
port=port,
service_type=lbaas_spec.type)
if listener is not None:
lbaas_state.listeners.append(listener)
changed = True
return changed
def _remove_unused_listeners(self, endpoints, lbaas_state, lbaas_spec):
current_listeners = {p.listener_id for p in lbaas_state.pools}
removed_ids = set()
for listener in lbaas_state.listeners:
if listener.id in current_listeners:
continue
self._drv_lbaas.release_listener(lbaas_state.loadbalancer,
listener)
removed_ids.add(listener.id)
if removed_ids:
lbaas_state.listeners = [l for l in lbaas_state.listeners
if l.id not in removed_ids]
return bool(removed_ids)
def _update_lb_status(self, endpoints, lb_ip_address):
status_data = {"loadBalancer": {
"ingress": [{"ip": lb_ip_address.format()}]}}
k8s = clients.get_kubernetes_client()
svc_link = self._get_service_link(endpoints)
try:
k8s.patch("status", svc_link, status_data)
except k_exc.K8sClientException:
# REVISIT(ivc): only raise ResourceNotReady for NotFound
raise k_exc.ResourceNotReady(svc_link)
def _get_service_link(self, endpoints):
ep_link = endpoints['metadata']['selfLink']
link_parts = ep_link.split('/')
if link_parts[-2] != 'endpoints':
raise k_exc.IntegrityError(_(
"Unsupported endpoints link: %(link)s") % {
'link': ep_link})
link_parts[-2] = 'services'
return "/".join(link_parts)
def _sync_lbaas_loadbalancer(self, endpoints, lbaas_state, lbaas_spec):
changed = False
lb = lbaas_state.loadbalancer
if lb and lb.ip != lbaas_spec.ip:
# if loadbalancerIP was associated to lbaas VIP, disassociate it.
if lbaas_state.service_pub_ip_info:
self._drv_service_pub_ip.disassociate_pub_ip(
lbaas_state.service_pub_ip_info)
self._drv_lbaas.release_loadbalancer(
loadbalancer=lb)
lb = None
changed = True
if not lb:
if lbaas_spec.ip:
lb_name = self._drv_lbaas.get_service_loadbalancer_name(
endpoints['metadata']['namespace'],
endpoints['metadata']['name'])
lb = self._drv_lbaas.ensure_loadbalancer(
name=lb_name,
project_id=lbaas_spec.project_id,
subnet_id=lbaas_spec.subnet_id,
ip=lbaas_spec.ip,
security_groups_ids=lbaas_spec.security_groups_ids,
service_type=lbaas_spec.type,
provider=self._lb_provider)
changed = True
elif lbaas_state.service_pub_ip_info:
self._drv_service_pub_ip.release_pub_ip(
lbaas_state.service_pub_ip_info)
lbaas_state.service_pub_ip_info = None
changed = True
lbaas_state.loadbalancer = lb
return changed
def _get_lbaas_spec(self, endpoints):
# TODO(ivc): same as '_get_lbaas_state'
try:
annotations = endpoints['metadata']['annotations']
annotation = annotations[k_const.K8S_ANNOTATION_LBAAS_SPEC]
except KeyError:
return None
obj_dict = jsonutils.loads(annotation)
obj = obj_lbaas.LBaaSServiceSpec.obj_from_primitive(obj_dict)
LOG.debug("Got LBaaSServiceSpec from annotation: %r", obj)
return obj
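# --- Illustrative sketch (not part of kuryr-kubernetes) ---------------------
# Mirrors the selfLink manipulation done by _get_service_link() above, under
# the assumption of the standard Kubernetes API path layout.
def _example_endpoints_to_service_link(ep_link):
    parts = ep_link.split('/')
    # e.g. '/api/v1/namespaces/default/endpoints/my-svc'
    #   -> '/api/v1/namespaces/default/services/my-svc'
    parts[-2] = 'services'
    return '/'.join(parts)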
| 42.771757
| 79
| 0.592291
|
ee4a0e51685717fa0958c574883a770944ed91be
| 1,683
|
py
|
Python
|
gksolite/patterns.py
|
daseith/workshop-collaborative_software
|
697e7dd941d0107b495a3884410be49ef931c46b
|
[
"MIT"
] | null | null | null |
gksolite/patterns.py
|
daseith/workshop-collaborative_software
|
697e7dd941d0107b495a3884410be49ef931c46b
|
[
"MIT"
] | 6
|
2018-08-29T13:26:27.000Z
|
2018-08-29T13:35:50.000Z
|
gksolite/patterns.py
|
daseith/workshop-collaborative_software
|
697e7dd941d0107b495a3884410be49ef931c46b
|
[
"MIT"
] | 12
|
2018-08-29T11:31:02.000Z
|
2018-08-29T11:32:53.000Z
|
import random
def pad(literal):
if not literal:
return " \n "
lines = ['', *literal.splitlines(), '']
width = max(len(line) for line in lines)
return '\n'.join(' ' + line.ljust(width) + ' ' for line in lines)
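# Illustrative example (comment only): pad() frames a literal with a single
# border of spaces so patterns never touch the board edge, e.g.
#   pad("##\n##") == "    \n ## \n ## \n    "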
def create_random_literal():
    """Build a random pattern literal of '#' and ' ' cells (1-10 rows/columns)."""
    number_lines = random.randint(1, 10)
    number_columns = random.randint(1, 10)
    literal = ""
    for _ in range(number_lines):
        col_str = ""
        for _ in range(number_columns):
            if random.randint(0, 1) == 0:
                col_str += "#"
            else:
                col_str += " "
        literal += col_str.rstrip() + "\n"
    return literal
BLOCK = pad("""\
##
##
""")
BLINKER = pad("""\
###
""")
BLINKER3 = pad("""\
#
### # ###
#
""")
PULSAR = pad("""\
###
# #
# #
# #
###
""")
PENTADECATHLON = pad("""\
# # # #
### ###### ###
# # # #
""")
PINWHEEL = pad("""\
####
# # #
## #
# # #
# #
####
""")
GLIDER = pad("""\
#
#
###
""")
DIEHARD = pad("""\
#
##
# ###
""")
GLIDER_GUN = pad("""\
#
# #
## ## ##
# # ## ##
## # # ##
## # # ## # #
# # #
# #
##
""")
PENTOMINO = pad("""\
##
##
#
""")
BASELINE = pad("""\
######## ##### ### ####### #####
""")
RANDOM = pad(create_random_literal())
PATTERNS = [
'BLOCK', 'BLINKER', 'BLINKER3', 'PULSAR', 'PENTADECATHLON', 'PINWHEEL', 'GLIDER', 'DIEHARD', 'GLIDER_GUN',
'PENTOMINO', 'RANDOM'
]
__all__ = PATTERNS[:]
print(create_random_literal())
| 15.026786
| 110
| 0.385621
|
e5de6985beac654bb327c4266e63e08d8a566ff3
| 613
|
py
|
Python
|
python/strings/score-of-parenthesis.py
|
prakashsellathurai/a-grim-loth
|
656e6eea8e6c1761f2705519ea05d6ddb1d4beb7
|
[
"MIT"
] | 4
|
2021-06-26T17:18:47.000Z
|
2022-02-02T15:02:27.000Z
|
python/strings/score-of-parenthesis.py
|
prakashsellathurai/a-grim-loth
|
656e6eea8e6c1761f2705519ea05d6ddb1d4beb7
|
[
"MIT"
] | 8
|
2021-06-29T07:00:32.000Z
|
2021-12-01T11:26:22.000Z
|
python/strings/score-of-parenthesis.py
|
prakashsellathurai/a-grim-loth
|
656e6eea8e6c1761f2705519ea05d6ddb1d4beb7
|
[
"MIT"
] | 3
|
2021-07-14T14:42:08.000Z
|
2021-12-07T19:36:53.000Z
|
import unittest
def scoreOfParentheses(s: str) -> int:
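    # Balance trick: each innermost "()" pair contributes 1 << bal to the
    # total, where `bal` (after the closing decrement) counts the pairs that
    # enclose it -- every enclosing pair doubles the score of what it wraps.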
ans = bal = 0
for i, x in enumerate(s):
if x == "(":
bal += 1
else:
bal -= 1
if s[i - 1] == "(":
ans += 1 << bal
return ans
class TestStringMethods(unittest.TestCase):
def test_all_cases(self):
self.assertEqual(scoreOfParentheses("()"), 1)
self.assertEqual(scoreOfParentheses("(())"), 2)
self.assertEqual(scoreOfParentheses("()()"), 2)
self.assertEqual(scoreOfParentheses("(()(()))"), 6)
if __name__ == "__main__":
unittest.main()
| 22.703704
| 59
| 0.533442
|
e8753859cf6ab8072efea8af64e569449de648c8
| 755
|
py
|
Python
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/t/too/too_many_boolean_expressions.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 463
|
2015-01-15T08:17:42.000Z
|
2022-03-28T15:10:20.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/t/too/too_many_boolean_expressions.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 52
|
2015-01-06T02:43:59.000Z
|
2022-03-14T11:15:21.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/t/too/too_many_boolean_expressions.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
"""Checks for if statements containing too many boolean expressions"""
# pylint: disable=invalid-name, comparison-with-itself, chained-comparison, condition-evals-to-constant
x = y = z = 5
if x > -5 and x < 5 and y > -5 and y < 5 and z > -5 and z < 5: # [too-many-boolean-expressions]
pass
elif True and False and 1 and 2 and 3:
pass
elif True and False and 1 and 2 and 3 and 4 and 5: # [too-many-boolean-expressions]
pass
elif True and (True and True) and (x == 5 or True or True): # [too-many-boolean-expressions]
pass
elif True and (True or (x > -5 and x < 5 and (z > -5 or z < 5))): # [too-many-boolean-expressions]
pass
elif True == True == True == True == True == True:
pass
if True and False and 1 and 2 and 3:
pass
| 35.952381
| 103
| 0.651656
|
5685e0057b191a5ffaed8dd71d44c3e2ad44e6d9
| 293
|
py
|
Python
|
tests/tests/assign_attr.py
|
yu-i9/mini_python
|
d62b9040f8427057a20d18340a27bdf2dfc8c22e
|
[
"MIT"
] | 2
|
2018-06-22T07:07:03.000Z
|
2018-08-03T04:26:43.000Z
|
tests/tests/assign_attr.py
|
yu-i9/mini_python
|
d62b9040f8427057a20d18340a27bdf2dfc8c22e
|
[
"MIT"
] | null | null | null |
tests/tests/assign_attr.py
|
yu-i9/mini_python
|
d62b9040f8427057a20d18340a27bdf2dfc8c22e
|
[
"MIT"
] | null | null | null |
class Test:
def test(self):
h = Hoge()
h.initialize()
def generator():
return h
return generator
class Hoge:
def initialize(self):
self.x = 42
generator = Test().test()
generator().y = 2
assert generator().x + generator().y == 44
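# Note: Test.test() closes over a single Hoge instance, so every call to
# generator() returns that same object; the attribute set via
# `generator().y = 2` therefore persists, giving x + y == 42 + 2 == 44.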
| 17.235294
| 42
| 0.542662
|
4f451a125184de0090d98a38ce32cc5cd4c5ce2a
| 14,766
|
py
|
Python
|
tests/python/relay/test_external_codegen.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
tests/python/relay/test_external_codegen.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2,863
|
2017-08-17T19:55:50.000Z
|
2019-11-04T17:18:41.000Z
|
tests/python/relay/test_external_codegen.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for graph partitioning."""
import sys
from collections import OrderedDict
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, runtime
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.annotation import compiler_begin, compiler_end
from utils.external_codegen import (
update_lib,
set_external_func_attr,
parametrize_external_codegen_checks,
parametrize_external_json_codegen_checks,
check_graph_executor_result,
check_vm_result,
)
@parametrize_external_codegen_checks
def test_multi_node_subgraph(check_result):
x = relay.var("x", shape=(10, 10))
w0 = relay.var("w0", shape=(10, 10))
w1 = relay.var("w1", shape=(10, 10))
w2 = relay.var("w2", shape=(10, 10))
w3 = relay.var("w3", shape=(10, 10))
w4 = relay.var("w4", shape=(10, 10))
w5 = relay.var("w5", shape=(10, 10))
w6 = relay.var("w6", shape=(10, 10))
w7 = relay.var("w7", shape=(10, 10))
# subgraph0
x0 = relay.var("x0", shape=(10, 10))
w00 = relay.var("w00", shape=(10, 10))
w01 = relay.var("w01", shape=(10, 10))
w02 = relay.var("w02", shape=(10, 10))
z00 = relay.add(x0, w00)
p00 = relay.subtract(z00, w01)
q00 = relay.multiply(p00, w02)
subgraph0 = relay.Function([x0, w00, w01, w02], q00)
subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0")
call0 = relay.Call(subgraph0, [x, w0, w1, w2])
# subgraph1
x1 = relay.var("x1", shape=(10, 10))
w10 = relay.var("w10", shape=(10, 10))
w11 = relay.var("w11", shape=(10, 10))
w12 = relay.var("w12", shape=(10, 10))
z10 = relay.add(x1, w10)
p10 = relay.subtract(z10, w11)
q10 = relay.multiply(p10, w12)
subgraph1 = relay.Function([x1, w10, w11, w12], q10)
subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1")
call1 = relay.Call(subgraph1, [x, w3, w4, w5])
# Other parts on TVM
z2 = relay.add(x, w6)
q2 = relay.subtract(z2, w7)
r = relay.concatenate((call0, call1, q2), axis=0)
f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
mod = tvm.IRModule()
mod["main"] = f
mod = relay.transform.InferType()(mod)
x_data = np.random.rand(10, 10).astype("float32")
w_data = []
for _ in range(8):
w_data.append(np.random.rand(10, 10).astype("float32"))
map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
check_result(
mod,
map_inputs,
(30, 10),
np.concatenate(
(
((x_data + w_data[0]) - w_data[1]) * w_data[2],
((x_data + w_data[3]) - w_data[4]) * w_data[5],
x_data + w_data[6] - w_data[7],
),
axis=0,
),
)
@parametrize_external_codegen_checks
def test_extern_gcc_single_op(check_result):
x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
x0 = relay.var("x0", shape=(8, 8))
y0 = relay.var("y0", shape=(8, 8))
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("float32")
y_data = np.random.rand(8, 8).astype("float32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc_single_op_int(check_result):
x = relay.var("x", shape=(8, 8), dtype="int32")
y = relay.var("y", shape=(8, 8), dtype="int32")
x0 = relay.var("x0", shape=(8, 8), dtype="int32")
y0 = relay.var("y0", shape=(8, 8), dtype="int32")
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x, y])
mod = tvm.IRModule.from_expr(call)
x_data = np.random.rand(8, 8).astype("int32")
y_data = np.random.rand(8, 8).astype("int32")
check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc(check_result):
x = relay.var("x", shape=(2, 2))
y = relay.var("y", shape=(2, 2))
# subgraph for mul
x0 = relay.var("x0", shape=(2, 2))
y0 = relay.var("y0", shape=(2, 2))
mul = x0 * y0
mul = relay.Function([x0, y0], mul)
mul = set_external_func_attr(mul, "ccompiler", "ccompiler_2")
call_mul = relay.Call(mul, [y, y])
# subgraph for add
x1 = relay.var("x1", shape=(2, 2))
y1 = relay.var("y1", shape=(2, 2))
add = x1 + y1
add = relay.Function([x1, y1], add)
add = set_external_func_attr(add, "ccompiler", "ccompiler_1")
call_add = relay.Call(add, [x, x])
# subgraph for sub
x2 = relay.var("x2", shape=(2, 2))
y2 = relay.var("y2", shape=(2, 2))
sub = x2 - y2
sub = relay.Function([x2, y2], sub)
sub = set_external_func_attr(sub, "ccompiler", "ccompiler_0")
call_sub = relay.Call(sub, [call_mul, call_add])
mod = tvm.IRModule.from_expr(call_sub)
x_data = np.random.rand(2, 2).astype("float32")
y_data = np.random.rand(2, 2).astype("float32")
inputs = OrderedDict(
[
("y", y_data),
("x", x_data),
]
)
check_result(mod, inputs, (2, 2), (y_data * y_data) - (x_data + x_data))
# TODO(mbs): The check_aot_executor_result does not support the list-of-targets, mostly because
# tvm.testing.aot.compile_and_run requires the target to be a kind name string, and
# tvm.testing.aot.compile_models requires a single Target object. However, code outside of
# tvm.testing.aot is ready for this more general form.
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_extern_gcc_with_target_instance(check_result):
shape = (8, 8)
dtype = "int32"
def make_mod():
x0 = relay.var("x0", shape=shape, dtype=dtype)
y0 = relay.var("y0", shape=shape, dtype=dtype)
z = x0 + y0
f = relay.Function([x0, y0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
x = relay.var("x", shape=shape, dtype=dtype)
y = relay.var("y", shape=shape, dtype=dtype)
call = relay.Call(f, [x, y])
return tvm.IRModule.from_expr(call)
host_target = tvm.target.Target("llvm")
generic_target = tvm.target.Target("llvm", host=host_target)
# The header attribute is just whitespace, so compilation is as usual.
good_extern_codegen_target = tvm.target.Target(
{"kind": "ccompiler", "header": "// Good"}, host=host_target
)
# The header attribute is ill-formed, so compilation is expected to fail.
bogus_extern_codegen_target = tvm.target.Target(
{"kind": "ccompiler", "header": "Bogus"}, host=host_target
)
mod = make_mod()
x_data = np.random.rand(*shape).astype(dtype)
y_data = np.random.rand(*shape).astype(dtype)
expected_result = x_data + y_data
inputs = {"x": x_data, "y": y_data}
check_result(
mod, inputs, shape, expected_result, target=[generic_target, good_extern_codegen_target]
)
with pytest.raises(RuntimeError):
check_result(
mod,
inputs,
shape,
expected_result,
target=[generic_target, bogus_extern_codegen_target],
)
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
def test_extern_gcc_consts():
@tvm._ffi.register_func("relay.ext.ccompiler.constant_updater")
def constant_updater(expr, symbol):
"""A dummy constant updater just to test that a custom one works."""
return {"ccompiler_0_p0": tvm.nd.array(y0_data)}
x = relay.var("x", shape=(8, 8))
y0_data = np.random.uniform(0, 1, (8, 8)).astype("float32")
x0 = relay.var("x0", shape=(8, 8))
y0_const = relay.const(y0_data, "float32")
z = x0 + y0_const
f = relay.Function([x0], z)
f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
call = relay.Call(f, [x])
mod = tvm.IRModule.from_expr(call)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
compiler = relay.backend.vm.VMCompiler()
compiler.lower(mod, "llvm")
compiler.codegen()
params = compiler.get_params()
assert len(params) == 1
assert "ccompiler_0_p0" in params.keys()
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
_, _, params = relay.build(mod, target="llvm")
assert len(params) == 1
assert "ccompiler_0_p0" in params.keys()
tvm._ffi.registry.remove_global_func("relay.ext.ccompiler.constant_updater")
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_padding(check_result):
dtype = "float32"
ishape = (1, 1, 99, 12)
w1shape = (54, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1))
f = relay.Function([data0, weight0], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data1, weight1])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
i_data, w_data
)
check_result(
mod, {"data0": i_data, "weight0": w_data}, (1, 54, 50, 6), ref_res.numpy(), tol=1e-5
)
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl(check_result):
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1, weight1, weight2], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0, weight0, weight0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
i_data, w_data, w_data
)
check_result(
mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
)
@pytest.mark.skipif(
not tvm.get_global_func("relay.ext.dnnl", True),
reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_const(check_result):
dtype = "float32"
ishape = (1, 32, 14, 14)
w1shape = (32, 1, 3, 3)
data0 = relay.var("data0", shape=(ishape), dtype=dtype)
w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
data1 = relay.var("data0", shape=(ishape), dtype=dtype)
weight1 = relay.const(w_data, dtype=dtype)
weight2 = relay.const(w_data, dtype=dtype)
depthwise_conv2d_1 = relay.nn.conv2d(
data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
)
depthwise_conv2d_2 = relay.nn.conv2d(
depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
)
out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
f = relay.Function([data1], out)
ref_mod = tvm.IRModule()
ref_mod["main"] = f
f = set_external_func_attr(f, "dnnl", "dnnl_0")
call = relay.Call(f, [data0])
mod = tvm.IRModule.from_expr(call)
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(i_data)
check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5)
def test_load_params_with_constants_in_ext_codegen():
    # After binding params and partitioning, graph_module.get_params()
    # might contain parameters that are not graph executor inputs but,
    # for example, constants in an external function.
y_in = np.ones((1,)).astype("float32")
params = {"y": y_in}
mod = tvm.IRModule()
x = relay.var("x", shape=(1, 10))
y = relay.var("y", shape=(1,))
xcb = compiler_begin(x, "ccompiler")
ycb = compiler_begin(y, "ccompiler")
z = relay.add(xcb, ycb)
zce = compiler_end(z, "ccompiler")
mod["main"] = relay.Function([x, y], zce)
mod["main"] = bind_params_by_name(mod["main"], params)
mod = relay.transform.PartitionGraph()(mod)
graph_module = relay.build(mod, target="llvm", params=params)
# Params will be stored in metadata module.
assert len(graph_module.get_params()) == 0
lib = update_lib(graph_module.get_lib())
rt_mod = tvm.contrib.graph_executor.create(graph_module.get_graph_json(), lib, tvm.cpu(0))
rt_mod.load_params(runtime.save_param_dict(graph_module.get_params()))
if __name__ == "__main__":
tvm.testing.main()
| 36.014634
| 99
| 0.644657
|
52e186a0b8eee3d9c652f5001a0c976023058786
| 6,152
|
py
|
Python
|
vmtkScripts/vmtksurfacethickness.py
|
michelebucelli/vmtk
|
738bd1d152e8836847ab4d75f7e8360bd574e724
|
[
"Apache-2.0"
] | 1
|
2021-09-25T12:02:25.000Z
|
2021-09-25T12:02:25.000Z
|
vmtkScripts/vmtksurfacethickness.py
|
michelebucelli/vmtk
|
738bd1d152e8836847ab4d75f7e8360bd574e724
|
[
"Apache-2.0"
] | 1
|
2022-02-10T14:33:24.000Z
|
2022-02-10T14:33:24.000Z
|
vmtkScripts/vmtksurfacethickness.py
|
michelebucelli/vmtk
|
738bd1d152e8836847ab4d75f7e8360bd574e724
|
[
"Apache-2.0"
] | 1
|
2021-02-18T10:11:50.000Z
|
2021-02-18T10:11:50.000Z
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfaceclipper.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.9 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this class was contributed by
## Marco Fedele (marco.fedele@polimi.it)
## Politecnico di Milano
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import vmtkrenderer
from vmtk import pypes
class vmtkSurfaceThickness(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.CellEntityIdsArrayName = 'CellEntityIds'
self.CellEntityIdsArray = None
self.ThicknessArrayName = 'Thickness'
self.ThicknessArray = None
self.ExternalWallEntityIds = []
self.InternalWallEntityIds = []
self.InternalWall2EntityIds = []
self.SetScriptName('vmtksurfacethickness')
self.SetScriptDoc('compute local thickness of a structure as distance between the internal and the external walls (exploiting their entity ids)')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
['CellEntityIdsArrayName','entityidsarray','str',1,'','name of the array where the tags are stored'],
            ['ThicknessArrayName','thicknessarray','str',1,'','name of the array where the computed thickness values are stored'],
['ExternalWallEntityIds','externalwallids','int',-1,'','entity ids on the external wall of the structure'],
['InternalWallEntityIds','internalwallids','int',-1,'','entity ids on the internal wall of the structure'],
['InternalWall2EntityIds','internalwall2ids','int',-1,'','entity ids on the second internal wall of the structure (necessary only in case of two disconnected internal walls, e.g. heart ventricles)']
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter'],
['CellEntityIdsArray','oentityidsarray','vtkIntArray',1,'','the output entity ids array']
])
def Execute(self):
from vmtk import vmtkscripts
        if self.Surface is None:
self.PrintError('Error: No Surface.')
if self.InternalWallEntityIds == [] or self.ExternalWallEntityIds == []:
self.PrintError('Error: Empty Internal/External Wall EntityIds.')
th = vtk.vtkThreshold()
th.SetInputData(self.Surface)
th.SetInputArrayToProcess(0,0,0,1,self.CellEntityIdsArrayName)
def extractWall(ids):
appendFilter = vtk.vtkAppendPolyData()
wallParts = []
for i,item in enumerate(ids):
th.ThresholdBetween(item-0.5,item+0.5)
th.Update()
gf = vtk.vtkGeometryFilter()
gf.SetInputConnection(th.GetOutputPort())
gf.Update()
wallParts.append(gf.GetOutput())
appendFilter.AddInputData(wallParts[i])
appendFilter.Update()
return appendFilter.GetOutput()
def ComputeDistance(surface,referenceSurface):
distance = vmtkscripts.vmtkSurfaceImplicitDistance()
distance.Surface = surface
distance.ReferenceSurface = referenceSurface
distance.CellData = 0
distance.ComputeSignedDistance = 0
distance.Execute()
return distance.Array
allWalls = extractWall(self.ExternalWallEntityIds+self.InternalWallEntityIds+self.InternalWall2EntityIds)
externalWall = extractWall(self.ExternalWallEntityIds)
externalWallDistanceArray = ComputeDistance(allWalls,externalWall)
internalWall = extractWall(self.InternalWallEntityIds)
internalWallDistanceArray = ComputeDistance(allWalls,internalWall)
if self.InternalWall2EntityIds != []:
internalWall2 = extractWall(self.InternalWall2EntityIds)
internalWalls = extractWall(self.InternalWallEntityIds+self.InternalWall2EntityIds)
internalWall2DistanceArray = ComputeDistance(allWalls,internalWall2)
internalWallsDistanceArray = ComputeDistance(allWalls,internalWalls)
numberOfTuple = allWalls.GetNumberOfPoints()
thicknessArray = vtk.vtkDoubleArray()
thicknessArray.SetName(self.ThicknessArrayName)
thicknessArray.SetNumberOfComponents(1)
thicknessArray.SetNumberOfTuples(numberOfTuple)
for i in range(numberOfTuple):
d_a = externalWallDistanceArray.GetComponent(i,0)
d_b = internalWallDistanceArray.GetComponent(i,0)
if self.InternalWall2EntityIds != []:
d_b2 = internalWall2DistanceArray.GetComponent(i,0)
d_b3 = internalWallsDistanceArray.GetComponent(i,0)
value = max(min(max(d_b,d_b2),d_a),d_b3)
else:
value = max(d_a,d_b)
thicknessArray.SetComponent(i,0,value)
allWalls.GetPointData().AddArray(thicknessArray)
# project the thickness also in regions outside the two walls (e.g. caps)
# WARNING: it works only with PointData
surfaceCopy = vtk.vtkPolyData()
surfaceCopy.DeepCopy(self.Surface)
surfaceProjection = vmtkscripts.vmtkSurfaceProjection()
surfaceProjection.Surface = surfaceCopy
surfaceProjection.ReferenceSurface = allWalls
surfaceProjection.Execute()
surfaceCopy = surfaceProjection.Surface
self.Surface.GetPointData().AddArray(surfaceCopy.GetPointData().GetArray(self.ThicknessArrayName))
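# A minimal usage sketch, not part of the original script: the file names and
# entity id values below are assumptions for illustration only. vmtk scripts
# are normally chained through a pype, which can also be driven from Python:
def _exampleThicknessPype():  # illustrative helper, never called by this module
    from vmtk import pypes
    pypes.PypeRun('vmtksurfacethickness -ifile wall_with_tags.vtp '
                  '-externalwallids 1 -internalwallids 2 '
                  '-ofile wall_thickness.vtp')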
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 43.020979
| 210
| 0.669376
|
d33af2fe1e2b34fc88f48f9426ff04a4dab7e153
| 400
|
py
|
Python
|
SimCalorimetry/EcalTrigPrimProducers/python/ecalTrigPrimSpikeESProducer_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
SimCalorimetry/EcalTrigPrimProducers/python/ecalTrigPrimSpikeESProducer_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
SimCalorimetry/EcalTrigPrimProducers/python/ecalTrigPrimSpikeESProducer_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# ESProducer creating the record + the corresponding empty ESSource
EcalTrigPrimSpikeESProducer = cms.ESProducer("EcalTrigPrimSpikeESProducer",
TCCZeroingThreshold = cms.untracked.uint32(1023)
)
tpspikeparms = cms.ESSource("EmptyESSource",
recordName = cms.string('EcalTPGSpikeRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
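# Hedged usage sketch (kept as comments): in a full configuration this fragment
# is typically pulled in with process.load(); the override value below is an
# arbitrary example, not a recommended threshold.
#   process = cms.Process("SPIKE")
#   process.load("SimCalorimetry.EcalTrigPrimProducers.ecalTrigPrimSpikeESProducer_cff")
#   process.EcalTrigPrimSpikeESProducer.TCCZeroingThreshold = cms.untracked.uint32(800)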
| 28.571429
| 75
| 0.7725
|
2902f84e98556a9af67edc55142569e02f1b0205
| 171
|
py
|
Python
|
open_data/insert_delivery_place_indication.py
|
Homyakin/ZakupkiParser
|
9fdfbb96ff128a90165ffe6f48c0f3915dcd747a
|
[
"MIT"
] | 12
|
2019-09-25T19:00:54.000Z
|
2022-01-24T13:10:54.000Z
|
open_data/insert_delivery_place_indication.py
|
hbhbr/ZakupkiParser
|
1e9c03d0ce4c4e23ae70514ce163ba53c2963231
|
[
"MIT"
] | 2
|
2020-04-15T18:08:47.000Z
|
2020-10-14T12:01:12.000Z
|
open_data/insert_delivery_place_indication.py
|
hbhbr/ZakupkiParser
|
1e9c03d0ce4c4e23ae70514ce163ba53c2963231
|
[
"MIT"
] | 4
|
2019-04-21T16:57:44.000Z
|
2021-11-10T14:57:05.000Z
|
import insert_from_file
def insert():
table = 'delivery_place_indication'
columns = ['code', 'name']
insert_from_file.insert(table, table, columns, columns)
| 21.375
| 59
| 0.71345
|
159e5da38e57bb109fb084de3ba94dc7accf728a
| 3,272
|
py
|
Python
|
google/ads/googleads/v4/errors/types/url_field_error.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/errors/types/url_field_error.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/errors/types/url_field_error.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.errors",
marshal="google.ads.googleads.v4",
manifest={"UrlFieldErrorEnum",},
)
class UrlFieldErrorEnum(proto.Message):
r"""Container for enum describing possible url field errors."""
class UrlFieldError(proto.Enum):
r"""Enum describing possible url field errors."""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_TRACKING_URL_TEMPLATE = 2
INVALID_TAG_IN_TRACKING_URL_TEMPLATE = 3
MISSING_TRACKING_URL_TEMPLATE_TAG = 4
MISSING_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 5
INVALID_PROTOCOL_IN_TRACKING_URL_TEMPLATE = 6
MALFORMED_TRACKING_URL_TEMPLATE = 7
MISSING_HOST_IN_TRACKING_URL_TEMPLATE = 8
INVALID_TLD_IN_TRACKING_URL_TEMPLATE = 9
REDUNDANT_NESTED_TRACKING_URL_TEMPLATE_TAG = 10
INVALID_FINAL_URL = 11
INVALID_TAG_IN_FINAL_URL = 12
REDUNDANT_NESTED_FINAL_URL_TAG = 13
MISSING_PROTOCOL_IN_FINAL_URL = 14
INVALID_PROTOCOL_IN_FINAL_URL = 15
MALFORMED_FINAL_URL = 16
MISSING_HOST_IN_FINAL_URL = 17
INVALID_TLD_IN_FINAL_URL = 18
INVALID_FINAL_MOBILE_URL = 19
INVALID_TAG_IN_FINAL_MOBILE_URL = 20
REDUNDANT_NESTED_FINAL_MOBILE_URL_TAG = 21
MISSING_PROTOCOL_IN_FINAL_MOBILE_URL = 22
INVALID_PROTOCOL_IN_FINAL_MOBILE_URL = 23
MALFORMED_FINAL_MOBILE_URL = 24
MISSING_HOST_IN_FINAL_MOBILE_URL = 25
INVALID_TLD_IN_FINAL_MOBILE_URL = 26
INVALID_FINAL_APP_URL = 27
INVALID_TAG_IN_FINAL_APP_URL = 28
REDUNDANT_NESTED_FINAL_APP_URL_TAG = 29
MULTIPLE_APP_URLS_FOR_OSTYPE = 30
INVALID_OSTYPE = 31
INVALID_PROTOCOL_FOR_APP_URL = 32
INVALID_PACKAGE_ID_FOR_APP_URL = 33
URL_CUSTOM_PARAMETERS_COUNT_EXCEEDS_LIMIT = 34
INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_KEY = 39
INVALID_CHARACTERS_IN_URL_CUSTOM_PARAMETER_VALUE = 40
INVALID_TAG_IN_URL_CUSTOM_PARAMETER_VALUE = 41
REDUNDANT_NESTED_URL_CUSTOM_PARAMETER_TAG = 42
MISSING_PROTOCOL = 43
INVALID_PROTOCOL = 52
INVALID_URL = 44
DESTINATION_URL_DEPRECATED = 45
INVALID_TAG_IN_URL = 46
MISSING_URL_TAG = 47
DUPLICATE_URL_ID = 48
INVALID_URL_ID = 49
FINAL_URL_SUFFIX_MALFORMED = 50
INVALID_TAG_IN_FINAL_URL_SUFFIX = 51
INVALID_TOP_LEVEL_DOMAIN = 53
MALFORMED_TOP_LEVEL_DOMAIN = 54
MALFORMED_URL = 55
MISSING_HOST = 56
NULL_CUSTOM_PARAMETER_VALUE = 57
__all__ = tuple(sorted(__protobuf__.manifest))
| 36.355556
| 74
| 0.716687
|
c9678b02d89fe52bf9df19ed28d47556085b9e9b
| 8,748
|
py
|
Python
|
forum/migrations/0002_auto__chg_field_node_updated_on__chg_field_reply_updated_on__chg_field.py
|
chiehtu/kissaten
|
a7aad01de569107d5fd5ed2cd781bca6e5750871
|
[
"MIT"
] | null | null | null |
forum/migrations/0002_auto__chg_field_node_updated_on__chg_field_reply_updated_on__chg_field.py
|
chiehtu/kissaten
|
a7aad01de569107d5fd5ed2cd781bca6e5750871
|
[
"MIT"
] | null | null | null |
forum/migrations/0002_auto__chg_field_node_updated_on__chg_field_reply_updated_on__chg_field.py
|
chiehtu/kissaten
|
a7aad01de569107d5fd5ed2cd781bca6e5750871
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Node.updated_on'
db.alter_column(u'forum_node', 'updated_on', self.gf('django.db.models.fields.DateTimeField')(null=True))
# Changing field 'Reply.updated_on'
db.alter_column(u'forum_reply', 'updated_on', self.gf('django.db.models.fields.DateTimeField')(null=True))
# Changing field 'Topic.updated_on'
db.alter_column(u'forum_topic', 'updated_on', self.gf('django.db.models.fields.DateTimeField')(null=True))
# Changing field 'Topic.last_reply_on'
db.alter_column(u'forum_topic', 'last_reply_on', self.gf('django.db.models.fields.DateTimeField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Node.updated_on'
raise RuntimeError("Cannot reverse this migration. 'Node.updated_on' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Node.updated_on'
db.alter_column(u'forum_node', 'updated_on', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
# User chose to not deal with backwards NULL issues for 'Reply.updated_on'
raise RuntimeError("Cannot reverse this migration. 'Reply.updated_on' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Reply.updated_on'
db.alter_column(u'forum_reply', 'updated_on', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
# User chose to not deal with backwards NULL issues for 'Topic.updated_on'
raise RuntimeError("Cannot reverse this migration. 'Topic.updated_on' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Topic.updated_on'
db.alter_column(u'forum_topic', 'updated_on', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
# User chose to not deal with backwards NULL issues for 'Topic.last_reply_on'
raise RuntimeError("Cannot reverse this migration. 'Topic.last_reply_on' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Topic.last_reply_on'
db.alter_column(u'forum_topic', 'last_reply_on', self.gf('django.db.models.fields.DateTimeField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'forum.node': {
'Meta': {'object_name': 'Node'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'num_topics': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'forum.reply': {
'Meta': {'object_name': 'Reply'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'to': u"orm['auth.User']"}),
'author_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'to': u"orm['forum.Topic']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'forum.topic': {
'Meta': {'object_name': 'Topic'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': u"orm['auth.User']"}),
'author_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_reply_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': u"orm['forum.Node']"}),
'num_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'num_replies': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['forum']
| 68.34375
| 187
| 0.598994
|
e66d984fda430aa4cf6096d001629155e25aba08
| 810
|
py
|
Python
|
product/migrations/0003_product_quantity_alter_category_id_alter_product_id.py
|
Vee245/dukapepedj
|
8525166b38c8f74dc8d62a2fdf1905c32793efcd
|
[
"MIT"
] | null | null | null |
product/migrations/0003_product_quantity_alter_category_id_alter_product_id.py
|
Vee245/dukapepedj
|
8525166b38c8f74dc8d62a2fdf1905c32793efcd
|
[
"MIT"
] | null | null | null |
product/migrations/0003_product_quantity_alter_category_id_alter_product_id.py
|
Vee245/dukapepedj
|
8525166b38c8f74dc8d62a2fdf1905c32793efcd
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-02-18 08:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0002_product'),
]
operations = [
migrations.AddField(
model_name='product',
name='quantity',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='category',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='product',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 27.931034
| 111
| 0.592593
|
b452208042b2fd00a242db8f4af45ca34c4f9984
| 847
|
py
|
Python
|
notes_api/serializers.py
|
Sakkadas/Notes
|
e438f58957ca1737ca714b5619abf76535236f66
|
[
"MIT"
] | 2
|
2021-09-14T13:37:59.000Z
|
2021-09-17T20:52:11.000Z
|
notes_api/serializers.py
|
Sakkadas/Notes
|
e438f58957ca1737ca714b5619abf76535236f66
|
[
"MIT"
] | null | null | null |
notes_api/serializers.py
|
Sakkadas/Notes
|
e438f58957ca1737ca714b5619abf76535236f66
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from notes.models import Note, Comment
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = (
'author', 'title',
'summary', 'created',
'updated', 'slug', 'source',
'anonymous', 'total_likes',
)
class NoteDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = (
'author', 'title', 'text',
'image', 'created',
'updated', 'slug', 'source',
'anonymous', 'total_likes',
)
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = (
'note', 'author', 'parent',
'text', 'email', 'publish',
'status',
)
| 24.2
| 56
| 0.531287
|
37e1842a1e705379df91b8dbd32c79706b751060
| 2,051
|
py
|
Python
|
shopyo/models.py
|
hazeliao/shopyo
|
fddcfd11f7fac843bf29a8421589afb9b5f22703
|
[
"MIT"
] | null | null | null |
shopyo/models.py
|
hazeliao/shopyo
|
fddcfd11f7fac843bf29a8421589afb9b5f22703
|
[
"MIT"
] | null | null | null |
shopyo/models.py
|
hazeliao/shopyo
|
fddcfd11f7fac843bf29a8421589afb9b5f22703
|
[
"MIT"
] | 1
|
2020-02-20T19:53:45.000Z
|
2020-02-20T19:53:45.000Z
|
#from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from addon import db
#from app import db
from flask_login import UserMixin
#db = SQLAlchemy()
class Users(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.String(10), primary_key=True)
name = db.Column(db.String(100))
password = db.Column(db.String(128))
admin_user = db.Column(db.Boolean, default=False)
def set_hash(self, password):
self.password = generate_password_hash(password, method="sha256")
def check_hash(self, password):
return check_password_hash(self.password, password)
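    # Hedged usage sketch (the id, name and password values are made up):
    #   user = Users(id='u1', name='Alice')
    #   user.set_hash('secret')      # stores a salted sha256 hash, never the raw password
    #   user.check_hash('secret')    # -> True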
class Products(db.Model):
__tablename__ = 'products'
barcode = db.Column(db.String(100), primary_key=True)
    price = db.Column(db.Float)
    vat_price = db.Column(db.Float)
    selling_price = db.Column(db.Float)
    manufacturer = db.Column(db.String(100),
                             db.ForeignKey('manufacturers.name'))
class People(db.Model):
__tablename__ = 'people'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
age = db.Column(db.Integer)
birthday = db.Column(db.String(100))
about = db.Column(db.String(100))
social_media = db.Column(db.String(100))
class Manufacturers(db.Model):
__tablename__ = 'manufacturers'
name = db.Column(db.String(100), primary_key=True)
class Appointments(db.Model):
__tablename__ = 'appointments'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
date = db.Column(db.String(20))
time = db.Column(db.String(20))
active = db.Column(db.String(20))
class Settings(db.Model):
__tablename__ = 'settings'
setting = db.Column(db.String(100), primary_key=True)
value = db.Column(db.String(100))
class Patients(db.Model):
__tablename__ = 'patients'
first_name = db.Column(db.String(100), primary_key=True)
last_name = db.Column(db.String(100))
# db.DateTime, default=db.func.current_timestamp()
| 28.887324
| 73
| 0.694295
|
7340257f29f79c8f2e517b2b4f7d6c72389dd875
| 2,131
|
py
|
Python
|
tests/test_computing_client.py
|
NagasawaRiow/py_nifcloud
|
cfb937a32a1214a554ba956d23a4131a56f65641
|
[
"MIT"
] | 4
|
2017-10-29T12:17:46.000Z
|
2017-11-04T16:55:14.000Z
|
tests/test_computing_client.py
|
NagasawaRiow/py_nifcloud
|
cfb937a32a1214a554ba956d23a4131a56f65641
|
[
"MIT"
] | 9
|
2017-10-30T04:46:33.000Z
|
2020-03-31T03:23:25.000Z
|
tests/test_computing_client.py
|
NagasawaRiow/py_nifcloud
|
cfb937a32a1214a554ba956d23a4131a56f65641
|
[
"MIT"
] | 3
|
2017-10-29T12:54:30.000Z
|
2018-09-24T10:14:46.000Z
|
# -*- encoding:utf-8 -*-
import unittest
from py_nifcloud import ComputingClient
from bs4 import BeautifulSoup
class TestComputingClientCreatePrivateLan(unittest.TestCase):
soup_response = None
sut = None
@classmethod
def setUpClass(cls):
cls.sut = ComputingClient(region_name="jp-east-1")
cls.response = cls.sut.create_private_lan(cidr_block="192.168.0.0/24")
cls.soup_response = BeautifulSoup(cls.response.text, "lxml-xml")
@classmethod
def tearDownClass(cls):
if cls.soup_response is not None:
cls.sut.delete_private_lan(network_id=cls.soup_response.find("networkId").text)
pass
def test_post_status_code(self):
self.assertEqual(self.response.status_code, 200)
def test_post_xml_root_name(self):
xml_root_name = self.soup_response.contents[0].name
self.assertEqual(xml_root_name, "NiftyCreatePrivateLanResponse")
def test_post_request_id(self):
request_id = self.soup_response.requestId
self.assertFalse(request_id.is_empty_element)
class TestComputingClientCreateSecurityGroup(unittest.TestCase):
soup_response = None
sut = None
group_name = "sdkTest"
@classmethod
def setUpClass(cls):
cls.sut = ComputingClient(region_name="jp-east-1")
cls.response = cls.sut.create_security_group(group_name=cls.group_name, group_description="sdkTest")
cls.soup_response = BeautifulSoup(cls.response.text, "lxml-xml")
@classmethod
def tearDownClass(cls):
if cls.soup_response is not None:
cls.sut.delete_security_group(group_name=cls.group_name)
pass
def test_post_status_code(self):
self.assertEqual(self.response.status_code, 200)
def test_post_xml_root_name(self):
xml_root_name = self.soup_response.contents[0].name
self.assertEqual(xml_root_name, "CreateSecurityGroupResponse")
def test_post_request_id(self):
request_id = self.soup_response.requestId
self.assertFalse(request_id.is_empty_element)
| 33.296875
| 109
| 0.696856
|
391da0177e1f9d75b0acc3b39e358227aaafb211
| 118,271
|
py
|
Python
|
src/sqlfluff/dialects/dialect_ansi.py
|
aviv/sqlfluff
|
f0be898b9fadf8950eefd4ff9e9713d3f3d9264f
|
[
"MIT"
] | null | null | null |
src/sqlfluff/dialects/dialect_ansi.py
|
aviv/sqlfluff
|
f0be898b9fadf8950eefd4ff9e9713d3f3d9264f
|
[
"MIT"
] | null | null | null |
src/sqlfluff/dialects/dialect_ansi.py
|
aviv/sqlfluff
|
f0be898b9fadf8950eefd4ff9e9713d3f3d9264f
|
[
"MIT"
] | null | null | null |
"""The core ANSI dialect.
This is the core SQL grammar. We'll probably extend this or make it pluggable
for other dialects. Here we encode the structure of the language.
There shouldn't be any underlying "machinery" here, that should all
be defined elsewhere.
A lot of the inspiration for this sql grammar is taken from the cockroach
labs full sql grammar. In particular, their way of dividing up the expression
grammar. Check out their docs, they're awesome.
https://www.cockroachlabs.com/docs/stable/sql-grammar.html#select_stmt
"""
from enum import Enum
from typing import Generator, List, NamedTuple, Optional, Tuple, Union
from sqlfluff.core.dialects.base import Dialect
from sqlfluff.core.dialects.common import AliasInfo, ColumnAliasInfo
from sqlfluff.core.parser import (
AnyNumberOf,
AnySetOf,
Anything,
BaseFileSegment,
BaseSegment,
Bracketed,
CodeSegment,
CommentSegment,
Conditional,
Dedent,
Delimited,
GreedyUntil,
Indent,
KeywordSegment,
Matchable,
NamedParser,
NewlineSegment,
Nothing,
OneOf,
OptionallyBracketed,
Ref,
RegexLexer,
RegexParser,
SegmentGenerator,
Sequence,
StartsWith,
StringLexer,
StringParser,
SymbolSegment,
WhitespaceSegment,
)
from sqlfluff.core.parser.segments.base import BracketedSegment
from sqlfluff.dialects.dialect_ansi_keywords import (
ansi_reserved_keywords,
ansi_unreserved_keywords,
)
ansi_dialect = Dialect("ansi", root_segment_name="FileSegment")
ansi_dialect.set_lexer_matchers(
[
# Match all forms of whitespace except newlines and carriage returns:
# https://stackoverflow.com/questions/3469080/match-whitespace-but-not-newlines
# This pattern allows us to also match non-breaking spaces (#2189).
RegexLexer("whitespace", r"[^\S\r\n]+", WhitespaceSegment),
RegexLexer(
"inline_comment",
r"(--|#)[^\n]*",
CommentSegment,
segment_kwargs={"trim_start": ("--", "#")},
),
RegexLexer(
"block_comment",
r"\/\*([^\*]|\*(?!\/))*\*\/",
CommentSegment,
subdivider=RegexLexer(
"newline",
r"\r\n|\n",
NewlineSegment,
),
trim_post_subdivide=RegexLexer(
"whitespace",
r"[^\S\r\n]+",
WhitespaceSegment,
),
),
RegexLexer("single_quote", r"'([^'\\]|\\.)*'", CodeSegment),
RegexLexer("double_quote", r'"([^"\\]|\\.)*"', CodeSegment),
RegexLexer("back_quote", r"`[^`]*`", CodeSegment),
# See https://www.geeksforgeeks.org/postgresql-dollar-quoted-string-constants/
RegexLexer("dollar_quote", r"\$(\w*)\$[^\1]*?\$\1\$", CodeSegment),
# Numeric literal matches integers, decimals, and exponential formats,
# Pattern breakdown:
# (?> Atomic grouping
# (https://www.regular-expressions.info/atomic.html).
# \d+\.\d+ e.g. 123.456
# |\d+\.(?!\.) e.g. 123.
# (N.B. negative lookahead assertion to ensure we
# don't match range operators `..` in Exasol).
# |\.\d+ e.g. .456
# |\d+ e.g. 123
# )
# ([eE][+-]?\d+)? Optional exponential.
# (
# (?<=\.) If matched character ends with . (e.g. 123.) then
# don't worry about word boundary check.
# |(?=\b) Check that we are at word boundary to avoid matching
# valid naked identifiers (e.g. 123column).
# )
RegexLexer(
"numeric_literal",
r"(?>\d+\.\d+|\d+\.(?!\.)|\.\d+|\d+)([eE][+-]?\d+)?((?<=\.)|(?=\b))",
CodeSegment,
),
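        # e.g. the numeric_literal pattern above matches "123", "123.456", ".5",
        # "1e-3" and "2.5E+10", but not a bare ".." range operator or a
        # "123column" style naked identifier.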
RegexLexer("like_operator", r"!?~~?\*?", CodeSegment),
RegexLexer("newline", r"\r\n|\n", NewlineSegment),
StringLexer("casting_operator", "::", CodeSegment),
StringLexer("equals", "=", CodeSegment),
StringLexer("greater_than", ">", CodeSegment),
StringLexer("less_than", "<", CodeSegment),
StringLexer("not", "!", CodeSegment),
StringLexer("dot", ".", CodeSegment),
StringLexer("comma", ",", CodeSegment, segment_kwargs={"type": "comma"}),
StringLexer("plus", "+", CodeSegment),
StringLexer("minus", "-", CodeSegment),
StringLexer("divide", "/", CodeSegment),
StringLexer("percent", "%", CodeSegment),
StringLexer("ampersand", "&", CodeSegment),
StringLexer("vertical_bar", "|", CodeSegment),
StringLexer("caret", "^", CodeSegment),
StringLexer("star", "*", CodeSegment),
StringLexer("bracket_open", "(", CodeSegment),
StringLexer("bracket_close", ")", CodeSegment),
StringLexer("sq_bracket_open", "[", CodeSegment),
StringLexer("sq_bracket_close", "]", CodeSegment),
StringLexer("crly_bracket_open", "{", CodeSegment),
StringLexer("crly_bracket_close", "}", CodeSegment),
StringLexer("colon", ":", CodeSegment),
StringLexer("semicolon", ";", CodeSegment),
RegexLexer("code", r"[0-9a-zA-Z_]+", CodeSegment),
]
)
# Set the bare functions
ansi_dialect.sets("bare_functions").update(
["current_timestamp", "current_time", "current_date"]
)
# Set the datetime units
ansi_dialect.sets("datetime_units").update(
[
"DAY",
"DAYOFYEAR",
"HOUR",
"MILLISECOND",
"MINUTE",
"MONTH",
"QUARTER",
"SECOND",
"WEEK",
"WEEKDAY",
"YEAR",
]
)
ansi_dialect.sets("date_part_function_name").update(["DATEADD"])
# Set Keywords
ansi_dialect.sets("unreserved_keywords").update(
[n.strip().upper() for n in ansi_unreserved_keywords.split("\n")]
)
ansi_dialect.sets("reserved_keywords").update(
[n.strip().upper() for n in ansi_reserved_keywords.split("\n")]
)
# Bracket pairs (a set of tuples).
# (name, startref, endref, persists)
# NOTE: The `persists` value controls whether this type
# of bracket is persisted during matching to speed up other
# parts of the matching process. Round brackets are the most
# common and match the largest areas and so are sufficient.
ansi_dialect.sets("bracket_pairs").update(
[
("round", "StartBracketSegment", "EndBracketSegment", True),
("square", "StartSquareBracketSegment", "EndSquareBracketSegment", False),
("curly", "StartCurlyBracketSegment", "EndCurlyBracketSegment", False),
]
)
# Set the value table functions. These are functions that, if they appear as
# an item in "FROM', are treated as returning a COLUMN, not a TABLE. Apparently,
# among dialects supported by SQLFluff, only BigQuery has this concept, but this
# set is defined in the ANSI dialect because:
# - It impacts core linter rules (see L020 and several other rules that subclass
# from it) and how they interpret the contents of table_expressions
# - At least one other database (DB2) has the same value table function,
# UNNEST(), as BigQuery. DB2 is not currently supported by SQLFluff.
ansi_dialect.sets("value_table_functions").update([])
ansi_dialect.add(
# Real segments
DelimiterSegment=Ref("SemicolonSegment"),
SemicolonSegment=StringParser(
";", SymbolSegment, name="semicolon", type="statement_terminator"
),
ColonSegment=StringParser(":", SymbolSegment, name="colon", type="colon"),
SliceSegment=StringParser(":", SymbolSegment, name="slice", type="slice"),
StartBracketSegment=StringParser(
"(", SymbolSegment, name="start_bracket", type="start_bracket"
),
EndBracketSegment=StringParser(
")", SymbolSegment, name="end_bracket", type="end_bracket"
),
StartSquareBracketSegment=StringParser(
"[", SymbolSegment, name="start_square_bracket", type="start_square_bracket"
),
EndSquareBracketSegment=StringParser(
"]", SymbolSegment, name="end_square_bracket", type="end_square_bracket"
),
StartCurlyBracketSegment=StringParser(
"{", SymbolSegment, name="start_curly_bracket", type="start_curly_bracket"
),
EndCurlyBracketSegment=StringParser(
"}", SymbolSegment, name="end_curly_bracket", type="end_curly_bracket"
),
CommaSegment=StringParser(",", SymbolSegment, name="comma", type="comma"),
DotSegment=StringParser(".", SymbolSegment, name="dot", type="dot"),
StarSegment=StringParser("*", SymbolSegment, name="star", type="star"),
TildeSegment=StringParser("~", SymbolSegment, name="tilde", type="tilde"),
CastOperatorSegment=StringParser(
"::", SymbolSegment, name="casting_operator", type="casting_operator"
),
PlusSegment=StringParser("+", SymbolSegment, name="plus", type="binary_operator"),
MinusSegment=StringParser("-", SymbolSegment, name="minus", type="binary_operator"),
PositiveSegment=StringParser(
"+", SymbolSegment, name="positive", type="sign_indicator"
),
NegativeSegment=StringParser(
"-", SymbolSegment, name="negative", type="sign_indicator"
),
DivideSegment=StringParser(
"/", SymbolSegment, name="divide", type="binary_operator"
),
MultiplySegment=StringParser(
"*", SymbolSegment, name="multiply", type="binary_operator"
),
ModuloSegment=StringParser(
"%", SymbolSegment, name="modulo", type="binary_operator"
),
SlashSegment=StringParser("/", SymbolSegment, name="slash", type="slash"),
AmpersandSegment=StringParser(
"&", SymbolSegment, name="ampersand", type="ampersand"
),
PipeSegment=StringParser("|", SymbolSegment, name="pipe", type="pipe"),
BitwiseXorSegment=StringParser(
"^", SymbolSegment, name="binary_xor", type="binary_operator"
),
LikeOperatorSegment=NamedParser(
"like_operator", SymbolSegment, name="like_operator", type="comparison_operator"
),
RawNotSegment=StringParser(
"!", SymbolSegment, name="raw_not", type="raw_comparison_operator"
),
RawEqualsSegment=StringParser(
"=", SymbolSegment, name="raw_equals", type="raw_comparison_operator"
),
RawGreaterThanSegment=StringParser(
">", SymbolSegment, name="raw_greater_than", type="raw_comparison_operator"
),
RawLessThanSegment=StringParser(
"<", SymbolSegment, name="raw_less_than", type="raw_comparison_operator"
),
# The following functions can be called without parentheses per ANSI specification
BareFunctionSegment=SegmentGenerator(
lambda dialect: RegexParser(
r"^(" + r"|".join(dialect.sets("bare_functions")) + r")$",
CodeSegment,
name="bare_function",
type="bare_function",
)
),
    # The strange regex here is to make sure we don't accidentally match numeric
# literals. We also use a regex to explicitly exclude disallowed keywords.
NakedIdentifierSegment=SegmentGenerator(
# Generate the anti template from the set of reserved keywords
lambda dialect: RegexParser(
r"[A-Z0-9_]*[A-Z][A-Z0-9_]*",
CodeSegment,
name="naked_identifier",
type="identifier",
anti_template=r"^(" + r"|".join(dialect.sets("reserved_keywords")) + r")$",
)
),
VersionIdentifierSegment=RegexParser(
r"[A-Z0-9_.]*", CodeSegment, name="version", type="identifier"
),
ParameterNameSegment=RegexParser(
r"[A-Z][A-Z0-9_]*", CodeSegment, name="parameter", type="parameter"
),
FunctionNameIdentifierSegment=RegexParser(
r"[A-Z][A-Z0-9_]*",
CodeSegment,
name="function_name_identifier",
type="function_name_identifier",
),
# Maybe data types should be more restrictive?
DatatypeIdentifierSegment=SegmentGenerator(
# Generate the anti template from the set of reserved keywords
lambda dialect: RegexParser(
r"[A-Z][A-Z0-9_]*",
CodeSegment,
name="data_type_identifier",
type="data_type_identifier",
anti_template=r"^(NOT)$",
# TODO - this is a stopgap until we implement explicit data types
),
),
# Ansi Intervals
DatetimeUnitSegment=SegmentGenerator(
lambda dialect: RegexParser(
r"^(" + r"|".join(dialect.sets("datetime_units")) + r")$",
CodeSegment,
name="date_part",
type="date_part",
)
),
DatePartFunctionName=SegmentGenerator(
lambda dialect: RegexParser(
r"^(" + r"|".join(dialect.sets("date_part_function_name")) + r")$",
CodeSegment,
name="function_name_identifier",
type="function_name_identifier",
)
),
QuotedIdentifierSegment=NamedParser(
"double_quote", CodeSegment, name="quoted_identifier", type="identifier"
),
QuotedLiteralSegment=NamedParser(
"single_quote", CodeSegment, name="quoted_literal", type="literal"
),
SingleQuotedIdentifierSegment=NamedParser(
"single_quote", CodeSegment, name="quoted_identifier", type="identifier"
),
NumericLiteralSegment=NamedParser(
"numeric_literal", CodeSegment, name="numeric_literal", type="literal"
),
    # NullSegment is defined separately from the keyword so we can give it a
    # different type
NullLiteralSegment=StringParser(
"null", KeywordSegment, name="null_literal", type="literal"
),
TrueSegment=StringParser(
"true", KeywordSegment, name="boolean_literal", type="literal"
),
FalseSegment=StringParser(
"false", KeywordSegment, name="boolean_literal", type="literal"
),
# We use a GRAMMAR here not a Segment. Otherwise we get an unnecessary layer
SingleIdentifierGrammar=OneOf(
Ref("NakedIdentifierSegment"), Ref("QuotedIdentifierSegment")
),
BooleanLiteralGrammar=OneOf(Ref("TrueSegment"), Ref("FalseSegment")),
# We specifically define a group of arithmetic operators to make it easier to
# override this if some dialects have different available operators
ArithmeticBinaryOperatorGrammar=OneOf(
Ref("PlusSegment"),
Ref("MinusSegment"),
Ref("DivideSegment"),
Ref("MultiplySegment"),
Ref("ModuloSegment"),
Ref("BitwiseAndSegment"),
Ref("BitwiseOrSegment"),
Ref("BitwiseXorSegment"),
Ref("BitwiseLShiftSegment"),
Ref("BitwiseRShiftSegment"),
),
StringBinaryOperatorGrammar=OneOf(Ref("ConcatSegment")),
BooleanBinaryOperatorGrammar=OneOf(
Ref("AndKeywordSegment"), Ref("OrKeywordSegment")
),
ComparisonOperatorGrammar=OneOf(
Ref("EqualsSegment"),
Ref("GreaterThanSegment"),
Ref("LessThanSegment"),
Ref("GreaterThanOrEqualToSegment"),
Ref("LessThanOrEqualToSegment"),
Ref("NotEqualToSegment"),
Ref("LikeOperatorSegment"),
),
# hookpoint for other dialects
# e.g. EXASOL str to date cast with DATE '2021-01-01'
DateTimeLiteralGrammar=Sequence(
OneOf("DATE", "TIME", "TIMESTAMP", "INTERVAL"), Ref("QuotedLiteralSegment")
),
# Hookpoint for other dialects
# e.g. INTO is optional in BIGQUERY
MergeIntoLiteralGrammar=Sequence("MERGE", "INTO"),
LiteralGrammar=OneOf(
Ref("QuotedLiteralSegment"),
Ref("NumericLiteralSegment"),
Ref("BooleanLiteralGrammar"),
Ref("QualifiedNumericLiteralSegment"),
# NB: Null is included in the literals, because it is a keyword which
# can otherwise be easily mistaken for an identifier.
Ref("NullLiteralSegment"),
Ref("DateTimeLiteralGrammar"),
),
AndKeywordSegment=StringParser("and", KeywordSegment, type="binary_operator"),
OrKeywordSegment=StringParser("or", KeywordSegment, type="binary_operator"),
# This is a placeholder for other dialects.
PreTableFunctionKeywordsGrammar=Nothing(),
BinaryOperatorGrammar=OneOf(
Ref("ArithmeticBinaryOperatorGrammar"),
Ref("StringBinaryOperatorGrammar"),
Ref("BooleanBinaryOperatorGrammar"),
Ref("ComparisonOperatorGrammar"),
),
# This pattern is used in a lot of places.
# Defined here to avoid repetition.
BracketedColumnReferenceListGrammar=Bracketed(
Delimited(
Ref("ColumnReferenceSegment"),
ephemeral_name="ColumnReferenceList",
)
),
OrReplaceGrammar=Sequence("OR", "REPLACE"),
TemporaryTransientGrammar=OneOf("TRANSIENT", Ref("TemporaryGrammar")),
TemporaryGrammar=OneOf("TEMP", "TEMPORARY"),
IfExistsGrammar=Sequence("IF", "EXISTS"),
IfNotExistsGrammar=Sequence("IF", "NOT", "EXISTS"),
LikeGrammar=OneOf("LIKE", "RLIKE", "ILIKE"),
IsClauseGrammar=OneOf(
"NULL",
"NAN",
Ref("BooleanLiteralGrammar"),
),
SelectClauseSegmentGrammar=Sequence(
"SELECT",
Ref("SelectClauseModifierSegment", optional=True),
Indent,
Delimited(
Ref("SelectClauseElementSegment"),
allow_trailing=True,
),
# NB: The Dedent for the indent above lives in the
# SelectStatementSegment so that it sits in the right
# place corresponding to the whitespace.
),
SelectClauseElementTerminatorGrammar=OneOf(
"FROM",
"WHERE",
Sequence("ORDER", "BY"),
"LIMIT",
Ref("CommaSegment"),
Ref("SetOperatorSegment"),
),
# Define these as grammars to allow child dialects to enable them (since they are
# non-standard keywords)
IsNullGrammar=Nothing(),
NotNullGrammar=Nothing(),
FromClauseTerminatorGrammar=OneOf(
"WHERE",
"LIMIT",
Sequence("GROUP", "BY"),
Sequence("ORDER", "BY"),
"HAVING",
"QUALIFY",
"WINDOW",
Ref("SetOperatorSegment"),
Ref("WithNoSchemaBindingClauseSegment"),
Ref("WithDataClauseSegment"),
),
WhereClauseTerminatorGrammar=OneOf(
"LIMIT",
Sequence("GROUP", "BY"),
Sequence("ORDER", "BY"),
"HAVING",
"QUALIFY",
"WINDOW",
"OVERLAPS",
),
PrimaryKeyGrammar=Sequence("PRIMARY", "KEY"),
ForeignKeyGrammar=Sequence("FOREIGN", "KEY"),
UniqueKeyGrammar=Sequence("UNIQUE"),
# Odd syntax, but prevents eager parameters being confused for data types
FunctionParameterGrammar=OneOf(
Sequence(
Ref("ParameterNameSegment", optional=True),
OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")),
),
OneOf(Sequence("ANY", "TYPE"), Ref("DatatypeSegment")),
),
# This is a placeholder for other dialects.
SimpleArrayTypeGrammar=Nothing(),
BaseExpressionElementGrammar=OneOf(
Ref("LiteralGrammar"),
Ref("BareFunctionSegment"),
Ref("IntervalExpressionSegment"),
Ref("FunctionSegment"),
Ref("ColumnReferenceSegment"),
Ref("ExpressionSegment"),
),
FilterClauseGrammar=Sequence(
"FILTER", Bracketed(Sequence("WHERE", Ref("ExpressionSegment")))
),
FrameClauseUnitGrammar=OneOf("ROWS", "RANGE"),
    # It's a sequence so that the Postgres dialect can parametrize it with LATERAL
JoinKeywords=Sequence("JOIN"),
# NATURAL joins are not supported in all dialects (e.g. not in Bigquery
# or T-SQL). So define here to allow override with Nothing() for those.
NaturalJoinKeywords=Sequence(
"NATURAL",
OneOf(
# Note that NATURAL joins do not support CROSS joins
"INNER",
Sequence(
OneOf("LEFT", "RIGHT", "FULL"),
Ref.keyword("OUTER", optional=True),
optional=True,
),
optional=True,
),
),
NestedJoinSegment=Nothing(),
ReferentialActionGrammar=OneOf(
"RESTRICT",
"CASCADE",
Sequence("SET", "NULL"),
Sequence("NO", "ACTION"),
Sequence("SET", "DEFAULT"),
),
DropBehaviorGrammar=OneOf("RESTRICT", "CASCADE", optional=True),
ReferenceDefinitionGrammar=Sequence(
"REFERENCES",
Ref("TableReferenceSegment"),
# Foreign columns making up FOREIGN KEY constraint
Ref("BracketedColumnReferenceListGrammar", optional=True),
Sequence(
"MATCH",
OneOf(
"FULL",
"PARTIAL",
"SIMPLE",
),
optional=True,
),
AnySetOf(
# ON DELETE clause, e.g. ON DELETE NO ACTION
Sequence(
"ON",
"DELETE",
Ref("ReferentialActionGrammar"),
),
# ON UPDATE clause, e.g. ON UPDATE SET NULL
Sequence(
"ON",
"UPDATE",
Ref("ReferentialActionGrammar"),
),
),
),
)
class FileSegment(BaseFileSegment):
"""A segment representing a whole file or script.
This is also the default "root" segment of the dialect,
and so is usually instantiated directly. It therefore
has no match_grammar.
"""
# NB: We don't need a match_grammar here because we're
# going straight into instantiating it directly usually.
parse_grammar: Optional[Matchable] = Delimited(
Ref("StatementSegment"),
delimiter=AnyNumberOf(Ref("DelimiterSegment"), min_times=1),
allow_gaps=True,
allow_trailing=True,
)
class IntervalExpressionSegment(BaseSegment):
"""An interval expression segment."""
type = "interval_expression"
match_grammar: Matchable = Sequence(
"INTERVAL",
OneOf(
# The Numeric Version
Sequence(
Ref("NumericLiteralSegment"),
OneOf(Ref("QuotedLiteralSegment"), Ref("DatetimeUnitSegment")),
),
# The String version
Ref("QuotedLiteralSegment"),
),
)
class ArrayLiteralSegment(BaseSegment):
"""An array literal segment."""
type = "array_literal"
match_grammar: Matchable = Bracketed(
Delimited(Ref("ExpressionSegment"), optional=True),
bracket_type="square",
)
class DatatypeSegment(BaseSegment):
"""A data type segment.
Supports timestamp with(out) time zone. Doesn't currently support intervals.
"""
type = "data_type"
match_grammar: Matchable = OneOf(
Sequence(
OneOf("TIME", "TIMESTAMP"),
Bracketed(Ref("NumericLiteralSegment"), optional=True),
Sequence(OneOf("WITH", "WITHOUT"), "TIME", "ZONE", optional=True),
),
Sequence(
"DOUBLE",
"PRECISION",
),
Sequence(
OneOf(
Sequence(
OneOf("CHARACTER", "BINARY"),
OneOf("VARYING", Sequence("LARGE", "OBJECT")),
),
Sequence(
# Some dialects allow optional qualification of data types with
# schemas
Sequence(
Ref("SingleIdentifierGrammar"),
Ref("DotSegment"),
allow_gaps=False,
optional=True,
),
Ref("DatatypeIdentifierSegment"),
allow_gaps=False,
),
),
Bracketed(
OneOf(
Delimited(Ref("ExpressionSegment")),
# The brackets might be empty for some cases...
optional=True,
),
# There may be no brackets for some data types
optional=True,
),
Ref("CharCharacterSetSegment", optional=True),
),
)
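# e.g. the DatatypeSegment grammar above covers types such as
# "TIMESTAMP(3) WITH TIME ZONE", "DOUBLE PRECISION", "CHARACTER VARYING(255)"
# and "NUMERIC(10, 2)".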
# hookpoint
ansi_dialect.add(CharCharacterSetSegment=Nothing())
class ObjectReferenceSegment(BaseSegment):
"""A reference to an object."""
type = "object_reference"
# match grammar (don't allow whitespace)
match_grammar: Matchable = Delimited(
Ref("SingleIdentifierGrammar"),
delimiter=OneOf(
Ref("DotSegment"), Sequence(Ref("DotSegment"), Ref("DotSegment"))
),
terminator=OneOf(
"ON",
"AS",
"USING",
Ref("CommaSegment"),
Ref("CastOperatorSegment"),
Ref("StartSquareBracketSegment"),
Ref("StartBracketSegment"),
Ref("BinaryOperatorGrammar"),
Ref("ColonSegment"),
Ref("DelimiterSegment"),
Ref("JoinLikeClauseGrammar"),
BracketedSegment,
),
allow_gaps=False,
)
class ObjectReferencePart(NamedTuple):
"""Details about a table alias."""
part: str # Name of the part
        # Segment(s) comprising the part. Usually just one segment, but could
# be multiple in dialects (e.g. BigQuery) that support unusual
# characters in names (e.g. "-")
segments: List[BaseSegment]
@classmethod
def _iter_reference_parts(cls, elem) -> Generator[ObjectReferencePart, None, None]:
"""Extract the elements of a reference and yield."""
# trim on quotes and split out any dots.
for part in elem.raw_trimmed().split("."):
yield cls.ObjectReferencePart(part, [elem])
def iter_raw_references(self) -> Generator[ObjectReferencePart, None, None]:
"""Generate a list of reference strings and elements.
Each reference is an ObjectReferencePart. If some are split, then a
segment may appear twice, but the substring will only appear once.
"""
# Extract the references from those identifiers (because some may be quoted)
for elem in self.recursive_crawl("identifier"):
yield from self._iter_reference_parts(elem)
def is_qualified(self):
"""Return if there is more than one element to the reference."""
return len(list(self.iter_raw_references())) > 1
def qualification(self):
"""Return the qualification type of this reference."""
return "qualified" if self.is_qualified() else "unqualified"
class ObjectReferenceLevel(Enum):
"""Labels for the "levels" of a reference.
Note: Since SQLFluff does not have access to database catalog
information, interpreting references will often be ambiguous.
Typical example: The first part *may* refer to a schema, but that is
almost always optional if referring to an object in some default or
currently "active" schema. For this reason, use of this enum is optional
and intended mainly to clarify the intent of the code -- no guarantees!
Additionally, the terminology may vary by dialect, e.g. in BigQuery,
"project" would be a more accurate term than "schema".
"""
OBJECT = 1
TABLE = 2
SCHEMA = 3
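        # e.g. for a reference like "my_schema.my_table.my_column", the OBJECT
        # level resolves to "my_column", TABLE to "my_table" and SCHEMA to
        # "my_schema" (levels count from the right, as in
        # extract_possible_references below).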
def extract_possible_references(
self, level: Union[ObjectReferenceLevel, int]
) -> List[ObjectReferencePart]:
"""Extract possible references of a given level.
"level" may be (but is not required to be) a value from the
ObjectReferenceLevel enum defined above.
NOTE: The base implementation here returns at most one part, but
dialects such as BigQuery that support nesting (e.g. STRUCT) may return
multiple reference parts.
"""
level = self._level_to_int(level)
refs = list(self.iter_raw_references())
if len(refs) >= level:
return [refs[-level]]
return []
def extract_possible_multipart_references(
self, levels: List[Union[ObjectReferenceLevel, int]]
) -> List[Tuple[ObjectReferencePart, ...]]:
"""Extract possible multipart references, e.g. schema.table."""
levels_tmp = [self._level_to_int(level) for level in levels]
min_level = min(levels_tmp)
max_level = max(levels_tmp)
refs = list(self.iter_raw_references())
if len(refs) >= max_level:
return [tuple(refs[-max_level : 1 - min_level])]
return []
@staticmethod
def _level_to_int(level: Union[ObjectReferenceLevel, int]) -> int:
# If it's an ObjectReferenceLevel, get the value. Otherwise, assume it's
# an int.
level = getattr(level, "value", level)
assert isinstance(level, int)
return level
class TableReferenceSegment(ObjectReferenceSegment):
"""A reference to an table, CTE, subquery or alias."""
type = "table_reference"
class SchemaReferenceSegment(ObjectReferenceSegment):
"""A reference to a schema."""
type = "schema_reference"
class DatabaseReferenceSegment(ObjectReferenceSegment):
"""A reference to a database."""
type = "database_reference"
class IndexReferenceSegment(ObjectReferenceSegment):
"""A reference to an index."""
type = "index_reference"
class ExtensionReferenceSegment(ObjectReferenceSegment):
"""A reference to an extension."""
type = "extension_reference"
class ColumnReferenceSegment(ObjectReferenceSegment):
"""A reference to column, field or alias."""
type = "column_reference"
class SequenceReferenceSegment(ObjectReferenceSegment):
"""A reference to a sequence."""
type = "sequence_reference"
class TriggerReferenceSegment(ObjectReferenceSegment):
"""A reference to a trigger."""
type = "trigger_reference"
class SingleIdentifierListSegment(BaseSegment):
"""A comma delimited list of identifiers."""
type = "identifier_list"
match_grammar: Matchable = Delimited(Ref("SingleIdentifierGrammar"))
class ArrayAccessorSegment(BaseSegment):
"""An array accessor e.g. [3:4]."""
type = "array_accessor"
match_grammar: Matchable = Bracketed(
Delimited(
OneOf(Ref("NumericLiteralSegment"), Ref("ExpressionSegment")),
delimiter=Ref("SliceSegment"),
ephemeral_name="ArrayAccessorContent",
),
bracket_type="square",
)
class AliasedObjectReferenceSegment(BaseSegment):
"""A reference to an object with an `AS` clause."""
type = "object_reference"
match_grammar: Matchable = Sequence(
Ref("ObjectReferenceSegment"), Ref("AliasExpressionSegment")
)
ansi_dialect.add(
# This is a hook point to allow subclassing for other dialects
AliasedTableReferenceGrammar=Sequence(
Ref("TableReferenceSegment"), Ref("AliasExpressionSegment")
)
)
class AliasExpressionSegment(BaseSegment):
"""A reference to an object with an `AS` clause.
The optional AS keyword allows both implicit and explicit aliasing.
"""
type = "alias_expression"
match_grammar: Matchable = Sequence(
Ref.keyword("AS", optional=True),
OneOf(
Sequence(
Ref("SingleIdentifierGrammar"),
# Column alias in VALUES clause
Bracketed(Ref("SingleIdentifierListSegment"), optional=True),
),
Ref("SingleQuotedIdentifierSegment"),
),
)
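# e.g. both "my_table t" and "my_table AS t" parse as an alias_expression here,
# since the AS keyword above is optional.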
class ShorthandCastSegment(BaseSegment):
"""A casting operation using '::'."""
type = "cast_expression"
match_grammar: Matchable = Sequence(
Ref("CastOperatorSegment"), Ref("DatatypeSegment"), allow_gaps=True
)
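# e.g. the "::DECIMAL(10, 2)" part of "amount::DECIMAL(10, 2)" is matched by
# the ShorthandCastSegment above.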
class QualifiedNumericLiteralSegment(BaseSegment):
"""A numeric literal with a + or - sign preceding.
The qualified numeric literal is a compound of a raw
literal and a plus/minus sign. We do it this way rather
than at the lexing step because the lexer doesn't deal
well with ambiguity.
"""
type = "numeric_literal"
match_grammar: Matchable = Sequence(
OneOf(Ref("PlusSegment"), Ref("MinusSegment")),
Ref("NumericLiteralSegment"),
allow_gaps=False,
)
ansi_dialect.add(
# FunctionContentsExpressionGrammar intended as a hook to override
# in other dialects.
FunctionContentsExpressionGrammar=Ref("ExpressionSegment"),
FunctionContentsGrammar=AnyNumberOf(
Ref("ExpressionSegment"),
# A Cast-like function
Sequence(Ref("ExpressionSegment"), "AS", Ref("DatatypeSegment")),
# An extract-like or substring-like function
Sequence(
OneOf(Ref("DatetimeUnitSegment"), Ref("ExpressionSegment")),
"FROM",
Ref("ExpressionSegment"),
),
Sequence(
# Allow an optional distinct keyword here.
Ref.keyword("DISTINCT", optional=True),
OneOf(
# Most functions will be using the delimited route
# but for COUNT(*) or similar we allow the star segment
# here.
Ref("StarSegment"),
Delimited(Ref("FunctionContentsExpressionGrammar")),
),
),
Ref(
"OrderByClauseSegment"
        ),  # used by string_agg (postgres), group_concat (exasol), listagg (snowflake), ...
Sequence(Ref.keyword("SEPARATOR"), Ref("LiteralGrammar")),
# like a function call: POSITION ( 'QL' IN 'SQL')
Sequence(
OneOf(
Ref("QuotedLiteralSegment"),
Ref("SingleIdentifierGrammar"),
Ref("ColumnReferenceSegment"),
),
"IN",
OneOf(
Ref("QuotedLiteralSegment"),
Ref("SingleIdentifierGrammar"),
Ref("ColumnReferenceSegment"),
),
),
Sequence(OneOf("IGNORE", "RESPECT"), "NULLS"),
),
PostFunctionGrammar=OneOf(
# Optional OVER suffix for window functions.
# This is supported in bigquery & postgres (and its derivatives)
# and so is included here for now.
Ref("OverClauseSegment"),
# Filter clause supported by both Postgres and SQLite
Ref("FilterClauseGrammar"),
),
)
class OverClauseSegment(BaseSegment):
"""An OVER clause for window functions."""
type = "over_clause"
match_grammar: Matchable = Sequence(
"OVER",
OneOf(
Ref("SingleIdentifierGrammar"), # Window name
Bracketed(
Ref("WindowSpecificationSegment", optional=True),
),
),
)
class WindowSpecificationSegment(BaseSegment):
"""Window specification within OVER(...)."""
type = "window_specification"
match_grammar: Matchable = Sequence(
Ref("SingleIdentifierGrammar", optional=True), # "Base" window name
Ref("PartitionClauseSegment", optional=True),
Ref("OrderByClauseSegment", optional=True),
Ref("FrameClauseSegment", optional=True),
optional=True,
ephemeral_name="OverClauseContent",
)
class FunctionNameSegment(BaseSegment):
"""Function name, including any prefix bits, e.g. project or schema."""
type = "function_name"
match_grammar: Matchable = Sequence(
# Project name, schema identifier, etc.
AnyNumberOf(
Sequence(
Ref("SingleIdentifierGrammar"),
Ref("DotSegment"),
),
),
# Base function name
OneOf(
Ref("FunctionNameIdentifierSegment"),
Ref("QuotedIdentifierSegment"),
),
allow_gaps=False,
)
class FunctionSegment(BaseSegment):
"""A scalar or aggregate function.
Maybe in the future we should distinguish between
aggregate functions and other functions. For now
we treat them the same because they look the same
for our purposes.
"""
type = "function"
match_grammar: Matchable = OneOf(
Sequence(
# Treat functions which take date parts separately
# So those functions parse date parts as DatetimeUnitSegment
# rather than identifiers.
Sequence(
Ref("DatePartFunctionNameSegment"),
Bracketed(
Delimited(
Ref("DatetimeUnitSegment"),
Ref(
"FunctionContentsGrammar",
# The brackets might be empty for some functions...
optional=True,
ephemeral_name="FunctionContentsGrammar",
),
)
),
),
),
Sequence(
Sequence(
AnyNumberOf(
Ref("FunctionNameSegment"),
max_times=1,
min_times=1,
exclude=OneOf(
Ref("DatePartFunctionNameSegment"),
Ref("ValuesClauseSegment"),
),
),
Bracketed(
Ref(
"FunctionContentsGrammar",
# The brackets might be empty for some functions...
optional=True,
ephemeral_name="FunctionContentsGrammar",
)
),
),
Ref("PostFunctionGrammar", optional=True),
),
)
class PartitionClauseSegment(BaseSegment):
"""A `PARTITION BY` for window functions."""
type = "partitionby_clause"
match_grammar: Matchable = StartsWith(
"PARTITION",
terminator=OneOf("ORDER", Ref("FrameClauseUnitGrammar")),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Optional[Matchable] = Sequence(
"PARTITION",
"BY",
Indent,
# Brackets are optional in a partition by statement
OptionallyBracketed(Delimited(Ref("ExpressionSegment"))),
Dedent,
)
class FrameClauseSegment(BaseSegment):
"""A frame clause for window functions.
https://docs.oracle.com/cd/E17952_01/mysql-8.0-en/window-functions-frames.html
"""
type = "frame_clause"
_frame_extent = OneOf(
Sequence("CURRENT", "ROW"),
Sequence(
OneOf(Ref("NumericLiteralSegment"), "UNBOUNDED"),
OneOf("PRECEDING", "FOLLOWING"),
),
)
match_grammar: Matchable = Sequence(
Ref("FrameClauseUnitGrammar"),
OneOf(_frame_extent, Sequence("BETWEEN", _frame_extent, "AND", _frame_extent)),
)
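# e.g. the frame clause grammar above covers "ROWS UNBOUNDED PRECEDING" as well
# as "RANGE BETWEEN 1 PRECEDING AND CURRENT ROW".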
ansi_dialect.add(
# This is a hook point to allow subclassing for other dialects
PostTableExpressionGrammar=Nothing()
)
class FromExpressionElementSegment(BaseSegment):
"""A table expression."""
type = "from_expression_element"
match_grammar: Matchable = Sequence(
Ref("PreTableFunctionKeywordsGrammar", optional=True),
OptionallyBracketed(Ref("TableExpressionSegment")),
OneOf(
Ref("AliasExpressionSegment"),
exclude=OneOf(
Ref("SamplingExpressionSegment"),
Ref("JoinLikeClauseGrammar"),
),
optional=True,
),
# https://cloud.google.com/bigquery/docs/reference/standard-sql/arrays#flattening_arrays
Sequence("WITH", "OFFSET", Ref("AliasExpressionSegment"), optional=True),
Ref("SamplingExpressionSegment", optional=True),
Ref("PostTableExpressionGrammar", optional=True),
)
def get_eventual_alias(self) -> Optional[AliasInfo]:
"""Return the eventual table name referred to by this table expression.
Returns:
            :obj:`AliasInfo` containing a string representation of the alias,
            a reference to the segment containing it, and whether it's an
            explicit alias.
"""
alias_expression = self.get_child("alias_expression")
tbl_expression = self.get_child("table_expression")
if not tbl_expression: # pragma: no cover
tbl_expression = self.get_child("bracketed").get_child("table_expression")
ref = tbl_expression.get_child("object_reference")
if alias_expression:
# If it has an alias, return that
segment = alias_expression.get_child("identifier")
if segment:
return AliasInfo(
segment.raw, segment, True, self, alias_expression, ref
)
# If not return the object name (or None if there isn't one)
if ref:
# Return the last element of the reference.
penultimate_ref: ObjectReferenceSegment.ObjectReferencePart = list(
ref.iter_raw_references()
)[-1]
return AliasInfo(
penultimate_ref.part,
penultimate_ref.segments[0],
False,
self,
None,
ref,
)
# No references or alias, return None
return None
class FromExpressionSegment(BaseSegment):
"""A from expression segment."""
type = "from_expression"
match_grammar: Matchable = Sequence(
Indent,
OneOf(
# check first for MLTableExpression, because of possible FunctionSegment in
# MainTableExpression
Ref("MLTableExpressionSegment"),
Ref("FromExpressionElementSegment"),
),
Conditional(Dedent, indented_joins=False),
AnyNumberOf(
Ref("JoinClauseSegment"), Ref("JoinLikeClauseGrammar"), optional=True
),
Conditional(Dedent, indented_joins=True),
)
class TableExpressionSegment(BaseSegment):
"""The main table expression e.g. within a FROM clause."""
type = "table_expression"
match_grammar: Matchable = OneOf(
Ref("ValuesClauseSegment"),
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Ref("TableReferenceSegment"),
# Nested Selects
Bracketed(Ref("SelectableGrammar")),
Bracketed(Ref("MergeStatementSegment")),
)
class WildcardIdentifierSegment(ObjectReferenceSegment):
"""Any identifier of the form a.b.*.
This inherits iter_raw_references from the
ObjectReferenceSegment.
"""
type = "wildcard_identifier"
match_grammar: Matchable = Sequence(
# *, blah.*, blah.blah.*, etc.
AnyNumberOf(
Sequence(Ref("SingleIdentifierGrammar"), Ref("DotSegment"), allow_gaps=True)
),
Ref("StarSegment"),
allow_gaps=False,
)
def iter_raw_references(self):
"""Generate a list of reference strings and elements.
Each element is a tuple of (str, segment). If some are
split, then a segment may appear twice, but the substring
will only appear once.
"""
# Extract the references from those identifiers (because some may be quoted)
for elem in self.recursive_crawl("identifier", "star"):
yield from self._iter_reference_parts(elem)
class WildcardExpressionSegment(BaseSegment):
"""A star (*) expression for a SELECT clause.
This is separate from the identifier to allow for
some dialects which extend this logic to allow
REPLACE, EXCEPT or similar clauses e.g. BigQuery.
"""
type = "wildcard_expression"
match_grammar: Matchable = Sequence(
# *, blah.*, blah.blah.*, etc.
Ref("WildcardIdentifierSegment")
)
class SelectClauseElementSegment(BaseSegment):
"""An element in the targets of a select statement."""
type = "select_clause_element"
# Important to split elements before parsing, otherwise debugging is really hard.
match_grammar: Matchable = GreedyUntil(
Ref("SelectClauseElementTerminatorGrammar"),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Optional[Matchable] = OneOf(
# *, blah.*, blah.blah.*, etc.
Ref("WildcardExpressionSegment"),
Sequence(
Ref("BaseExpressionElementGrammar"),
Ref("AliasExpressionSegment", optional=True),
),
)
def get_alias(self) -> Optional[ColumnAliasInfo]:
"""Get info on alias within SELECT clause element."""
alias_expression_segment = next(self.recursive_crawl("alias_expression"), None)
if alias_expression_segment is None:
# Return None if no alias expression is found.
return None
alias_identifier_segment = next(
(s for s in alias_expression_segment.segments if s.is_type("identifier")),
None,
)
if alias_identifier_segment is None:
# Return None if no alias identifier expression is found.
# Happened in the past due to bad syntax
return None # pragma: no cover
# Get segment being aliased.
aliased_segment = next(
s
for s in self.segments
if not s.is_whitespace and not s.is_meta and s != alias_expression_segment
)
# Find all the columns being aliased.
column_reference_segments = []
if aliased_segment.is_type("column_reference"):
column_reference_segments.append(aliased_segment)
else:
column_reference_segments.extend(
aliased_segment.recursive_crawl("column_reference")
)
return ColumnAliasInfo(
alias_identifier_name=alias_identifier_segment.raw,
aliased_segment=aliased_segment,
column_reference_segments=column_reference_segments,
)
class SelectClauseModifierSegment(BaseSegment):
"""Things that come after SELECT but before the columns."""
type = "select_clause_modifier"
match_grammar: Matchable = OneOf(
"DISTINCT",
"ALL",
)
class SelectClauseSegment(BaseSegment):
"""A group of elements in a select target statement."""
type = "select_clause"
match_grammar: Matchable = StartsWith(
Sequence("SELECT", Ref("WildcardExpressionSegment", optional=True)),
terminator=OneOf(
"FROM",
"WHERE",
Sequence("ORDER", "BY"),
"LIMIT",
"OVERLAPS",
Ref("SetOperatorSegment"),
),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Matchable = Ref("SelectClauseSegmentGrammar")
class JoinClauseSegment(BaseSegment):
"""Any number of join clauses, including the `JOIN` keyword."""
type = "join_clause"
match_grammar: Matchable = OneOf(
# NB These qualifiers are optional
# TODO: Allow nested joins like:
# ....FROM S1.T1 t1 LEFT JOIN ( S2.T2 t2 JOIN S3.T3 t3 ON t2.col1=t3.col1) ON
# tab1.col1 = tab2.col1
Sequence(
OneOf(
"CROSS",
"INNER",
Sequence(
OneOf(
"FULL",
"LEFT",
"RIGHT",
),
Ref.keyword("OUTER", optional=True),
),
optional=True,
),
Ref("JoinKeywords"),
Indent,
Sequence(
Ref("FromExpressionElementSegment"),
AnyNumberOf(Ref("NestedJoinSegment")),
Conditional(Dedent, indented_using_on=False),
# NB: this is optional
OneOf(
# ON clause
Ref("JoinOnConditionSegment"),
# USING clause
Sequence(
"USING",
Indent,
Bracketed(
# NB: We don't use BracketedColumnReferenceListGrammar
# here because we're just using SingleIdentifierGrammar,
# rather than ObjectReferenceSegment or
# ColumnReferenceSegment.
# This is a) so that we don't lint it as a reference and
# b) because the column will probably be returned anyway
# during parsing.
Delimited(
Ref("SingleIdentifierGrammar"),
ephemeral_name="UsingClauseContents",
)
),
Dedent,
),
# Unqualified joins *are* allowed. They just might not
# be a good idea.
optional=True,
),
Conditional(Indent, indented_using_on=False),
),
Dedent,
),
# Note NATURAL joins do not support Join conditions
Sequence(
Ref("NaturalJoinKeywords"),
Ref("JoinKeywords"),
Indent,
Ref("FromExpressionElementSegment"),
Dedent,
),
)
def get_eventual_aliases(self) -> List[Tuple[BaseSegment, AliasInfo]]:
"""Return the eventual table name referred to by this join clause."""
buff = []
from_expression = self.get_child("from_expression_element")
alias: AliasInfo = from_expression.get_eventual_alias()
        # Only append if non-null. A None reference may
        # indicate a generator expression or similar.
if alias:
buff.append((from_expression, alias))
# In some dialects, like TSQL, join clauses can have nested join clauses
for join_clause in self.get_children("join_clause"):
aliases: List[
Tuple[BaseSegment, AliasInfo]
] = join_clause.get_eventual_aliases()
            # Only append if non-null. A None reference may
            # indicate a generator expression or similar.
if aliases:
buff = buff + aliases
return buff
class JoinOnConditionSegment(BaseSegment):
"""The `ON` condition within a `JOIN` clause."""
type = "join_on_condition"
match_grammar: Matchable = Sequence(
"ON",
Indent,
OptionallyBracketed(Ref("ExpressionSegment")),
Dedent,
)
ansi_dialect.add(
# This is a hook point to allow subclassing for other dialects
JoinLikeClauseGrammar=Nothing(),
)
class FromClauseSegment(BaseSegment):
"""A `FROM` clause like in `SELECT`.
NOTE: this is a delimited set of table expressions, with a variable
number of optional join clauses with those table expressions. The
    delimited aspect is the higher of the two such that the following is
valid (albeit unusual):
```
SELECT *
FROM a JOIN b, c JOIN d
```
"""
type = "from_clause"
match_grammar: Matchable = StartsWith(
"FROM",
terminator=Ref("FromClauseTerminatorGrammar"),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Optional[Matchable] = Sequence(
"FROM",
Delimited(
Ref("FromExpressionSegment"),
),
)
def get_eventual_aliases(self) -> List[Tuple[BaseSegment, AliasInfo]]:
"""List the eventual aliases of this from clause.
Comes as a list of tuples (table expr, tuple (string, segment, bool)).
"""
buff = []
direct_table_children = []
join_clauses = []
for from_expression in self.get_children("from_expression"):
direct_table_children += from_expression.get_children(
"from_expression_element"
)
join_clauses += from_expression.get_children("join_clause")
# Iterate through the potential sources of aliases
for clause in direct_table_children:
alias: AliasInfo = clause.get_eventual_alias()
            # Only append if non-null. A None reference may
            # indicate a generator expression or similar.
table_expr = (
clause
if clause in direct_table_children
else clause.get_child("from_expression_element")
)
if alias:
buff.append((table_expr, alias))
for clause in join_clauses:
aliases: List[Tuple[BaseSegment, AliasInfo]] = clause.get_eventual_aliases()
            # Only append if non-null. A None reference may
            # indicate a generator expression or similar.
if aliases:
buff = buff + aliases
return buff
class WhenClauseSegment(BaseSegment):
"""A 'WHEN' clause for a 'CASE' statement."""
type = "when_clause"
match_grammar: Matchable = Sequence(
"WHEN",
Indent,
Ref("ExpressionSegment"),
"THEN",
Ref("ExpressionSegment"),
Dedent,
)
class ElseClauseSegment(BaseSegment):
"""An 'ELSE' clause for a 'CASE' statement."""
type = "else_clause"
match_grammar: Matchable = Sequence(
"ELSE", Indent, Ref("ExpressionSegment"), Dedent
)
class CaseExpressionSegment(BaseSegment):
"""A `CASE WHEN` clause."""
type = "case_expression"
match_grammar: Matchable = OneOf(
Sequence(
"CASE",
Indent,
AnyNumberOf(Ref("WhenClauseSegment")),
Ref("ElseClauseSegment", optional=True),
Dedent,
"END",
),
Sequence(
"CASE",
Ref("ExpressionSegment"),
Indent,
AnyNumberOf(Ref("WhenClauseSegment")),
Ref("ElseClauseSegment", optional=True),
Dedent,
"END",
),
)
ansi_dialect.add(
# Expression_A_Grammar
# https://www.cockroachlabs.com/docs/v20.2/sql-grammar.html#a_expr
Expression_A_Grammar=Sequence(
OneOf(
Ref("Expression_C_Grammar"),
Sequence(
OneOf(
Ref("PositiveSegment"),
Ref("NegativeSegment"),
# Ref('TildeSegment'),
"NOT",
"PRIOR",
# used in CONNECT BY clauses (EXASOL, Snowflake, Postgres...)
),
Ref("Expression_C_Grammar"),
),
),
AnyNumberOf(
OneOf(
Sequence(
OneOf(
Sequence(
Ref.keyword("NOT", optional=True),
Ref("LikeGrammar"),
),
Sequence(
Ref("BinaryOperatorGrammar"),
Ref.keyword("NOT", optional=True),
),
# We need to add a lot more here...
),
Ref("Expression_C_Grammar"),
Sequence(
Ref.keyword("ESCAPE"),
Ref("Expression_C_Grammar"),
optional=True,
),
),
Sequence(
Ref.keyword("NOT", optional=True),
"IN",
Bracketed(
OneOf(
Delimited(
Ref("Expression_A_Grammar"),
),
Ref("SelectableGrammar"),
ephemeral_name="InExpression",
)
),
),
Sequence(
Ref.keyword("NOT", optional=True),
"IN",
Ref("FunctionSegment"), # E.g. UNNEST()
),
Sequence(
"IS",
Ref.keyword("NOT", optional=True),
Ref("IsClauseGrammar"),
),
Ref("IsNullGrammar"),
Ref("NotNullGrammar"),
Sequence(
# e.g. NOT EXISTS, but other expressions could be met as
# well by inverting the condition with the NOT operator
"NOT",
Ref("Expression_C_Grammar"),
),
Sequence(
Ref.keyword("NOT", optional=True),
"BETWEEN",
Ref("Expression_B_Grammar"),
"AND",
Ref("Expression_A_Grammar"),
),
)
),
),
# Expression_B_Grammar: Does not directly feed into Expression_A_Grammar
# but is used for a BETWEEN statement within Expression_A_Grammar.
# https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#b_expr
Expression_B_Grammar=Sequence(
OneOf(
Ref("Expression_C_Grammar"),
Sequence(
OneOf(
Ref("PositiveSegment"),
Ref("NegativeSegment"),
),
Ref("Expression_B_Grammar"),
),
),
AnyNumberOf(
Sequence(
OneOf(
Ref("ArithmeticBinaryOperatorGrammar"),
Ref("StringBinaryOperatorGrammar"),
Ref("ComparisonOperatorGrammar"),
),
Ref("Expression_C_Grammar"),
),
),
),
# Expression_C_Grammar
# https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#c_expr
Expression_C_Grammar=OneOf(
Sequence("EXISTS", Bracketed(Ref("SelectStatementSegment"))),
# should be first priority, otherwise EXISTS() would be matched as a function
Sequence(
OneOf(
Ref("Expression_D_Grammar"),
Ref("CaseExpressionSegment"),
),
AnyNumberOf(Ref("ShorthandCastSegment")),
),
),
# Expression_D_Grammar
# https://www.cockroachlabs.com/docs/v20.2/sql-grammar.htm#d_expr
Expression_D_Grammar=Sequence(
OneOf(
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Bracketed(
OneOf(
# We're using the expression segment here rather than the grammar so
# that in the parsed structure we get nested elements.
Ref("ExpressionSegment"),
Ref("SelectableGrammar"),
Delimited(
Ref(
"ColumnReferenceSegment"
), # WHERE (a,b,c) IN (select a,b,c FROM...)
Ref(
"FunctionSegment"
), # WHERE (a, substr(b,1,3)) IN (select c,d FROM...)
Ref("LiteralGrammar"), # WHERE (a, 2) IN (SELECT b, c FROM ...)
Ref("LocalAliasSegment"), # WHERE (LOCAL.a, LOCAL.b) IN (...)
),
ephemeral_name="BracketedExpression",
),
),
# Allow potential select statement without brackets
Ref("SelectStatementSegment"),
Ref("LiteralGrammar"),
Ref("IntervalExpressionSegment"),
Ref("TypelessStructSegment"),
Ref("ColumnReferenceSegment"),
# For triggers we allow "NEW.*" but not just "*" nor "a.b.*"
# So can't use WildcardIdentifierSegment nor WildcardExpressionSegment
Sequence(
Ref("SingleIdentifierGrammar"), Ref("DotSegment"), Ref("StarSegment")
),
Sequence(
Ref("SimpleArrayTypeGrammar", optional=True), Ref("ArrayLiteralSegment")
),
Sequence(
Ref("DatatypeSegment"),
OneOf(
Ref("QuotedLiteralSegment"),
Ref("NumericLiteralSegment"),
Ref("BooleanLiteralGrammar"),
Ref("NullLiteralSegment"),
Ref("DateTimeLiteralGrammar"),
),
),
Ref("LocalAliasSegment"),
),
Ref("Accessor_Grammar", optional=True),
allow_gaps=True,
),
Accessor_Grammar=AnyNumberOf(Ref("ArrayAccessorSegment")),
)
class EqualsSegment(BaseSegment):
"""Equals operator."""
type = "comparison_operator"
name = "equals"
match_grammar: Matchable = Ref("RawEqualsSegment")
class GreaterThanSegment(BaseSegment):
"""Greater than operator."""
type = "comparison_operator"
name = "greater_than"
match_grammar: Matchable = Ref("RawGreaterThanSegment")
class LessThanSegment(BaseSegment):
"""Less than operator."""
type = "comparison_operator"
name = "less_than"
match_grammar: Matchable = Ref("RawLessThanSegment")
class GreaterThanOrEqualToSegment(BaseSegment):
"""Greater than or equal to operator."""
type = "comparison_operator"
name = "greater_than_equal_to"
match_grammar: Matchable = Sequence(
Ref("RawGreaterThanSegment"), Ref("RawEqualsSegment"), allow_gaps=False
)
class LessThanOrEqualToSegment(BaseSegment):
"""Less than or equal to operator."""
type = "comparison_operator"
name = "less_than_equal_to"
match_grammar: Matchable = Sequence(
Ref("RawLessThanSegment"), Ref("RawEqualsSegment"), allow_gaps=False
)
class NotEqualToSegment(BaseSegment):
"""Not equal to operator."""
type = "comparison_operator"
name = "not_equal_to"
match_grammar: Matchable = OneOf(
Sequence(Ref("RawNotSegment"), Ref("RawEqualsSegment"), allow_gaps=False),
Sequence(
Ref("RawLessThanSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False
),
)
class ConcatSegment(BaseSegment):
"""Concat operator."""
type = "binary_operator"
name = "concatenate"
match_grammar: Matchable = Sequence(
Ref("PipeSegment"), Ref("PipeSegment"), allow_gaps=False
)
class BitwiseAndSegment(BaseSegment):
"""Bitwise and operator."""
type = "binary_operator"
name = "binary_and"
match_grammar: Matchable = Ref("AmpersandSegment")
class BitwiseOrSegment(BaseSegment):
"""Bitwise or operator."""
type = "binary_operator"
name = "binary_or"
match_grammar: Matchable = Ref("PipeSegment")
class BitwiseLShiftSegment(BaseSegment):
"""Bitwise left-shift operator."""
type = "binary_operator"
match_grammar: Matchable = Sequence(
Ref("RawLessThanSegment"), Ref("RawLessThanSegment"), allow_gaps=False
)
class BitwiseRShiftSegment(BaseSegment):
"""Bitwise right-shift operator."""
type = "binary_operator"
match_grammar: Matchable = Sequence(
Ref("RawGreaterThanSegment"), Ref("RawGreaterThanSegment"), allow_gaps=False
)
class ExpressionSegment(BaseSegment):
"""A expression, either arithmetic or boolean.
NB: This is potentially VERY recursive and
mostly uses the grammars above. This version
also doesn't bound itself first, and so is potentially
VERY SLOW. I don't really like this solution.
We rely on elements of the expression to bound
themselves rather than bounding at the expression
level. Trying to bound the ExpressionSegment itself
has been too unstable and not resilient enough to
other bugs.
"""
type = "expression"
match_grammar: Matchable = Ref("Expression_A_Grammar")
class WhereClauseSegment(BaseSegment):
"""A `WHERE` clause like in `SELECT` or `INSERT`."""
type = "where_clause"
match_grammar: Matchable = StartsWith(
"WHERE",
terminator=Ref("WhereClauseTerminatorGrammar"),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Optional[Matchable] = Sequence(
"WHERE",
Indent,
OptionallyBracketed(Ref("ExpressionSegment")),
Dedent,
)
class OrderByClauseSegment(BaseSegment):
"""A `ORDER BY` clause like in `SELECT`."""
type = "orderby_clause"
match_grammar: Matchable = StartsWith(
Sequence("ORDER", "BY"),
terminator=OneOf(
"LIMIT",
"HAVING",
"QUALIFY",
# For window functions
"WINDOW",
Ref("FrameClauseUnitGrammar"),
"SEPARATOR",
),
)
parse_grammar: Optional[Matchable] = Sequence(
"ORDER",
"BY",
Indent,
Delimited(
Sequence(
OneOf(
Ref("ColumnReferenceSegment"),
# Can `ORDER BY 1`
Ref("NumericLiteralSegment"),
# Can order by an expression
Ref("ExpressionSegment"),
),
OneOf("ASC", "DESC", optional=True),
# NB: This isn't really ANSI, and isn't supported in Mysql, but
# is supported in enough other dialects for it to make sense here
# for now.
Sequence("NULLS", OneOf("FIRST", "LAST"), optional=True),
),
terminator=OneOf(Ref.keyword("LIMIT"), Ref("FrameClauseUnitGrammar")),
),
Dedent,
)
class GroupByClauseSegment(BaseSegment):
"""A `GROUP BY` clause like in `SELECT`."""
type = "groupby_clause"
match_grammar: Matchable = StartsWith(
Sequence("GROUP", "BY"),
terminator=OneOf("ORDER", "LIMIT", "HAVING", "QUALIFY", "WINDOW"),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Optional[Matchable] = Sequence(
"GROUP",
"BY",
Indent,
Delimited(
OneOf(
Ref("ColumnReferenceSegment"),
# Can `GROUP BY 1`
Ref("NumericLiteralSegment"),
# Can `GROUP BY coalesce(col, 1)`
Ref("ExpressionSegment"),
),
terminator=OneOf("ORDER", "LIMIT", "HAVING", "QUALIFY", "WINDOW"),
),
Dedent,
)
class HavingClauseSegment(BaseSegment):
"""A `HAVING` clause like in `SELECT`."""
type = "having_clause"
match_grammar: Matchable = StartsWith(
"HAVING",
terminator=OneOf("ORDER", "LIMIT", "QUALIFY", "WINDOW"),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Optional[Matchable] = Sequence(
"HAVING",
Indent,
OptionallyBracketed(Ref("ExpressionSegment")),
Dedent,
)
class LimitClauseSegment(BaseSegment):
"""A `LIMIT` clause like in `SELECT`."""
type = "limit_clause"
match_grammar: Matchable = Sequence(
"LIMIT",
Indent,
OneOf(
Ref("NumericLiteralSegment"),
Sequence(
Ref("NumericLiteralSegment"), "OFFSET", Ref("NumericLiteralSegment")
),
Sequence(
Ref("NumericLiteralSegment"),
Ref("CommaSegment"),
Ref("NumericLiteralSegment"),
),
),
Dedent,
)
class OverlapsClauseSegment(BaseSegment):
"""An `OVERLAPS` clause like in `SELECT."""
type = "overlaps_clause"
match_grammar: Matchable = StartsWith(
"OVERLAPS",
)
parse_grammar: Optional[Matchable] = Sequence(
"OVERLAPS",
OneOf(
Sequence(
Bracketed(
Ref("DateTimeLiteralGrammar"),
Ref("CommaSegment"),
Ref("DateTimeLiteralGrammar"),
)
),
Ref("ColumnReferenceSegment"),
),
)
class NamedWindowSegment(BaseSegment):
"""A WINDOW clause."""
type = "named_window"
match_grammar: Matchable = Sequence(
"WINDOW",
Indent,
Delimited(
Ref("NamedWindowExpressionSegment"),
),
Dedent,
)
class NamedWindowExpressionSegment(BaseSegment):
"""Named window expression."""
type = "named_window_expression"
match_grammar: Matchable = Sequence(
Ref("SingleIdentifierGrammar"), # Window name
"AS",
Bracketed(
Ref("WindowSpecificationSegment"),
),
)
class ValuesClauseSegment(BaseSegment):
"""A `VALUES` clause like in `INSERT`."""
type = "values_clause"
match_grammar: Matchable = Sequence(
OneOf("VALUE", "VALUES"),
Delimited(
Sequence(
                # MySQL uses `ROW` in its value statement.
# Currently SQLFluff doesn't differentiate between
# Values statement:
# https://dev.mysql.com/doc/refman/8.0/en/values.html
# and Values() function (used in INSERT statements):
# https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_values
# TODO: split these out in future.
Ref.keyword("ROW", optional=True),
Bracketed(
Delimited(
"DEFAULT",
Ref("ExpressionSegment"),
ephemeral_name="ValuesClauseElements",
)
),
),
),
)
class UnorderedSelectStatementSegment(BaseSegment):
"""A `SELECT` statement without any ORDER clauses or later.
    This is designed for use in the context of set operations;
for other use cases, we should use the main
SelectStatementSegment.
"""
type = "select_statement"
# match grammar. This one makes sense in the context of knowing that it's
# definitely a statement, we just don't know what type yet.
match_grammar: Matchable = StartsWith(
# NB: In bigquery, the select clause may include an EXCEPT, which
# will also match the set operator, but by starting with the whole
# select clause rather than just the SELECT keyword, we mitigate that
# here.
Ref("SelectClauseSegment"),
terminator=OneOf(
Ref("SetOperatorSegment"),
Ref("WithNoSchemaBindingClauseSegment"),
Ref("WithDataClauseSegment"),
Ref("OrderByClauseSegment"),
Ref("LimitClauseSegment"),
Ref("NamedWindowSegment"),
),
enforce_whitespace_preceding_terminator=True,
)
parse_grammar: Matchable = Sequence(
Ref("SelectClauseSegment"),
# Dedent for the indent in the select clause.
# It's here so that it can come AFTER any whitespace.
Dedent,
Ref("FromClauseSegment", optional=True),
Ref("WhereClauseSegment", optional=True),
Ref("GroupByClauseSegment", optional=True),
Ref("HavingClauseSegment", optional=True),
Ref("OverlapsClauseSegment", optional=True),
)
class SelectStatementSegment(BaseSegment):
"""A `SELECT` statement."""
type = "select_statement"
# match grammar. This one makes sense in the context of knowing that it's
# definitely a statement, we just don't know what type yet.
match_grammar: Matchable = StartsWith(
# NB: In bigquery, the select clause may include an EXCEPT, which
# will also match the set operator, but by starting with the whole
# select clause rather than just the SELECT keyword, we mitigate that
# here.
Ref("SelectClauseSegment"),
terminator=OneOf(
Ref("SetOperatorSegment"),
Ref("WithNoSchemaBindingClauseSegment"),
Ref("WithDataClauseSegment"),
),
enforce_whitespace_preceding_terminator=True,
)
# Inherit most of the parse grammar from the original.
parse_grammar: Matchable = UnorderedSelectStatementSegment.parse_grammar.copy(
insert=[
Ref("OrderByClauseSegment", optional=True),
Ref("LimitClauseSegment", optional=True),
Ref("NamedWindowSegment", optional=True),
]
)
ansi_dialect.add(
# Things that behave like select statements
SelectableGrammar=OneOf(
OptionallyBracketed(Ref("WithCompoundStatementSegment")),
Ref("NonWithSelectableGrammar"),
),
# Things that behave like select statements, which can form part of with
# expressions.
NonWithSelectableGrammar=OneOf(
Ref("SetExpressionSegment"),
OptionallyBracketed(Ref("SelectStatementSegment")),
Ref("NonSetSelectableGrammar"),
),
# Things that do not behave like select statements, which can form part of with
# expressions.
NonWithNonSelectableGrammar=OneOf(
Ref("UpdateStatementSegment"),
Ref("InsertStatementSegment"),
Ref("DeleteStatementSegment"),
),
# Things that behave like select statements, which can form part of set expressions.
NonSetSelectableGrammar=OneOf(
Ref("ValuesClauseSegment"),
Ref("UnorderedSelectStatementSegment"),
        # If it's bracketed, we can have the full select statement here,
# otherwise we can't because any order by clauses should belong
# to the set expression.
Bracketed(Ref("SelectStatementSegment")),
),
)
class CTEDefinitionSegment(BaseSegment):
"""A CTE Definition from a WITH statement.
`tab (col1,col2) AS (SELECT a,b FROM x)`
"""
type = "common_table_expression"
match_grammar: Matchable = Sequence(
Ref("SingleIdentifierGrammar"),
Bracketed(
Ref("SingleIdentifierListSegment"),
optional=True,
),
"AS",
Bracketed(
# Ephemeral here to subdivide the query.
Ref("SelectableGrammar", ephemeral_name="SelectableGrammar")
),
)
def get_identifier(self) -> BaseSegment:
"""Gets the identifier of this CTE.
        Note: it blindly gets the first identifier it finds,
        which, given the structure of a CTE definition, is
        usually the right one.
"""
return self.get_child("identifier")
class WithCompoundStatementSegment(BaseSegment):
"""A `SELECT` statement preceded by a selection of `WITH` clauses.
`WITH tab (col1,col2) AS (SELECT a,b FROM x)`
"""
type = "with_compound_statement"
# match grammar
match_grammar: Matchable = StartsWith("WITH")
parse_grammar: Optional[Matchable] = Sequence(
"WITH",
Ref.keyword("RECURSIVE", optional=True),
Conditional(Indent, indented_ctes=True),
Delimited(
Ref("CTEDefinitionSegment"),
terminator=Ref.keyword("SELECT"),
),
Conditional(Dedent, indented_ctes=True),
OneOf(
Ref("NonWithSelectableGrammar"),
Ref("NonWithNonSelectableGrammar"),
),
)
class SetOperatorSegment(BaseSegment):
"""A set operator such as Union, Minus, Except or Intersect."""
type = "set_operator"
match_grammar: Matchable = OneOf(
Sequence("UNION", OneOf("DISTINCT", "ALL", optional=True)),
"INTERSECT",
"EXCEPT",
"MINUS",
exclude=Sequence("EXCEPT", Bracketed(Anything())),
)
class SetExpressionSegment(BaseSegment):
"""A set expression with either Union, Minus, Except or Intersect."""
type = "set_expression"
# match grammar
match_grammar: Matchable = Sequence(
Ref("NonSetSelectableGrammar"),
AnyNumberOf(
Sequence(
Ref("SetOperatorSegment"),
Ref("NonSetSelectableGrammar"),
),
min_times=1,
),
Ref("OrderByClauseSegment", optional=True),
Ref("LimitClauseSegment", optional=True),
Ref("NamedWindowSegment", optional=True),
)
class InsertStatementSegment(BaseSegment):
"""An `INSERT` statement."""
type = "insert_statement"
match_grammar: Matchable = StartsWith("INSERT")
parse_grammar: Optional[Matchable] = Sequence(
"INSERT",
# Maybe OVERWRITE is just snowflake?
# (It's also Hive but that has full insert grammar implementation)
Ref.keyword("OVERWRITE", optional=True),
"INTO",
Ref("TableReferenceSegment"),
Ref("BracketedColumnReferenceListGrammar", optional=True),
Ref("SelectableGrammar"),
)
class MergeStatementSegment(BaseSegment):
"""A `MERGE` statement."""
type = "merge_statement"
match_grammar: Matchable = StartsWith(
Ref("MergeIntoLiteralGrammar"),
)
# Note separate `match_grammar` as overridden in other dialects.
parse_grammar: Optional[Matchable] = Sequence(
Ref("MergeIntoLiteralGrammar"),
Indent,
OneOf(
Ref("TableReferenceSegment"),
Ref("AliasedTableReferenceGrammar"),
),
Dedent,
"USING",
Indent,
OneOf(
Ref("TableReferenceSegment"),
Ref("AliasedTableReferenceGrammar"),
Sequence(
Bracketed(
Ref("SelectableGrammar"),
),
Ref("AliasExpressionSegment"),
),
),
Dedent,
Ref("JoinOnConditionSegment"),
Ref("MergeMatchSegment"),
)
class MergeMatchSegment(BaseSegment):
"""Contains dialect specific merge operations.
Hookpoint for dialect specific behavior
e.g. UpdateClause / DeleteClause, multiple MergeMatchedClauses
"""
type = "merge_match"
match_grammar: Matchable = AnyNumberOf(
Ref("MergeMatchedClauseSegment"),
Ref("MergeNotMatchedClauseSegment"),
min_times=1,
)
class MergeMatchedClauseSegment(BaseSegment):
"""The `WHEN MATCHED` clause within a `MERGE` statement."""
type = "merge_when_matched_clause"
match_grammar: Matchable = Sequence(
"WHEN",
"MATCHED",
Sequence("AND", Ref("ExpressionSegment"), optional=True),
"THEN",
Indent,
OneOf(
Ref("MergeUpdateClauseSegment"),
Ref("MergeDeleteClauseSegment"),
),
Dedent,
)
class MergeNotMatchedClauseSegment(BaseSegment):
"""The `WHEN NOT MATCHED` clause within a `MERGE` statement."""
type = "merge_when_not_matched_clause"
match_grammar: Matchable = Sequence(
"WHEN",
"NOT",
"MATCHED",
Sequence("AND", Ref("ExpressionSegment"), optional=True),
"THEN",
Indent,
Ref("MergeInsertClauseSegment"),
Dedent,
)
class MergeUpdateClauseSegment(BaseSegment):
"""`UPDATE` clause within the `MERGE` statement."""
type = "merge_update_clause"
match_grammar: Matchable = Sequence(
"UPDATE",
Indent,
Ref("SetClauseListSegment"),
Dedent,
)
class MergeInsertClauseSegment(BaseSegment):
"""`INSERT` clause within the `MERGE` statement."""
type = "merge_insert_clause"
match_grammar: Matchable = Sequence(
"INSERT",
Indent,
Ref("BracketedColumnReferenceListGrammar", optional=True),
Dedent,
Indent,
Ref("ValuesClauseSegment", optional=True),
Dedent,
)
class MergeDeleteClauseSegment(BaseSegment):
"""`DELETE` clause within the `MERGE` statement."""
type = "merge_delete_clause"
match_grammar: Matchable = Ref.keyword("DELETE")
class TransactionStatementSegment(BaseSegment):
"""A `COMMIT`, `ROLLBACK` or `TRANSACTION` statement."""
type = "transaction_statement"
match_grammar: Matchable = Sequence(
# COMMIT [ WORK ] [ AND [ NO ] CHAIN ]
# ROLLBACK [ WORK ] [ AND [ NO ] CHAIN ]
# BEGIN | END TRANSACTION | WORK
# NOTE: "TO SAVEPOINT" is not yet supported
# https://docs.snowflake.com/en/sql-reference/sql/begin.html
# https://www.postgresql.org/docs/current/sql-end.html
OneOf("START", "BEGIN", "COMMIT", "ROLLBACK", "END"),
OneOf("TRANSACTION", "WORK", optional=True),
Sequence("NAME", Ref("SingleIdentifierGrammar"), optional=True),
Sequence("AND", Ref.keyword("NO", optional=True), "CHAIN", optional=True),
)
class ColumnConstraintSegment(BaseSegment):
"""A column option; each CREATE TABLE column can have 0 or more."""
type = "column_constraint_segment"
# Column constraint from
# https://www.postgresql.org/docs/12/sql-createtable.html
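    # e.g. "NOT NULL", "DEFAULT 0", "PRIMARY KEY" or "REFERENCES other_tbl (col)".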
match_grammar: Matchable = Sequence(
Sequence(
"CONSTRAINT",
Ref("ObjectReferenceSegment"), # Constraint name
optional=True,
),
OneOf(
Sequence(Ref.keyword("NOT", optional=True), "NULL"), # NOT NULL or NULL
Sequence("CHECK", Bracketed(Ref("ExpressionSegment"))),
Sequence( # DEFAULT <value>
"DEFAULT",
OneOf(
Ref("LiteralGrammar"),
Ref("FunctionSegment"),
Ref("BareFunctionSegment"),
),
),
Ref("PrimaryKeyGrammar"),
Ref("UniqueKeyGrammar"), # UNIQUE
"AUTO_INCREMENT", # AUTO_INCREMENT (MySQL)
"UNSIGNED", # UNSIGNED (MySQL)
Ref("ReferenceDefinitionGrammar"), # REFERENCES reftable [ ( refcolumn) ]x
Ref("CommentClauseSegment"),
),
)
class ColumnDefinitionSegment(BaseSegment):
"""A column definition, e.g. for CREATE TABLE or ALTER TABLE."""
type = "column_definition"
match_grammar: Matchable = Sequence(
Ref("SingleIdentifierGrammar"), # Column name
Ref("DatatypeSegment"), # Column type
Bracketed(Anything(), optional=True), # For types like VARCHAR(100)
AnyNumberOf(
Ref("ColumnConstraintSegment", optional=True),
),
)
class IndexColumnDefinitionSegment(BaseSegment):
"""A column definition for CREATE INDEX."""
type = "index_column_definition"
match_grammar: Matchable = Sequence(
Ref("SingleIdentifierGrammar"), # Column name
OneOf("ASC", "DESC", optional=True),
)
class TableConstraintSegment(BaseSegment):
"""A table constraint, e.g. for CREATE TABLE."""
type = "table_constraint"
# Later add support for CHECK constraint, others?
# e.g. CONSTRAINT constraint_1 PRIMARY KEY(column_1)
match_grammar: Matchable = Sequence(
Sequence( # [ CONSTRAINT <Constraint name> ]
"CONSTRAINT", Ref("ObjectReferenceSegment"), optional=True
),
OneOf(
Sequence( # UNIQUE ( column_name [, ... ] )
"UNIQUE",
Ref("BracketedColumnReferenceListGrammar"),
# Later add support for index_parameters?
),
Sequence( # PRIMARY KEY ( column_name [, ... ] ) index_parameters
Ref("PrimaryKeyGrammar"),
# Columns making up PRIMARY KEY constraint
Ref("BracketedColumnReferenceListGrammar"),
# Later add support for index_parameters?
),
Sequence( # FOREIGN KEY ( column_name [, ... ] )
# REFERENCES reftable [ ( refcolumn [, ... ] ) ]
Ref("ForeignKeyGrammar"),
# Local columns making up FOREIGN KEY constraint
Ref("BracketedColumnReferenceListGrammar"),
Ref(
"ReferenceDefinitionGrammar"
), # REFERENCES reftable [ ( refcolumn) ]
),
),
)
class TableEndClauseSegment(BaseSegment):
"""Allow for additional table endings.
(like WITHOUT ROWID for SQLite)
"""
type = "table_end_clause_segment"
match_grammar: Matchable = Nothing()
class TypelessStructSegment(BaseSegment):
"""Expression to construct a STRUCT with implicit types.
    (as in BigQuery, for example)
"""
type = "typeless_struct"
match_grammar: Matchable = Nothing()
class CreateTableStatementSegment(BaseSegment):
"""A `CREATE TABLE` statement."""
type = "create_table_statement"
# https://crate.io/docs/sql-99/en/latest/chapters/18.html
# https://www.postgresql.org/docs/12/sql-createtable.html
match_grammar: Matchable = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref("TemporaryTransientGrammar", optional=True),
"TABLE",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
OneOf(
# Columns and comment syntax:
Sequence(
Bracketed(
Delimited(
OneOf(
Ref("TableConstraintSegment"),
Ref("ColumnDefinitionSegment"),
),
)
),
Ref("CommentClauseSegment", optional=True),
),
# Create AS syntax:
Sequence(
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
),
# Create like syntax
Sequence("LIKE", Ref("TableReferenceSegment")),
),
Ref("TableEndClauseSegment", optional=True),
)
class CommentClauseSegment(BaseSegment):
"""A comment clause.
e.g. COMMENT 'view/table/column description'
"""
type = "comment_clause"
match_grammar: Matchable = Sequence("COMMENT", Ref("QuotedLiteralSegment"))
class CreateSchemaStatementSegment(BaseSegment):
"""A `CREATE SCHEMA` statement."""
type = "create_schema_statement"
match_grammar: Matchable = Sequence(
"CREATE",
"SCHEMA",
Ref("IfNotExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
)
class SetSchemaStatementSegment(BaseSegment):
"""A `SET SCHEMA` statement."""
type = "set_schema_statement"
match_grammar: Matchable = Sequence(
"SET",
"SCHEMA",
Ref("IfNotExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
)
class DropSchemaStatementSegment(BaseSegment):
"""A `DROP SCHEMA` statement."""
type = "drop_schema_statement"
match_grammar: Matchable = Sequence(
"DROP",
"SCHEMA",
Ref("IfExistsGrammar", optional=True),
Ref("SchemaReferenceSegment"),
Ref("DropBehaviorGrammar", optional=True),
)
class DropTypeStatementSegment(BaseSegment):
"""A `DROP TYPE` statement."""
type = "drop_type_statement"
match_grammar: Matchable = Sequence(
"DROP",
"TYPE",
Ref("IfExistsGrammar", optional=True),
Ref("ObjectReferenceSegment"),
Ref("DropBehaviorGrammar", optional=True),
)
class CreateDatabaseStatementSegment(BaseSegment):
"""A `CREATE DATABASE` statement."""
type = "create_database_statement"
match_grammar: Matchable = Sequence(
"CREATE",
"DATABASE",
Ref("IfNotExistsGrammar", optional=True),
Ref("DatabaseReferenceSegment"),
)
class DropDatabaseStatementSegment(BaseSegment):
"""A `DROP DATABASE` statement."""
type = "drop_database_statement"
match_grammar: Matchable = Sequence(
"DROP",
"DATABASE",
Ref("IfExistsGrammar", optional=True),
Ref("DatabaseReferenceSegment"),
Ref("DropBehaviorGrammar", optional=True),
)
class CreateExtensionStatementSegment(BaseSegment):
"""A `CREATE EXTENSION` statement.
https://www.postgresql.org/docs/9.1/sql-createextension.html
"""
type = "create_extension_statement"
match_grammar: Matchable = Sequence(
"CREATE",
"EXTENSION",
Ref("IfNotExistsGrammar", optional=True),
Ref("ExtensionReferenceSegment"),
Ref.keyword("WITH", optional=True),
Sequence("SCHEMA", Ref("SchemaReferenceSegment"), optional=True),
Sequence("VERSION", Ref("VersionIdentifierSegment"), optional=True),
Sequence("FROM", Ref("VersionIdentifierSegment"), optional=True),
)
class CreateIndexStatementSegment(BaseSegment):
"""A `CREATE INDEX` statement."""
type = "create_index_statement"
match_grammar: Matchable = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref.keyword("UNIQUE", optional=True),
"INDEX",
Ref("IfNotExistsGrammar", optional=True),
Ref("IndexReferenceSegment"),
"ON",
Ref("TableReferenceSegment"),
Sequence(
Bracketed(
Delimited(
Ref("IndexColumnDefinitionSegment"),
),
)
),
)
class AlterTableStatementSegment(BaseSegment):
"""An `ALTER TABLE` statement."""
type = "alter_table_statement"
# Based loosely on:
# https://dev.mysql.com/doc/refman/8.0/en/alter-table.html
# TODO: Flesh this out with more detail.
match_grammar: Matchable = Sequence(
"ALTER",
"TABLE",
Ref("TableReferenceSegment"),
Delimited(
OneOf(
# Table options
Sequence(
Ref("ParameterNameSegment"),
Ref("EqualsSegment", optional=True),
OneOf(Ref("LiteralGrammar"), Ref("NakedIdentifierSegment")),
),
# Add things
Sequence(
OneOf("ADD", "MODIFY"),
Ref.keyword("COLUMN", optional=True),
Ref("ColumnDefinitionSegment"),
OneOf(
Sequence(
OneOf("FIRST", "AFTER"), Ref("ColumnReferenceSegment")
),
# Bracketed Version of the same
Ref("BracketedColumnReferenceListGrammar"),
optional=True,
),
),
# Rename
Sequence(
"RENAME",
OneOf("AS", "TO", optional=True),
Ref("TableReferenceSegment"),
),
),
),
)
class CreateViewStatementSegment(BaseSegment):
"""A `CREATE VIEW` statement."""
type = "create_view_statement"
# https://crate.io/docs/sql-99/en/latest/chapters/18.html#create-view-statement
# https://dev.mysql.com/doc/refman/8.0/en/create-view.html
# https://www.postgresql.org/docs/12/sql-createview.html
match_grammar: Matchable = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
"VIEW",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
# Optional list of column names
Ref("BracketedColumnReferenceListGrammar", optional=True),
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
Ref("WithNoSchemaBindingClauseSegment", optional=True),
)
class DropTableStatementSegment(BaseSegment):
"""A `DROP TABLE` statement."""
type = "drop_table_statement"
match_grammar: Matchable = Sequence(
"DROP",
"TABLE",
Ref("IfExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
Ref("DropBehaviorGrammar", optional=True),
)
class DropViewStatementSegment(BaseSegment):
"""A `DROP VIEW` statement."""
type = "drop_view_statement"
match_grammar: Matchable = Sequence(
"DROP",
"VIEW",
Ref("IfExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
Ref("DropBehaviorGrammar", optional=True),
)
class DropUserStatementSegment(BaseSegment):
"""A `DROP USER` statement."""
type = "drop_user_statement"
match_grammar: Matchable = Sequence(
"DROP",
"USER",
Ref("IfExistsGrammar", optional=True),
Ref("ObjectReferenceSegment"),
)
class TruncateStatementSegment(BaseSegment):
"""`TRUNCATE TABLE` statement."""
type = "truncate_table"
match_grammar: Matchable = Sequence(
"TRUNCATE",
Ref.keyword("TABLE", optional=True),
Ref("TableReferenceSegment"),
)
class DropIndexStatementSegment(BaseSegment):
"""A `DROP INDEX` statement."""
type = "drop_index_statement"
# DROP INDEX <Index name> [CONCURRENTLY] [IF EXISTS] {RESTRICT | CASCADE}
match_grammar: Matchable = Sequence(
"DROP",
"INDEX",
Ref.keyword("CONCURRENTLY", optional=True),
Ref("IfExistsGrammar", optional=True),
Ref("IndexReferenceSegment"),
Ref("DropBehaviorGrammar", optional=True),
)
class AccessStatementSegment(BaseSegment):
"""A `GRANT` or `REVOKE` statement.
    In order to help reduce code duplication we decided to implement other dialect
    specific grants (like Snowflake) here too, which helps with maintainability. We
    also note that this makes the grammar less "correct", but in our opinion the
    benefits outweigh that cost.
Grant specific information:
* https://www.postgresql.org/docs/9.0/sql-grant.html
* https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html
Revoke specific information:
* https://www.postgresql.org/docs/9.0/sql-revoke.html
* https://docs.snowflake.com/en/sql-reference/sql/revoke-role.html
* https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege.html
* https://docs.snowflake.com/en/sql-reference/sql/revoke-privilege-share.html
"""
type = "access_statement"
# Privileges that can be set on the account (specific to snowflake)
_global_permissions = OneOf(
Sequence(
"CREATE",
OneOf(
"ROLE",
"USER",
"WAREHOUSE",
"DATABASE",
"INTEGRATION",
),
),
Sequence("APPLY", "MASKING", "POLICY"),
Sequence("EXECUTE", "TASK"),
Sequence("MANAGE", "GRANTS"),
Sequence("MONITOR", OneOf("EXECUTION", "USAGE")),
)
_schema_object_names = [
"TABLE",
"VIEW",
"STAGE",
"FUNCTION",
"PROCEDURE",
"ROUTINE",
"SEQUENCE",
"STREAM",
"TASK",
]
_schema_object_types = OneOf(
*_schema_object_names,
Sequence("MATERIALIZED", "VIEW"),
Sequence("EXTERNAL", "TABLE"),
Sequence("FILE", "FORMAT"),
)
# We reuse the object names above and simply append an `S` to the end of them to get
# plurals
_schema_object_types_plural = OneOf(
*[f"{object_name}S" for object_name in _schema_object_names]
)
_permissions = Sequence(
OneOf(
Sequence(
"CREATE",
OneOf(
"SCHEMA",
Sequence("MASKING", "POLICY"),
"PIPE",
_schema_object_types,
),
),
Sequence("IMPORTED", "PRIVILEGES"),
"APPLY",
"CONNECT",
"CREATE",
"DELETE",
"EXECUTE",
"INSERT",
"MODIFY",
"MONITOR",
"OPERATE",
"OWNERSHIP",
"READ",
"REFERENCE_USAGE",
"REFERENCES",
"SELECT",
"TEMP",
"TEMPORARY",
"TRIGGER",
"TRUNCATE",
"UPDATE",
"USAGE",
"USE_ANY_ROLE",
"WRITE",
Sequence("ALL", Ref.keyword("PRIVILEGES", optional=True)),
),
Ref("BracketedColumnReferenceListGrammar", optional=True),
)
# All of the object types that we can grant permissions on.
# This list will contain ansi sql objects as well as dialect specific ones.
_objects = OneOf(
"ACCOUNT",
Sequence(
OneOf(
Sequence("RESOURCE", "MONITOR"),
"WAREHOUSE",
"DATABASE",
"DOMAIN",
"INTEGRATION",
"LANGUAGE",
"SCHEMA",
"ROLE",
"TABLESPACE",
"TYPE",
Sequence(
"FOREIGN",
OneOf("SERVER", Sequence("DATA", "WRAPPER")),
),
Sequence("ALL", "SCHEMAS", "IN", "DATABASE"),
Sequence("FUTURE", "SCHEMAS", "IN", "DATABASE"),
_schema_object_types,
Sequence("ALL", _schema_object_types_plural, "IN", "SCHEMA"),
Sequence(
"FUTURE",
_schema_object_types_plural,
"IN",
OneOf("DATABASE", "SCHEMA"),
),
optional=True,
),
Delimited(Ref("ObjectReferenceSegment"), terminator=OneOf("TO", "FROM")),
Ref("FunctionParameterListGrammar", optional=True),
),
Sequence("LARGE", "OBJECT", Ref("NumericLiteralSegment")),
)
match_grammar: Matchable = OneOf(
# Based on https://www.postgresql.org/docs/13/sql-grant.html
# and https://docs.snowflake.com/en/sql-reference/sql/grant-privilege.html
Sequence(
"GRANT",
OneOf(
Sequence(
Delimited(
OneOf(_global_permissions, _permissions),
delimiter=Ref("CommaSegment"),
terminator="ON",
),
"ON",
_objects,
),
Sequence("ROLE", Ref("ObjectReferenceSegment")),
Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")),
# In the case where a role is granted non-explicitly,
# e.g. GRANT ROLE_NAME TO OTHER_ROLE_NAME
# See https://www.postgresql.org/docs/current/sql-grant.html
Ref("ObjectReferenceSegment"),
),
"TO",
OneOf("GROUP", "USER", "ROLE", "SHARE", optional=True),
Delimited(
OneOf(Ref("ObjectReferenceSegment"), Ref("FunctionSegment"), "PUBLIC"),
delimiter=Ref("CommaSegment"),
),
OneOf(
Sequence("WITH", "GRANT", "OPTION"),
Sequence("WITH", "ADMIN", "OPTION"),
Sequence("COPY", "CURRENT", "GRANTS"),
optional=True,
),
Sequence(
"GRANTED",
"BY",
OneOf(
"CURRENT_USER",
"SESSION_USER",
Ref("ObjectReferenceSegment"),
),
optional=True,
),
),
# Based on https://www.postgresql.org/docs/12/sql-revoke.html
Sequence(
"REVOKE",
Sequence("GRANT", "OPTION", "FOR", optional=True),
OneOf(
Sequence(
Delimited(
OneOf(_global_permissions, _permissions),
delimiter=Ref("CommaSegment"),
terminator="ON",
),
"ON",
_objects,
),
Sequence("ROLE", Ref("ObjectReferenceSegment")),
Sequence("OWNERSHIP", "ON", "USER", Ref("ObjectReferenceSegment")),
),
"FROM",
OneOf("GROUP", "USER", "ROLE", "SHARE", optional=True),
Delimited(
Ref("ObjectReferenceSegment"),
delimiter=Ref("CommaSegment"),
),
Ref("DropBehaviorGrammar", optional=True),
),
)
class DeleteStatementSegment(BaseSegment):
"""A `DELETE` statement.
DELETE FROM <table name> [ WHERE <search condition> ]
"""
type = "delete_statement"
# match grammar. This one makes sense in the context of knowing that it's
# definitely a statement, we just don't know what type yet.
match_grammar: Matchable = StartsWith("DELETE")
parse_grammar: Optional[Matchable] = Sequence(
"DELETE",
Ref("FromClauseSegment"),
Ref("WhereClauseSegment", optional=True),
)
class UpdateStatementSegment(BaseSegment):
"""An `Update` statement.
UPDATE <table name> SET <set clause list> [ WHERE <search condition> ]
"""
type = "update_statement"
match_grammar: Matchable = StartsWith("UPDATE")
parse_grammar: Optional[Matchable] = Sequence(
"UPDATE",
Ref("TableReferenceSegment"),
        # SET is not a reserved word in all dialects (e.g. Redshift),
        # so specifically exclude it as an allowed implicit alias to avoid parsing errors.
OneOf(Ref("AliasExpressionSegment"), exclude=Ref.keyword("SET"), optional=True),
Ref("SetClauseListSegment"),
Ref("FromClauseSegment", optional=True),
Ref("WhereClauseSegment", optional=True),
)
class SetClauseListSegment(BaseSegment):
"""SQL 1992 set clause list.
<set clause list> ::=
<set clause> [ { <comma> <set clause> }... ]
<set clause> ::=
<object column> <equals operator> <update source>
<update source> ::=
<value expression>
| <null specification>
| DEFAULT
<object column> ::= <column name>
"""
type = "set_clause_list"
match_grammar: Matchable = Sequence(
"SET",
Indent,
OneOf(
Ref("SetClauseSegment"),
# set clause
AnyNumberOf(
Delimited(Ref("SetClauseSegment")),
),
),
Dedent,
)
class SetClauseSegment(BaseSegment):
"""SQL 1992 set clause.
<set clause> ::=
<object column> <equals operator> <update source>
<update source> ::=
<value expression>
| <null specification>
| DEFAULT
<object column> ::= <column name>
"""
type = "set_clause"
match_grammar: Matchable = Sequence(
Ref("ColumnReferenceSegment"),
Ref("EqualsSegment"),
OneOf(
Ref("LiteralGrammar"),
Ref("BareFunctionSegment"),
Ref("FunctionSegment"),
Ref("ColumnReferenceSegment"),
Ref("ExpressionSegment"),
"DEFAULT",
),
AnyNumberOf(Ref("ShorthandCastSegment")),
)
class FunctionDefinitionGrammar(BaseSegment):
"""This is the body of a `CREATE FUNCTION AS` statement."""
type = "function_definition"
match_grammar: Matchable = Sequence(
"AS",
Ref("QuotedLiteralSegment"),
Sequence(
"LANGUAGE",
# Not really a parameter, but best fit for now.
Ref("ParameterNameSegment"),
optional=True,
),
)
class CreateFunctionStatementSegment(BaseSegment):
"""A `CREATE FUNCTION` statement.
This version in the ANSI dialect should be a "common subset" of the
structure of the code for those dialects.
postgres: https://www.postgresql.org/docs/9.1/sql-createfunction.html
snowflake: https://docs.snowflake.com/en/sql-reference/sql/create-function.html
bigquery:
https://cloud.google.com/bigquery/docs/reference/standard-sql/user-defined-functions
"""
type = "create_function_statement"
match_grammar: Matchable = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Anything(),
)
parse_grammar: Optional[Matchable] = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Ref("IfNotExistsGrammar", optional=True),
Ref("FunctionNameSegment"),
Ref("FunctionParameterListGrammar"),
Sequence( # Optional function return type
"RETURNS",
Ref("DatatypeSegment"),
optional=True,
),
Ref("FunctionDefinitionGrammar"),
)
class FunctionParameterListGrammar(BaseSegment):
"""The parameters for a function ie. `(string, number)`."""
type = "function_parameter_list"
# Function parameter list
match_grammar: Matchable = Bracketed(
Delimited(
Ref("FunctionParameterGrammar"),
delimiter=Ref("CommaSegment"),
optional=True,
),
)
class CreateModelStatementSegment(BaseSegment):
"""A BigQuery `CREATE MODEL` statement."""
type = "create_model_statement"
# https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create
match_grammar: Matchable = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
"MODEL",
Ref("IfNotExistsGrammar", optional=True),
Ref("ObjectReferenceSegment"),
Sequence(
"OPTIONS",
Bracketed(
Delimited(
Sequence(
Ref("ParameterNameSegment"),
Ref("EqualsSegment"),
OneOf(
# This covers many but not all the extensive list of
# possible 'CREATE MODEL' options.
Ref("LiteralGrammar"), # Single value
Bracketed(
# E.g. input_label_cols: list of column names
Delimited(Ref("QuotedLiteralSegment")),
bracket_type="square",
optional=True,
),
),
),
)
),
optional=True,
),
"AS",
Ref("SelectableGrammar"),
)
class CreateTypeStatementSegment(BaseSegment):
"""A `CREATE TYPE` statement.
This is based around the Postgres syntax.
https://www.postgresql.org/docs/current/sql-createtype.html
Note: This is relatively permissive currently
and does not lint the syntax strictly, to allow
for some deviation between dialects.
"""
type = "create_type_statement"
match_grammar: Matchable = Sequence(
"CREATE",
"TYPE",
Ref("ObjectReferenceSegment"),
Sequence("AS", OneOf("ENUM", "RANGE", optional=True), optional=True),
Bracketed(Delimited(Anything()), optional=True),
)
class CreateRoleStatementSegment(BaseSegment):
"""A `CREATE ROLE` statement.
A very simple create role syntax which can be extended
by other dialects.
"""
type = "create_role_statement"
match_grammar: Matchable = Sequence(
"CREATE",
"ROLE",
Ref("ObjectReferenceSegment"),
)
class DropModelStatementSegment(BaseSegment):
"""A `DROP MODEL` statement."""
type = "drop_MODELstatement"
    # DROP MODEL <Model name> [IF EXISTS]
# https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-drop-model
match_grammar: Matchable = Sequence(
"DROP",
"MODEL",
Ref("IfExistsGrammar", optional=True),
Ref("ObjectReferenceSegment"),
)
class MLTableExpressionSegment(BaseSegment):
"""An ML table expression."""
type = "ml_table_expression"
# E.g. ML.WEIGHTS(MODEL `project.dataset.model`)
match_grammar: Matchable = Sequence(
"ML",
Ref("DotSegment"),
Ref("SingleIdentifierGrammar"),
Bracketed(
Sequence("MODEL", Ref("ObjectReferenceSegment")),
OneOf(
Sequence(
Ref("CommaSegment"),
Bracketed(
Ref("SelectableGrammar"),
),
),
optional=True,
),
),
)
class StatementSegment(BaseSegment):
"""A generic segment, to any of its child subsegments."""
type = "statement"
match_grammar: Matchable = GreedyUntil(Ref("DelimiterSegment"))
parse_grammar: Matchable = OneOf(
Ref("SelectableGrammar"),
Ref("MergeStatementSegment"),
Ref("InsertStatementSegment"),
Ref("TransactionStatementSegment"),
Ref("DropTableStatementSegment"),
Ref("DropViewStatementSegment"),
Ref("DropUserStatementSegment"),
Ref("TruncateStatementSegment"),
Ref("AccessStatementSegment"),
Ref("CreateTableStatementSegment"),
Ref("CreateTypeStatementSegment"),
Ref("CreateRoleStatementSegment"),
Ref("AlterTableStatementSegment"),
Ref("CreateSchemaStatementSegment"),
Ref("SetSchemaStatementSegment"),
Ref("DropSchemaStatementSegment"),
Ref("DropTypeStatementSegment"),
Ref("CreateDatabaseStatementSegment"),
Ref("DropDatabaseStatementSegment"),
Ref("CreateExtensionStatementSegment"),
Ref("CreateIndexStatementSegment"),
Ref("DropIndexStatementSegment"),
Ref("CreateViewStatementSegment"),
Ref("DeleteStatementSegment"),
Ref("UpdateStatementSegment"),
Ref("CreateFunctionStatementSegment"),
Ref("CreateModelStatementSegment"),
Ref("DropModelStatementSegment"),
Ref("DescribeStatementSegment"),
Ref("UseStatementSegment"),
Ref("ExplainStatementSegment"),
Ref("CreateSequenceStatementSegment"),
Ref("AlterSequenceStatementSegment"),
Ref("DropSequenceStatementSegment"),
Ref("CreateTriggerStatementSegment"),
Ref("DropTriggerStatementSegment"),
)
def get_table_references(self):
"""Use parsed tree to extract table references."""
table_refs = {
tbl_ref.raw for tbl_ref in self.recursive_crawl("table_reference")
}
cte_refs = {
cte_def.get_identifier().raw
for cte_def in self.recursive_crawl("common_table_expression")
}
# External references are any table references which aren't
# also cte aliases.
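        # e.g. for "WITH cte AS (SELECT a FROM t) SELECT * FROM cte, u"
        # this returns {"t", "u"}: "cte" is an alias, not an external table.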
return table_refs - cte_refs
class WithNoSchemaBindingClauseSegment(BaseSegment):
"""WITH NO SCHEMA BINDING clause for Redshift's Late Binding Views.
https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_VIEW.html
"""
type = "with_no_schema_binding_clause"
match_grammar: Matchable = Sequence(
"WITH",
"NO",
"SCHEMA",
"BINDING",
)
class WithDataClauseSegment(BaseSegment):
"""WITH [NO] DATA clause for Postgres' MATERIALIZED VIEWS.
https://www.postgresql.org/docs/9.3/sql-creatematerializedview.html
"""
type = "with_data_clause"
match_grammar: Matchable = Sequence("WITH", Sequence("NO", optional=True), "DATA")
class DescribeStatementSegment(BaseSegment):
"""A `Describe` statement.
DESCRIBE <object type> <object name>
"""
type = "describe_statement"
match_grammar: Matchable = StartsWith("DESCRIBE")
parse_grammar: Optional[Matchable] = Sequence(
"DESCRIBE",
Ref("NakedIdentifierSegment"),
Ref("ObjectReferenceSegment"),
)
class UseStatementSegment(BaseSegment):
"""A `USE` statement."""
type = "use_statement"
match_grammar: Matchable = Sequence(
"USE",
Ref("DatabaseReferenceSegment"),
)
class ExplainStatementSegment(BaseSegment):
"""An `Explain` statement.
EXPLAIN explainable_stmt
"""
type = "explain_statement"
explainable_stmt: Matchable = OneOf(
Ref("SelectableGrammar"),
Ref("InsertStatementSegment"),
Ref("UpdateStatementSegment"),
Ref("DeleteStatementSegment"),
)
match_grammar: Matchable = Sequence(
"EXPLAIN",
explainable_stmt,
)
class CreateSequenceOptionsSegment(BaseSegment):
"""Options for Create Sequence statement.
https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_6015.htm
"""
type = "create_sequence_options_segment"
match_grammar: Matchable = OneOf(
Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")),
Sequence(
"START", Ref.keyword("WITH", optional=True), Ref("NumericLiteralSegment")
),
OneOf(
Sequence("MINVALUE", Ref("NumericLiteralSegment")),
Sequence("NO", "MINVALUE"),
),
OneOf(
Sequence("MAXVALUE", Ref("NumericLiteralSegment")),
Sequence("NO", "MAXVALUE"),
),
OneOf(Sequence("CACHE", Ref("NumericLiteralSegment")), "NOCACHE"),
OneOf("CYCLE", "NOCYCLE"),
OneOf("ORDER", "NOORDER"),
)
class CreateSequenceStatementSegment(BaseSegment):
"""Create Sequence statement.
https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_6015.htm
"""
type = "create_sequence_statement"
match_grammar: Matchable = Sequence(
"CREATE",
"SEQUENCE",
Ref("SequenceReferenceSegment"),
AnyNumberOf(Ref("CreateSequenceOptionsSegment"), optional=True),
)
class AlterSequenceOptionsSegment(BaseSegment):
"""Options for Alter Sequence statement.
https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_2011.htm
"""
type = "alter_sequence_options_segment"
match_grammar: Matchable = OneOf(
Sequence("INCREMENT", "BY", Ref("NumericLiteralSegment")),
OneOf(
Sequence("MINVALUE", Ref("NumericLiteralSegment")),
Sequence("NO", "MINVALUE"),
),
OneOf(
Sequence("MAXVALUE", Ref("NumericLiteralSegment")),
Sequence("NO", "MAXVALUE"),
),
OneOf(Sequence("CACHE", Ref("NumericLiteralSegment")), "NOCACHE"),
OneOf("CYCLE", "NOCYCLE"),
OneOf("ORDER", "NOORDER"),
)
class AlterSequenceStatementSegment(BaseSegment):
"""Alter Sequence Statement.
https://docs.oracle.com/cd/B19306_01/server.102/b14200/statements_2011.htm
"""
type = "alter_sequence_statement"
match_grammar: Matchable = Sequence(
"ALTER",
"SEQUENCE",
Ref("SequenceReferenceSegment"),
AnyNumberOf(Ref("AlterSequenceOptionsSegment")),
)
class DropSequenceStatementSegment(BaseSegment):
"""Drop Sequence Statement.
https://docs.oracle.com/cd/E11882_01/server.112/e41084/statements_9001.htm
"""
type = "drop_sequence_statement"
match_grammar: Matchable = Sequence(
"DROP", "SEQUENCE", Ref("SequenceReferenceSegment")
)
class DatePartFunctionNameSegment(BaseSegment):
"""DATEADD function name segment.
Need to be able to specify this as type function_name
so that linting rules identify it properly
"""
type = "function_name"
match_grammar: Matchable = Ref("DatePartFunctionName")
class CreateTriggerStatementSegment(BaseSegment):
"""Create Trigger Statement.
https://www.postgresql.org/docs/14/sql-createtrigger.html
    Edited as per the notes above on what does not match ANSI.
"""
type = "create_trigger"
match_grammar: Matchable = Sequence("CREATE", "TRIGGER", Anything())
parse_grammar: Optional[Matchable] = Sequence(
"CREATE",
"TRIGGER",
Ref("TriggerReferenceSegment"),
OneOf("BEFORE", "AFTER", Sequence("INSTEAD", "OF"), optional=True),
Delimited(
"INSERT",
"DELETE",
Sequence(
"UPDATE",
"OF",
Delimited(
Ref("ColumnReferenceSegment"),
terminator=OneOf("OR", "ON"),
),
),
delimiter="OR",
terminator="ON",
),
"ON",
Ref("TableReferenceSegment"),
AnyNumberOf(
Sequence(
"REFERENCING",
"OLD",
"ROW",
"AS",
Ref("ParameterNameSegment"),
"NEW",
"ROW",
"AS",
Ref("ParameterNameSegment"),
),
Sequence("FROM", Ref("TableReferenceSegment")),
OneOf(
Sequence("NOT", "DEFERRABLE"),
Sequence(
Ref.keyword("DEFERRABLE", optional=True),
OneOf(
Sequence("INITIALLY", "IMMEDIATE"),
Sequence("INITIALLY", "DEFERRED"),
),
),
),
Sequence(
"FOR", Ref.keyword("EACH", optional=True), OneOf("ROW", "STATEMENT")
),
Sequence("WHEN", Bracketed(Ref("ExpressionSegment"))),
),
Sequence(
"EXECUTE",
"PROCEDURE",
Ref("FunctionNameIdentifierSegment"),
Bracketed(Ref("FunctionContentsGrammar", optional=True)),
optional=True,
),
)
class DropTriggerStatementSegment(BaseSegment):
"""Drop Trigger Statement.
Taken from specification in https://www.postgresql.org/docs/14/sql-droptrigger.html
    Edited as per the notes above on what does not match ANSI.
"""
type = "drop_trigger"
match_grammar: Matchable = Sequence(
"DROP", "TRIGGER", Ref("TriggerReferenceSegment")
)
class SamplingExpressionSegment(BaseSegment):
"""A sampling expression."""
type = "sample_expression"
match_grammar: Matchable = Sequence(
"TABLESAMPLE",
OneOf("BERNOULLI", "SYSTEM"),
Bracketed(Ref("NumericLiteralSegment")),
Sequence(
OneOf("REPEATABLE"),
Bracketed(Ref("NumericLiteralSegment")),
optional=True,
),
)
class LocalAliasSegment(BaseSegment):
    """The `LOCAL.ALIAS` syntax allows using an alias name of a column within clauses.
A hookpoint for other dialects e.g. Exasol.
"""
type = "local_alias_segment"
match_grammar: Matchable = Nothing()
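# Hedged usage sketch: the statements and dialect name below are illustrative and
# assume sqlfluff's public Linter API; run this module directly to try it out.
if __name__ == "__main__":
    from sqlfluff.core import Linter

    _linter = Linter(dialect="ansi")
    for _sql in ("EXPLAIN SELECT 1", "USE my_db"):
        _parsed = _linter.parse_string(_sql)
        # EXPLAIN should produce an explain_statement node and USE a use_statement
        # node in the resulting parse tree.
        if _parsed.tree:
            print(_parsed.tree.stringify())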
| 32.244002
| 102
| 0.583651
|
6ee89f1ae37b1877dfdc45c22a0e2829de13a380
| 2,948
|
py
|
Python
|
pibrew/models.py
|
MrLeeh/pibrew
|
a2dc764e1ebfc5945d542e8f3e3b18251223974b
|
[
"MIT"
] | null | null | null |
pibrew/models.py
|
MrLeeh/pibrew
|
a2dc764e1ebfc5945d542e8f3e3b18251223974b
|
[
"MIT"
] | null | null | null |
pibrew/models.py
|
MrLeeh/pibrew
|
a2dc764e1ebfc5945d542e8f3e3b18251223974b
|
[
"MIT"
] | null | null | null |
from . import db
from utils import s_to_hms, hms_to_s
class OrderableMixin:
# TODO: implement testing
order = db.Column(db.Integer, index=True)
def _get_model_class(self):
for c in db.Model._decl_class_registry.values():
if (hasattr(c, '__tablename__') and
c.__tablename__ == self.__tablename__):
return c
def __init__(self):
self._model = self._get_model_class()
if self._model.query.count() == 0:
self.order = 0
else:
self.order = max((item.order for item in self._model.query)) + 1
def move_up(self):
self._model = self._get_model_class()
items = self._model.query.order_by(self._model.order).all()
id_ = items.index(self)
# if first item then do nothing
if id_ == 0:
return
# get the item before which we swap position with
item_before = items[id_ - 1]
# swap order numbers with the item before
x = self.order
self.order = item_before.order
item_before.order = x
db.session.add(self)
db.session.add(item_before)
db.session.commit()
# normalize order numbers for all items
for i, item in enumerate(self._model.query.order_by(self._model.order)):
item.order = i
db.session.commit()
def move_down(self):
self._model = self._get_model_class()
items = self._model.query.order_by(self._model.order).all()
id_ = items.index(self)
# if first item then do nothing
if id_ == len(items) - 1:
return
# get the item before which we swap position with
item_after = items[id_ + 1]
# swap order numbers with the item before
x = self.order
self.order = item_after.order
item_after.order = x
db.session.add(self)
db.session.add(item_after)
db.session.commit()
# normalize order numbers for all items
for i, item in enumerate(self._model.query.order_by(self._model.order)):
item.order = i
db.session.commit()
class Setting(db.Model):
__tablename__ = 'settings'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
value = db.Column(db.Float)
class SequenceStep(db.Model, OrderableMixin):
    __tablename__ = 'sequence_steps'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
duration = db.Column(db.Integer)
temperature = db.Column(db.Float(precision=1))
tolerance = db.Column(db.Float(precision=1))
heater = db.Column(db.Boolean)
mixer = db.Column(db.Boolean)
def __init__(self, *args, **kwargs):
db.Model.__init__(self, *args, **kwargs)
OrderableMixin.__init__(self)
    def duration_formatted(self):
        return '{:02}:{:02}:{:02}'.format(*s_to_hms(self.duration))
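# Hedged usage sketch (not called anywhere): shows how OrderableMixin assigns and
# reorders the `order` column. The step names/values are illustrative, and an
# active application/database context is assumed.
def _orderable_mixin_sketch():
    step_a = SequenceStep(name='Mash in', duration=3600, temperature=67.0,
                          tolerance=0.5, heater=True, mixer=True)
    db.session.add(step_a)
    db.session.commit()            # first step gets order == 0
    step_b = SequenceStep(name='Boil', duration=5400, temperature=99.0,
                          tolerance=1.0, heater=True, mixer=False)
    db.session.add(step_b)
    db.session.commit()            # next step gets order == 1
    step_b.move_up()               # swaps with step_a, then renormalises 0..n-1
    return [s.name for s in SequenceStep.query.order_by(SequenceStep.order)]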
| 29.188119
| 80
| 0.611601
|
edd057810862e9b45592c01aea58e9494fbf7d26
| 113
|
py
|
Python
|
nft_generator/command/version.py
|
nft-generator/nft_generator
|
e2e17a204054a39eaf5d904d0956542e11155360
|
[
"MIT"
] | 6
|
2021-12-05T10:00:28.000Z
|
2022-01-09T02:22:46.000Z
|
nft_generator/command/version.py
|
nft-generator/nft_generator
|
e2e17a204054a39eaf5d904d0956542e11155360
|
[
"MIT"
] | null | null | null |
nft_generator/command/version.py
|
nft-generator/nft_generator
|
e2e17a204054a39eaf5d904d0956542e11155360
|
[
"MIT"
] | 5
|
2021-12-05T14:35:37.000Z
|
2022-01-13T17:02:10.000Z
|
import click
from nft_generator import __version__
@click.command()
def version():
print(__version__)
| 12.555556
| 37
| 0.734513
|
6d16d84f070ed0a9cc38ff77ebd670d3de9e514b
| 409
|
py
|
Python
|
src/jk_argparsing/textprimitives/__init__.py
|
jkpubsrc/python-module-jk-argsparsing
|
9ef4b907c67fd1df5bd8c378df46ba527ebc2a83
|
[
"Apache-2.0"
] | null | null | null |
src/jk_argparsing/textprimitives/__init__.py
|
jkpubsrc/python-module-jk-argsparsing
|
9ef4b907c67fd1df5bd8c378df46ba527ebc2a83
|
[
"Apache-2.0"
] | null | null | null |
src/jk_argparsing/textprimitives/__init__.py
|
jkpubsrc/python-module-jk-argsparsing
|
9ef4b907c67fd1df5bd8c378df46ba527ebc2a83
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.2021.9.10"
from .XLineFragment import XLineFragment
from .columnLayouterL2R import columnLayouterL2R
from .ITextBlock import ITextBlock
from .TextBlock import TextBlock
from .TextBlockSequence import TextBlockSequence
from .TextGridBlock import TextGridBlock
from .TextPrefixBlock import TextPrefixBlock
from .TextColumnsBlock import TextColumnsBlock
from .TextEmpty import TextEmpty
| 22.722222
| 48
| 0.848411
|
86c658d051dd3e28e7bfecdccc237396528753ca
| 9,077
|
py
|
Python
|
catalyst/utils/config.py
|
vaklyuenkov/catalyst
|
402294aa5b27784d23cee2b8fff5a1ed26dec8a8
|
[
"Apache-2.0"
] | 1
|
2019-11-29T11:23:13.000Z
|
2019-11-29T11:23:13.000Z
|
catalyst/utils/config.py
|
vaklyuenkov/catalyst
|
402294aa5b27784d23cee2b8fff5a1ed26dec8a8
|
[
"Apache-2.0"
] | null | null | null |
catalyst/utils/config.py
|
vaklyuenkov/catalyst
|
402294aa5b27784d23cee2b8fff5a1ed26dec8a8
|
[
"Apache-2.0"
] | null | null | null |
import copy
import json
import os
import platform
import shutil
import subprocess
import sys
import re
from collections import OrderedDict
from logging import getLogger
from pathlib import Path
from typing import List, Any, Dict, Union
import safitty
import yaml
from catalyst import utils
from catalyst.utils.tensorboard import SummaryWriter
LOG = getLogger(__name__)
def load_ordered_yaml(
stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict
):
"""
Loads `yaml` config into OrderedDict
Args:
stream: opened file with yaml
Loader: base class for yaml Loader
object_pairs_hook: type of mapping
Returns:
dict: configuration
"""
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping
)
OrderedLoader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
u"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""", re.X
), list(u"-+0123456789.")
)
return yaml.load(stream, OrderedLoader)
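def _load_ordered_yaml_sketch():
    # Hedged usage sketch (not called anywhere): mapping order is preserved and the
    # extra resolver lets exponent-style literals such as 1e-4 load as floats.
    import io
    text = "model_params:\n  lr: 1e-4\nstages:\n  stage1: {}\n"
    config = load_ordered_yaml(io.StringIO(text))
    assert list(config.keys()) == ["model_params", "stages"]
    assert isinstance(config["model_params"]["lr"], float)
    return config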
def _decode_dict(dictionary: Dict[str, Union[bytes, str]]) -> Dict[str, str]:
"""
Decode bytes values in the dictionary to UTF-8
Args:
dictionary: a dict
Returns:
dict: decoded dict
"""
result = {
k: v.decode("UTF-8") if type(v) == bytes else v
for k, v in dictionary.items()
}
return result
def get_environment_vars() -> Dict[str, Any]:
"""
Creates a dictionary with environment variables
Returns:
dict: environment variables
"""
result = {
"python_version": sys.version,
"conda_environment": os.environ.get("CONDA_DEFAULT_ENV", ""),
"creation_time": utils.get_utcnow_time(),
"sysname": platform.uname()[0],
"nodename": platform.uname()[1],
"release": platform.uname()[2],
"version": platform.uname()[3],
"architecture": platform.uname()[4],
"user": os.environ.get("USER", ""),
"path": os.environ.get("PWD", ""),
}
with open(os.devnull, "w") as devnull:
try:
            git_branch = subprocess.check_output(
                "git rev-parse --abbrev-ref HEAD".split(),
                stderr=devnull
            ).strip().decode("UTF-8")
            git_local_commit = subprocess.check_output(
                "git rev-parse HEAD".split(), stderr=devnull
            )
            git_origin_commit = subprocess.check_output(
                f"git rev-parse origin/{git_branch}".split(),
                stderr=devnull
            )
git = dict(
branch=git_branch,
local_commit=git_local_commit,
origin_commit=git_origin_commit
)
result["git"] = _decode_dict(git)
except (subprocess.CalledProcessError, FileNotFoundError):
pass
result = _decode_dict(result)
return result
def list_pip_packages() -> str:
result = ""
with open(os.devnull, "w") as devnull:
try:
result = subprocess.check_output(
"pip freeze".split(), stderr=devnull
).strip().decode("UTF-8")
except FileNotFoundError:
pass
except subprocess.CalledProcessError as e:
raise Exception("Failed to list packages") from e
return result
def list_conda_packages() -> str:
result = ""
conda_meta_path = Path(sys.prefix) / "conda-meta"
if conda_meta_path.exists():
# We are currently in conda venv
with open(os.devnull, "w") as devnull:
try:
result = subprocess.check_output(
"conda list --export".split(), stderr=devnull
).strip().decode("UTF-8")
except FileNotFoundError:
pass
except subprocess.CalledProcessError as e:
raise Exception(
"Running from conda env, but failed to list conda packages"
) from e
return result
def dump_environment(
experiment_config: Dict,
logdir: str,
configs_path: List[str] = None,
) -> None:
"""
Saves config, environment variables and package list in JSON into logdir
Args:
experiment_config (dict): experiment config
logdir (str): path to logdir
configs_path: path(s) to config
"""
configs_path = configs_path or []
configs_path = [
Path(path) for path in configs_path if isinstance(path, str)
]
config_dir = Path(logdir) / "configs"
config_dir.mkdir(exist_ok=True, parents=True)
environment = get_environment_vars()
safitty.save(experiment_config, config_dir / "_config.json")
safitty.save(environment, config_dir / "_environment.json")
pip_pkg = list_pip_packages()
(config_dir / "pip-packages.txt").write_text(pip_pkg)
conda_pkg = list_conda_packages()
if conda_pkg:
(config_dir / "conda-packages.txt").write_text(conda_pkg)
for path in configs_path:
name: str = path.name
outpath = config_dir / name
shutil.copyfile(path, outpath)
config_str = json.dumps(experiment_config, indent=2, ensure_ascii=False)
config_str = config_str.replace("\n", "\n\n")
environment_str = json.dumps(environment, indent=2, ensure_ascii=False)
environment_str = environment_str.replace("\n", "\n\n")
pip_pkg = pip_pkg.replace("\n", "\n\n")
conda_pkg = conda_pkg.replace("\n", "\n\n")
with SummaryWriter(config_dir) as writer:
writer.add_text("_config", config_str, 0)
writer.add_text("_environment", environment_str, 0)
writer.add_text("pip-packages", pip_pkg, 0)
if conda_pkg:
writer.add_text("conda-packages", conda_pkg, 0)
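# Hedged call sketch for dump_environment (paths below are illustrative):
#   dump_environment(
#       experiment_config={"model_params": {"lr": 1e-3}},
#       logdir="./logs/experiment_1",
#       configs_path=["configs/train.yml"],
#   )
# This writes _config.json, _environment.json and pip-packages.txt (plus
# conda-packages.txt inside a conda env) under ./logs/experiment_1/configs and
# mirrors the same text into TensorBoard via SummaryWriter.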
def parse_config_args(*, config, args, unknown_args):
for arg in unknown_args:
arg_name, value = arg.split("=")
arg_name = arg_name.lstrip("-").strip("/")
value_content, value_type = value.rsplit(":", 1)
if "/" in arg_name:
arg_names = arg_name.split("/")
if value_type == "str":
arg_value = value_content
if arg_value.lower() == "none":
arg_value = None
else:
arg_value = eval("%s(%s)" % (value_type, value_content))
config_ = config
for arg_name in arg_names[:-1]:
if arg_name not in config_:
config_[arg_name] = {}
config_ = config_[arg_name]
config_[arg_names[-1]] = arg_value
else:
if value_type == "str":
arg_value = value_content
else:
arg_value = eval("%s(%s)" % (value_type, value_content))
args.__setattr__(arg_name, arg_value)
args_exists_ = config.get("args")
if args_exists_ is None:
config["args"] = dict()
for key, value in args._get_kwargs():
if value is not None:
if key in ["logdir", "baselogdir"] and value == "":
continue
config["args"][key] = value
return config, args
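def _parse_config_args_sketch():
    # Hedged sketch (not called anywhere): unknown CLI arguments of the form
    # --nested/key=value:type are folded into the nested config dict, while flat
    # --key=value:type arguments are attached to the argparse namespace.
    import argparse
    args = argparse.Namespace(configs=[], logdir="", baselogdir=None)
    config, args = parse_config_args(
        config={"model_params": {}},
        args=args,
        unknown_args=["--model_params/lr=0.001:float", "--seed=42:int"],
    )
    # Now config["model_params"]["lr"] == 0.001 and args.seed == 42.
    return config, args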
def parse_args_uargs(args, unknown_args):
"""
Function for parsing configuration files
Args:
args: recognized arguments
unknown_args: unrecognized arguments
Returns:
tuple: updated arguments, dict with config
"""
args_ = copy.deepcopy(args)
# load params
config = {}
for config_path in args_.configs:
with open(config_path, "r") as fin:
if config_path.endswith("json"):
config_ = json.load(fin, object_pairs_hook=OrderedDict)
elif config_path.endswith("yml"):
config_ = load_ordered_yaml(fin)
else:
raise Exception("Unknown file format")
config = utils.merge_dicts(config, config_)
config, args_ = parse_config_args(
config=config, args=args_, unknown_args=unknown_args
)
# hack with argparse in config
config_args = config.get("args", None)
if config_args is not None:
for key, value in config_args.items():
arg_value = getattr(args_, key, None)
if arg_value is None or \
(key in ["logdir", "baselogdir"] and arg_value == ""):
arg_value = value
setattr(args_, key, arg_value)
return args_, config
__all__ = [
"load_ordered_yaml", "get_environment_vars", "dump_environment",
"parse_config_args", "parse_args_uargs"
]
| 29.858553
| 79
| 0.585546
|
bb9dd9f051edcad29be8229ad4954aaeed2c4745
| 16,217
|
py
|
Python
|
src/application-insights/azext_applicationinsights/vendored_sdks/mgmt_applicationinsights/v2015_05_01/operations/_annotations_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/application-insights/azext_applicationinsights/vendored_sdks/mgmt_applicationinsights/v2015_05_01/operations/_annotations_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/application-insights/azext_applicationinsights/vendored_sdks/mgmt_applicationinsights/v2015_05_01/operations/_annotations_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AnnotationsOperations(object):
"""AnnotationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2015_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
resource_name, # type: str
start, # type: str
end, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AnnotationsListResult"]
"""Gets the list of annotations for a component for given time range.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param start: The start time to query from for annotations, cannot be older than 90 days from
current date.
:type start: str
:param end: The end time to query for annotations.
:type end: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AnnotationsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.applicationinsights.v2015_05_01.models.AnnotationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AnnotationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
query_parameters['start'] = self._serialize.query("start", start, 'str')
query_parameters['end'] = self._serialize.query("end", end, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AnnotationsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.AnnotationError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/Annotations'} # type: ignore
def create(
self,
resource_group_name, # type: str
resource_name, # type: str
annotation_properties, # type: "_models.Annotation"
**kwargs # type: Any
):
# type: (...) -> List["_models.Annotation"]
"""Create an Annotation of an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param annotation_properties: Properties that need to be specified to create an annotation of an
Application Insights component.
:type annotation_properties: ~azure.mgmt.applicationinsights.v2015_05_01.models.Annotation
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Annotation, or the result of cls(response)
:rtype: list[~azure.mgmt.applicationinsights.v2015_05_01.models.Annotation]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.Annotation"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(annotation_properties, 'Annotation')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AnnotationError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[Annotation]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/Annotations'} # type: ignore
def delete(
self,
resource_group_name, # type: str
resource_name, # type: str
annotation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete an Annotation of an Application Insights component.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param annotation_id: The unique annotation ID. This is unique within an Application Insights
component.
:type annotation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'annotationId': self._serialize.url("annotation_id", annotation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/Annotations/{annotationId}'} # type: ignore
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
annotation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.Annotation"]
"""Get the annotation for given id.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
        :param annotation_id: The unique annotation ID. This is unique within an Application Insights
component.
:type annotation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Annotation, or the result of cls(response)
:rtype: list[~azure.mgmt.applicationinsights.v2015_05_01.models.Annotation]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.Annotation"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'annotationId': self._serialize.url("annotation_id", annotation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AnnotationError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[Annotation]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/components/{resourceName}/Annotations/{annotationId}'} # type: ignore
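def _annotations_usage_sketch():
    # Hedged usage sketch (not called anywhere). It assumes the regular
    # ``azure-mgmt-applicationinsights`` and ``azure-identity`` packages rather than
    # this vendored copy, and all resource names/dates below are placeholders.
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient

    client = ApplicationInsightsManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    for annotation in client.annotations.list(
        resource_group_name="my-rg",
        resource_name="my-app-insights",
        start="2021-01-01T00:00:00Z",
        end="2021-01-08T00:00:00Z",
    ):
        print(annotation.annotation_name)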
| 49.898462
| 197
| 0.663995
|
40d45d7dd6ffc8f2dc4937acc7380fe153027fe2
| 1,110
|
py
|
Python
|
program2solution/generatordriver.py
|
dpocheng/Python-Data-Structure-of-Python
|
66b99fa41f831e8c3089e4e2fd664a7ef3268e5e
|
[
"Apache-2.0"
] | null | null | null |
program2solution/generatordriver.py
|
dpocheng/Python-Data-Structure-of-Python
|
66b99fa41f831e8c3089e4e2fd664a7ef3268e5e
|
[
"Apache-2.0"
] | null | null | null |
program2solution/generatordriver.py
|
dpocheng/Python-Data-Structure-of-Python
|
66b99fa41f831e8c3089e4e2fd664a7ef3268e5e
|
[
"Apache-2.0"
] | null | null | null |
from generator import lets, transform, running_count, n_with_pad, sequence, alternate
for i in transform('abCdeFg',str.upper):
print(i,end=' ')
print()
for i in transform(lets('abCdeFg'),str.upper):
print(i,end=' ')
print()
for i in running_count('bananastand',lambda x : x in 'aeiou'): # is vowel
print(i,end=' ')
print()
for i in running_count(lets('bananastand'),lambda x : x in 'aeiou'): # is vowel
print(i,end=' ')
print()
for i in n_with_pad('abcdefg',3,None):
print(i,end=' ')
print()
for i in n_with_pad('abcdefg',10,'?'):
print(i,end=' ')
print()
for i in n_with_pad('abcdefg',10):
print(i,end=' ')
print()
for i in n_with_pad(lets('abcdefg'),10):
print(i,end=' ')
print()
for i in sequence('abcde','fg','hijk'):
print(i,end=' ')
print()
for i in sequence(lets('abcde'),lets('fg'),lets('hijk')):
print(i,end=' ')
print()
for i in alternate('abcde','fg','hijk'):
print(i,end=' ')
print()
for i in alternate(lets('abcde'),lets('fg'),lets('hijk')):
print(i,end=' ')
print()
| 19.137931
| 86
| 0.581081
|
6d5f4cc54d13ce3abdf7fad62f8a6679715e0d5e
| 7,038
|
py
|
Python
|
docs/conf.py
|
peopledoc/django-pipeline
|
59865e1387a2b785a3948244639d1827fd4f2c99
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
peopledoc/django-pipeline
|
59865e1387a2b785a3948244639d1827fd4f2c99
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
peopledoc/django-pipeline
|
59865e1387a2b785a3948244639d1827fd4f2c99
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Pipeline documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 30 17:47:55 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-pipeline'
copyright = u'2011-2012, Timothée Peignier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
release = '1.3.20'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-pipelinedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-pipeline.tex', u'Pipeline Documentation',
u'Timothée Peignier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-pipeline', u'Pipeline Documentation',
[u'Timothée Peignier'], 1)
]
| 32.284404
| 80
| 0.720233
|
5b9cccf42399e80bc7cfa831f6ef74365b8c8910
| 315
|
py
|
Python
|
AirTrafficDevTool/tutorial.py
|
tegginamaniss/AirTrafficDevTool
|
fa19efbcffa8c2d2cc30c194b29d8ca6dee6131a
|
[
"MIT"
] | null | null | null |
AirTrafficDevTool/tutorial.py
|
tegginamaniss/AirTrafficDevTool
|
fa19efbcffa8c2d2cc30c194b29d8ca6dee6131a
|
[
"MIT"
] | null | null | null |
AirTrafficDevTool/tutorial.py
|
tegginamaniss/AirTrafficDevTool
|
fa19efbcffa8c2d2cc30c194b29d8ca6dee6131a
|
[
"MIT"
] | null | null | null |
"""This file is to demonstrate the use of the modules"""
# Overview
from AirTrafficDevTool.overview import Overview
view = Overview(input_file='sample.yaml', save_plots=True)
print(view.output_names())
# SSD Individual
from AirTrafficDevTool.ssd_individual import SSDi
# SSD
from AirTrafficDevTool.ssd import SSD
| 26.25
| 58
| 0.803175
|
be07692a1ccb7cead63490a42a2abd60dc80bb81
| 809
|
py
|
Python
|
setup.py
|
RileyGibbs/django-emailmgr
|
82dae79aceab20ac2146103067d31b01ee51731a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
RileyGibbs/django-emailmgr
|
82dae79aceab20ac2146103067d31b01ee51731a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
RileyGibbs/django-emailmgr
|
82dae79aceab20ac2146103067d31b01ee51731a
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from distutils.core import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='django-emailmgr',
version='2.0',
      description = "An email manager for Django users",
long_description = read('README'),
author='Val L33',
author_email='val@neekware.com',
url='https://github.com/un33k/django-emailmgr',
packages=['emailmgr'],
#install_requires = [''],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'],
)
| 32.36
| 70
| 0.592089
|
02d5d8440b26cdaeeaeb58dc283e1052d6445168
| 8,196
|
py
|
Python
|
ml-agents/mlagents/trainers/ppo/optimizer_torch.py
|
mattinjersey/ml-agents
|
7dcf947f541d661cc4a94b0e7758e1b12c6be078
|
[
"Apache-2.0"
] | 1
|
2021-12-02T23:22:56.000Z
|
2021-12-02T23:22:56.000Z
|
ml-agents/mlagents/trainers/ppo/optimizer_torch.py
|
mattinjersey/ml-agents
|
7dcf947f541d661cc4a94b0e7758e1b12c6be078
|
[
"Apache-2.0"
] | null | null | null |
ml-agents/mlagents/trainers/ppo/optimizer_torch.py
|
mattinjersey/ml-agents
|
7dcf947f541d661cc4a94b0e7758e1b12c6be078
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, cast
from mlagents.torch_utils import torch
from mlagents.trainers.buffer import AgentBuffer
from mlagents_envs.timers import timed
from mlagents.trainers.policy.torch_policy import TorchPolicy
from mlagents.trainers.optimizer.torch_optimizer import TorchOptimizer
from mlagents.trainers.settings import TrainerSettings, PPOSettings
from mlagents.trainers.torch.agent_action import AgentAction
from mlagents.trainers.torch.action_log_probs import ActionLogProbs
from mlagents.trainers.torch.utils import ModelUtils
class TorchPPOOptimizer(TorchOptimizer):
def __init__(self, policy: TorchPolicy, trainer_settings: TrainerSettings):
"""
        Takes a Policy and a TrainerSettings object and creates an Optimizer around the policy.
        The PPO optimizer has a value estimator and a loss function.
        :param policy: A TorchPolicy object that will be updated by this PPO Optimizer.
        :param trainer_settings: Trainer settings that specify the
        properties of the trainer.
"""
        # Initialize the shared TorchOptimizer state (policy, settings, reward signals)
        # before building the PPO-specific pieces.
super().__init__(policy, trainer_settings)
params = list(self.policy.actor_critic.parameters())
self.hyperparameters: PPOSettings = cast(
PPOSettings, trainer_settings.hyperparameters
)
self.decay_learning_rate = ModelUtils.DecayedValue(
self.hyperparameters.learning_rate_schedule,
self.hyperparameters.learning_rate,
1e-10,
self.trainer_settings.max_steps,
)
self.decay_epsilon = ModelUtils.DecayedValue(
self.hyperparameters.learning_rate_schedule,
self.hyperparameters.epsilon,
0.1,
self.trainer_settings.max_steps,
)
self.decay_beta = ModelUtils.DecayedValue(
self.hyperparameters.learning_rate_schedule,
self.hyperparameters.beta,
1e-5,
self.trainer_settings.max_steps,
)
self.optimizer = torch.optim.Adam(
params, lr=self.trainer_settings.hyperparameters.learning_rate
)
self.stats_name_to_update_name = {
"Losses/Value Loss": "value_loss",
"Losses/Policy Loss": "policy_loss",
}
self.stream_names = list(self.reward_signals.keys())
def ppo_value_loss(
self,
values: Dict[str, torch.Tensor],
old_values: Dict[str, torch.Tensor],
returns: Dict[str, torch.Tensor],
epsilon: float,
loss_masks: torch.Tensor,
) -> torch.Tensor:
"""
Evaluates value loss for PPO.
:param values: Value output of the current network.
:param old_values: Value stored with experiences in buffer.
:param returns: Computed returns.
:param epsilon: Clipping value for value estimate.
:param loss_mask: Mask for losses. Used with LSTM to ignore 0'ed out experiences.
"""
value_losses = []
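        # Clipped value loss, per reward stream (computed in the loop below):
        #   L_V = mean_masked( max( (R - V)**2, (R - V_clip)**2 ) ),
        # where V_clip = V_old + clamp(V - V_old, -epsilon, +epsilon); the
        # per-stream losses are then averaged together.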
for name, head in values.items():
old_val_tensor = old_values[name]
returns_tensor = returns[name]
clipped_value_estimate = old_val_tensor + torch.clamp(
head - old_val_tensor, -1 * epsilon, epsilon
)
v_opt_a = (returns_tensor - head) ** 2
v_opt_b = (returns_tensor - clipped_value_estimate) ** 2
value_loss = ModelUtils.masked_mean(torch.max(v_opt_a, v_opt_b), loss_masks)
value_losses.append(value_loss)
value_loss = torch.mean(torch.stack(value_losses))
return value_loss
def ppo_policy_loss(
self,
advantages: torch.Tensor,
log_probs: torch.Tensor,
old_log_probs: torch.Tensor,
loss_masks: torch.Tensor,
) -> torch.Tensor:
"""
Evaluate PPO policy loss.
:param advantages: Computed advantages.
:param log_probs: Current policy probabilities
:param old_log_probs: Past policy probabilities
:param loss_masks: Mask for losses. Used with LSTM to ignore 0'ed out experiences.
"""
advantage = advantages.unsqueeze(-1)
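        # PPO clipped surrogate objective (negated, because the update minimises it):
        #   L_pi = -mean_masked( min( r * A, clamp(r, 1 - eps, 1 + eps) * A ) ),
        # with r = exp(log_probs - old_log_probs). Note that `decay_epsilon` below is
        # read straight from the hyperparameters, not from the decayed schedule.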
decay_epsilon = self.hyperparameters.epsilon
r_theta = torch.exp(log_probs - old_log_probs)
p_opt_a = r_theta * advantage
p_opt_b = (
torch.clamp(r_theta, 1.0 - decay_epsilon, 1.0 + decay_epsilon) * advantage
)
policy_loss = -1 * ModelUtils.masked_mean(
torch.min(p_opt_a, p_opt_b), loss_masks
)
return policy_loss
@timed
def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:
"""
Performs update on model.
:param batch: Batch of experiences.
:param num_sequences: Number of sequences to process.
:return: Results of update.
"""
# Get decayed parameters
decay_lr = self.decay_learning_rate.get_value(self.policy.get_current_step())
decay_eps = self.decay_epsilon.get_value(self.policy.get_current_step())
decay_bet = self.decay_beta.get_value(self.policy.get_current_step())
returns = {}
old_values = {}
for name in self.reward_signals:
old_values[name] = ModelUtils.list_to_tensor(
batch[f"{name}_value_estimates"]
)
returns[name] = ModelUtils.list_to_tensor(batch[f"{name}_returns"])
vec_obs = [ModelUtils.list_to_tensor(batch["vector_obs"])]
act_masks = ModelUtils.list_to_tensor(batch["action_mask"])
actions = AgentAction.from_dict(batch)
memories = [
ModelUtils.list_to_tensor(batch["memory"][i])
for i in range(0, len(batch["memory"]), self.policy.sequence_length)
]
if len(memories) > 0:
memories = torch.stack(memories).unsqueeze(0)
if self.policy.use_vis_obs:
vis_obs = []
for idx, _ in enumerate(
self.policy.actor_critic.network_body.visual_processors
):
vis_ob = ModelUtils.list_to_tensor(batch["visual_obs%d" % idx])
vis_obs.append(vis_ob)
else:
vis_obs = []
log_probs, entropy, values = self.policy.evaluate_actions(
vec_obs,
vis_obs,
masks=act_masks,
actions=actions,
memories=memories,
seq_len=self.policy.sequence_length,
)
old_log_probs = ActionLogProbs.from_dict(batch).flatten()
log_probs = log_probs.flatten()
loss_masks = ModelUtils.list_to_tensor(batch["masks"], dtype=torch.bool)
value_loss = self.ppo_value_loss(
values, old_values, returns, decay_eps, loss_masks
)
policy_loss = self.ppo_policy_loss(
ModelUtils.list_to_tensor(batch["advantages"]),
log_probs,
old_log_probs,
loss_masks,
)
loss = (
policy_loss
+ 0.5 * value_loss
- decay_bet * ModelUtils.masked_mean(entropy, loss_masks)
)
# Set optimizer learning rate
ModelUtils.update_learning_rate(self.optimizer, decay_lr)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
update_stats = {
# NOTE: abs() is not technically correct, but matches the behavior in TensorFlow.
# TODO: After PyTorch is default, change to something more correct.
"Losses/Policy Loss": torch.abs(policy_loss).item(),
"Losses/Value Loss": value_loss.item(),
"Policy/Learning Rate": decay_lr,
"Policy/Epsilon": decay_eps,
"Policy/Beta": decay_bet,
}
for reward_provider in self.reward_signals.values():
update_stats.update(reward_provider.update(batch))
return update_stats
def get_modules(self):
modules = {"Optimizer": self.optimizer}
for reward_provider in self.reward_signals.values():
modules.update(reward_provider.get_modules())
return modules
| 39.028571
| 99
| 0.634578
|
36865a60e5da71a98f55744c640c23823aa034b9
| 39,268
|
py
|
Python
|
plasmapy/particles/ionization_state_collection.py
|
LeoMurphyWM24/PlasmaPy
|
2dbfd24209e33259a90ceb293b93e89144636db0
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null |
plasmapy/particles/ionization_state_collection.py
|
LeoMurphyWM24/PlasmaPy
|
2dbfd24209e33259a90ceb293b93e89144636db0
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null |
plasmapy/particles/ionization_state_collection.py
|
LeoMurphyWM24/PlasmaPy
|
2dbfd24209e33259a90ceb293b93e89144636db0
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null |
"""
A class for storing ionization state data for multiple elements or
isotopes.
"""
__all__ = ["IonizationStateCollection"]
import astropy.units as u
import numpy as np
from numbers import Integral, Real
from typing import Dict, List, Optional, Tuple, Union
from plasmapy.particles.atomic import atomic_number
from plasmapy.particles.exceptions import (
ChargeError,
InvalidParticleError,
ParticleError,
)
from plasmapy.particles.ionization_state import IonicLevel, IonizationState
from plasmapy.particles.particle_class import CustomParticle, Particle, ParticleLike
from plasmapy.particles.particle_collections import ParticleList
from plasmapy.particles.symbols import particle_symbol
from plasmapy.utils.decorators import validate_quantities
def _atomic_number_and_mass_number(p: Particle):
return (p.atomic_number, p.mass_number if p.isotope else 0)
class IonizationStateCollection:
"""
Describe the ionization state distributions of multiple elements
or isotopes.
Parameters
----------
inputs: `list`, `tuple`, or `dict`
A `list` or `tuple` of elements or isotopes (if ``T_e`` is
provided); a `list` of `~plasmapy.particles.IonizationState`
instances; a `dict` with elements or isotopes as keys and
a `~numpy.ndarray` of ionic fractions as the values; or a `dict`
with elements or isotopes as keys and `~astropy.units.Quantity`
instances with units of number density.
abundances: `dict`, optional, keyword-only
A `dict` with `~plasmapy.particles.particle_class.ParticleLike`
objects used as the keys and the corresponding relative abundance as the
values. The values must be positive real numbers.
log_abundances: `dict`, optional, keyword-only
A `dict` with `~plasmapy.particles.particle_class.ParticleLike`
objects used as the keys and the corresponding base 10 logarithms of their
relative abundances as the values. The values must be real numbers.
n0: `~astropy.units.Quantity`, optional, keyword-only
The number density normalization factor corresponding to the
abundances. The number density of each element is the product
of its abundance and ``n0``.
T_e: `~astropy.units.Quantity`, optional, keyword-only
The electron temperature in units of temperature or thermal
energy per particle.
kappa: `float`, optional, keyword-only
The value of kappa for a kappa distribution function.
tol: `float` or `integer`, optional, keyword-only
The absolute tolerance used by `~numpy.isclose` when testing
normalizations and making comparisons. Defaults to ``1e-15``.
Raises
------
`~plasmapy.particles.exceptions.ParticleError`
If `~plasmapy.particles.IonizationStateCollection` cannot be instantiated.
See Also
--------
~plasmapy.particles.ionization_state.IonicLevel
~plasmapy.particles.ionization_state.IonizationState
Examples
--------
>>> from astropy import units as u
>>> from plasmapy.particles import IonizationStateCollection
>>> states = IonizationStateCollection(
... {'H': [0.5, 0.5], 'He': [0.95, 0.05, 0]},
... T_e = 1.2e4 * u.K,
... n0 = 1e15 * u.m ** -3,
... abundances = {'H': 1, 'He': 0.08},
... )
>>> states.ionic_fractions
{'H': array([0.5, 0.5]), 'He': array([0.95, 0.05, 0. ])}
The number densities are given by the ionic fractions multiplied by
the abundance and the number density scaling factor ``n0``.
>>> states.number_densities['H']
<Quantity [5.e+14, 5.e+14] 1 / m3>
>>> states['He'] = [0.4, 0.59, 0.01]
To change the ionic fractions for a single element, use item
assignment.
>>> states = IonizationStateCollection(['H', 'He'])
>>> states['H'] = [0.1, 0.9]
Item assignment will also work if you supply number densities.
>>> states['He'] = [0.4, 0.6, 0.0] * u.m ** -3
>>> states.ionic_fractions['He']
array([0.4, 0.6, 0. ])
>>> states.number_densities['He']
<Quantity [0.4, 0.6, 0. ] 1 / m3>
Notes
-----
No more than one of ``abundances`` and ``log_abundances`` may be
specified.
If the value provided during item assignment is a
`~astropy.units.Quantity` with units of number density that retains
the total element density, then the ionic fractions will be set
proportionately.
When making comparisons between `~plasmapy.particles.IonizationStateCollection`
instances, `~numpy.nan` values are treated as equal. Equality tests
are performed to within a tolerance of ``tol``.
"""
# TODO: Improve explanation of dunder methods in docstring
# TODO: Add functionality to equilibrate initial ionization states
@validate_quantities(T_e={"equivalencies": u.temperature_energy()})
def __init__(
self,
inputs: Union[Dict[str, np.ndarray], List, Tuple],
*,
T_e: u.K = np.nan * u.K,
abundances: Optional[Dict[str, Real]] = None,
log_abundances: Optional[Dict[str, Real]] = None,
n0: u.m ** -3 = np.nan * u.m ** -3,
tol: Real = 1e-15,
kappa: Real = np.inf,
):
set_abundances = True
if isinstance(inputs, dict):
all_quantities = np.all(
[isinstance(fracs, u.Quantity) for fracs in inputs.values()]
)
if all_quantities:
right_units = np.all(
[fracs[0].si.unit == u.m ** -3 for fracs in inputs.values()]
)
if not right_units:
raise ParticleError(
"Units must be inverse volume for number densities."
)
abundances_provided = (
abundances is not None or log_abundances is not None
)
if abundances_provided:
raise ParticleError(
"Abundances cannot be provided if inputs "
"provides number density information."
)
set_abundances = False
try:
self._pars = {}
self.T_e = T_e
self.n0 = n0
self.tol = tol
self.ionic_fractions = inputs
if set_abundances:
self.abundances = abundances
self.log_abundances = log_abundances
self.kappa = kappa
except Exception as exc:
raise ParticleError(
"Unable to create IonizationStateCollection object."
) from exc
def __len__(self) -> int:
return len(self._base_particles)
def __str__(self) -> str:
return f"<IonizationStateCollection for: {', '.join(self.base_particles)}>"
def __repr__(self) -> str:
return self.__str__()
def __getitem__(self, *values) -> Union[IonizationState, IonicLevel]:
errmsg = f"Invalid indexing for IonizationStateCollection instance: {values[0]}"
one_input = not isinstance(values[0], tuple)
two_inputs = len(values[0]) == 2
if not one_input and not two_inputs:
raise IndexError(errmsg)
try:
arg1 = values[0] if one_input else values[0][0]
int_charge = None if one_input else values[0][1]
particle = arg1 if arg1 in self.base_particles else particle_symbol(arg1)
if int_charge is None:
return IonizationState(
particle=particle,
ionic_fractions=self.ionic_fractions[particle],
T_e=self._pars["T_e"],
n_elem=np.sum(self.number_densities[particle]),
tol=self.tol,
)
if not isinstance(int_charge, Integral):
raise TypeError(f"{int_charge} is not a valid charge for {particle}.")
elif not 0 <= int_charge <= atomic_number(particle):
raise ChargeError(f"{int_charge} is not a valid charge for {particle}.")
return IonicLevel(
ion=particle_symbol(particle, Z=int_charge),
ionic_fraction=self.ionic_fractions[particle][int_charge],
number_density=self.number_densities[particle][int_charge],
)
except Exception as exc:
raise IndexError(errmsg) from exc
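    # Hedged indexing sketch (mirrors __getitem__ above; values are illustrative):
    #
    #     states = IonizationStateCollection({'H': [0.9, 0.1]})
    #     states['H']       # -> IonizationState for hydrogen
    #     states['H', 1]    # -> IonicLevel for H 1+ (ionic fraction 0.1)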
def __setitem__(self, key, value):
errmsg = (
f"Cannot set item for this IonizationStateCollection instance for "
f"key = {repr(key)} and value = {repr(value)}"
)
try:
particle = particle_symbol(key)
self.ionic_fractions[key]
except (ParticleError, TypeError):
raise KeyError(
f"{errmsg} because {repr(key)} is an invalid particle."
) from None
except KeyError:
            raise KeyError(
                f"{errmsg} because {repr(key)} is not one of the base "
                f"particles whose ionization states are being tracked."
            ) from None
if isinstance(value, u.Quantity) and value.unit != u.dimensionless_unscaled:
try:
new_number_densities = value.to(u.m ** -3)
except u.UnitConversionError:
raise ValueError(
f"{errmsg} because the units of value do not "
f"correspond to a number density."
) from None
old_n_elem = np.sum(self.number_densities[particle])
new_n_elem = np.sum(new_number_densities)
density_was_nan = np.all(np.isnan(self.number_densities[particle]))
same_density = u.quantity.allclose(old_n_elem, new_n_elem, rtol=self.tol)
if not same_density and not density_was_nan:
raise ValueError(
f"{errmsg} because the old element number density "
f"of {old_n_elem} is not approximately equal to "
f"the new element number density of {new_n_elem}."
)
value = (new_number_densities / new_n_elem).to(u.dimensionless_unscaled)
# If the abundance of this particle has not been defined,
# then set the abundance if there is enough (but not too
# much) information to do so.
abundance_is_undefined = np.isnan(self.abundances[particle])
isnan_of_abundance_values = np.isnan(list(self.abundances.values()))
all_abundances_are_nan = np.all(isnan_of_abundance_values)
n_is_defined = not np.isnan(self.n0)
if abundance_is_undefined:
if n_is_defined:
self._pars["abundances"][particle] = new_n_elem / self.n0
elif all_abundances_are_nan:
self.n0 = new_n_elem
self._pars["abundances"][particle] = 1
else:
raise ParticleError(
f"Cannot set number density of {particle} to "
f"{value * new_n_elem} when the number density "
f"scaling factor is undefined, the abundance "
f"of {particle} is undefined, and some of the "
f"abundances of other elements/isotopes is "
f"defined."
)
try:
new_fractions = np.array(value, dtype=float)
except Exception as exc:
raise TypeError(
f"{errmsg} because value cannot be converted into an "
f"array that represents ionic fractions."
) from exc
# TODO: Create a separate function that makes sure ionic
# TODO: fractions are valid to reduce code repetition. This
# TODO: would probably best go as a private function in
# TODO: ionization_state.py.
required_nstates = atomic_number(particle) + 1
new_nstates = len(new_fractions)
if new_nstates != required_nstates:
raise ValueError(
f"{errmsg} because value must have {required_nstates} "
f"ionization levels but instead corresponds to "
f"{new_nstates} levels."
)
all_nans = np.all(np.isnan(new_fractions))
if not all_nans and (new_fractions.min() < 0 or new_fractions.max() > 1):
raise ValueError(
f"{errmsg} because the new ionic fractions are not "
f"all between 0 and 1."
)
normalized = np.isclose(np.sum(new_fractions), 1, rtol=self.tol)
if not normalized and not all_nans:
raise ValueError(
f"{errmsg} because the ionic fractions are not normalized to one."
)
self._ionic_fractions[particle][:] = new_fractions[:]
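    # Hedged usage sketch for item assignment (the collection name and the
    # values below are illustrative only, not taken from this file):
    #
    #     states = IonizationStateCollection(["H"], n0=1e9 * u.m ** -3)
    #     states["H"] = [0.1, 0.9]                 # plain ionic fractions
    #     states["H"] = [1e8, 9e8] * u.m ** -3     # number densities
    #
    # When a Quantity with units of inverse volume is assigned, it is
    # normalized to ionic fractions above, and the element's abundance is
    # back-filled from the number density scaling factor if it was
    # previously undefined.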
def __iter__(self):
yield from [self[key] for key in self.ionic_fractions.keys()]
def __eq__(self, other):
if not isinstance(other, IonizationStateCollection):
raise TypeError(
"IonizationStateCollection instance can only be compared with "
"other IonizationStateCollection instances."
)
if self.base_particles != other.base_particles:
raise ParticleError(
"Two IonizationStateCollection instances can be compared only "
"if the base particles are the same."
)
min_tol = np.min([self.tol, other.tol])
# Check any of a whole bunch of equality measures, recalling
# that np.nan == np.nan is False.
for attribute in ["T_e", "n_e", "kappa"]:
this = getattr(self, attribute)
that = getattr(other, attribute)
# TODO: Maybe create a function in utils called same_enough
# TODO: that would take care of all of these disparate
# TODO: equality measures.
this_equals_that = np.any(
[
this == that,
this is that,
np.isnan(this) and np.isnan(that),
np.isinf(this) and np.isinf(that),
u.quantity.allclose(this, that, rtol=min_tol),
]
)
if not this_equals_that:
return False
for attribute in ["ionic_fractions", "number_densities"]:
this_dict = getattr(self, attribute)
that_dict = getattr(other, attribute)
for particle in self.base_particles:
this = this_dict[particle]
that = that_dict[particle]
this_equals_that = np.any(
[
this is that,
np.all(np.isnan(this)) and np.all(np.isnan(that)),
u.quantity.allclose(this, that, rtol=min_tol),
]
)
if not this_equals_that:
return False
return True
@property
def ionic_fractions(self) -> Dict[str, np.array]:
"""
A `dict` containing the ionic fractions for each element and
isotope.
The keys of this `dict` are the symbols for each element or
isotope. The values will be `~numpy.ndarray` objects containing
the ionic fractions for each ionization level corresponding to
each element or isotope.
"""
return self._ionic_fractions
@ionic_fractions.setter
def ionic_fractions(self, inputs: Union[Dict, List, Tuple]):
"""
Set the ionic fractions.
Notes
-----
The ionic fractions are initialized during instantiation of
`~plasmapy.particles.IonizationStateCollection`. After this, the
only way to reset the ionic fractions via the ``ionic_fractions``
attribute is via a `dict` with elements or isotopes that are a
superset of the previous elements or isotopes. However, you may
use item assignment of the `~plasmapy.particles.IonizationState`
instance to assign new ionic fractions one element or isotope
at a time.
Raises
------
`~plasmapy.particles.exceptions.ParticleError`
If the ionic fractions cannot be set.
`TypeError`
If ``inputs`` is not a `list`, `tuple`, or `dict` during
instantiation, or if ``inputs`` is not a `dict` when it is
being set.
"""
# A potential problem is that using item assignment on the
# ionic_fractions attribute could cause the original attributes
# to be overwritten without checks being performed. We might
# eventually want to create a new class or subclass of UserDict
# that goes through these checks. In the meantime, we should
# make it clear to users to set ionic_fractions by using item
# assignment on the IonizationStateCollection instance as a whole. An
# example of the problem is `s = IonizationStateCollection(["He"])` being
# followed by `s.ionic_fractions["He"] = 0.3`.
if hasattr(self, "_ionic_fractions"):
if not isinstance(inputs, dict):
raise TypeError(
"Can only reset ionic_fractions with a dict if "
"ionic_fractions has been set already."
)
old_particles = set(self.base_particles)
new_particles = {particle_symbol(key) for key in inputs.keys()}
missing_particles = old_particles - new_particles
if missing_particles:
raise ParticleError(
"Can only reset ionic fractions with a dict if "
"the new base particles are a superset of the "
"prior base particles. To change ionic fractions "
"for one base particle, use item assignment on the "
"IonizationStateCollection instance instead."
)
if isinstance(inputs, dict):
original_keys = inputs.keys()
ionfrac_types = {type(inputs[key]) for key in original_keys}
inputs_have_quantities = u.Quantity in ionfrac_types
if inputs_have_quantities and len(ionfrac_types) != 1:
raise TypeError(
"Ionic fraction information may only be inputted "
"as a Quantity object if all ionic fractions are "
"Quantity arrays with units of inverse volume."
)
try:
particles = {key: Particle(key) for key in original_keys}
except (InvalidParticleError, TypeError) as exc:
raise ParticleError(
"Unable to create IonizationStateCollection instance "
"because not all particles are valid."
) from exc
# The particles whose ionization states are to be recorded
# should be elements or isotopes but not ions or neutrals.
for key in particles.keys():
is_element = particles[key].is_category("element")
has_charge_info = particles[key].is_category(
any_of=["charged", "uncharged"]
)
if not is_element or has_charge_info:
raise ParticleError(
f"{key} is not an element or isotope without "
f"charge information."
)
# We are sorting the elements/isotopes by atomic number and
# mass number since we will often want to plot and analyze
# things and this is the most sensible order.
def _sort_entries_by_atomic_and_mass_numbers(k):
return (
particles[k].atomic_number,
particles[k].mass_number if particles[k].isotope else 0,
)
sorted_keys = sorted(
original_keys, key=_sort_entries_by_atomic_and_mass_numbers
)
_elements_and_isotopes = []
_particle_instances = []
new_ionic_fractions = {}
if inputs_have_quantities:
n_elems = {}
for key in sorted_keys:
new_key = particles[key].symbol
_particle_instances.append(particles[key])
if new_key in _elements_and_isotopes:
raise ParticleError(
"Repeated particles in IonizationStateCollection."
)
nstates_input = len(inputs[key])
nstates = particles[key].atomic_number + 1
if nstates != nstates_input:
raise ParticleError(
f"The ionic fractions array for {key} must "
f"have a length of {nstates}."
)
_elements_and_isotopes.append(new_key)
if inputs_have_quantities:
try:
number_densities = inputs[key].to(u.m ** -3)
n_elem = np.sum(number_densities)
new_ionic_fractions[new_key] = np.array(
number_densities / n_elem
)
n_elems[key] = n_elem
except u.UnitConversionError as exc:
raise ParticleError("Units are not inverse volume.") from exc
elif (
isinstance(inputs[key], np.ndarray)
and inputs[key].dtype.kind == "f"
):
new_ionic_fractions[particles[key].symbol] = inputs[key]
else:
try:
new_ionic_fractions[particles[key].symbol] = np.array(
inputs[key], dtype=float
)
except ValueError as exc:
raise ParticleError(
f"Inappropriate ionic fractions for {key}."
) from exc
for key in _elements_and_isotopes:
fractions = new_ionic_fractions[key]
if not np.all(np.isnan(fractions)):
if np.min(fractions) < 0 or np.max(fractions) > 1:
raise ParticleError(
f"Ionic fractions for {key} are not between 0 and 1."
)
if not np.isclose(np.sum(fractions), 1, atol=self.tol, rtol=0):
raise ParticleError(
f"Ionic fractions for {key} are not normalized to 1."
)
# When the inputs provide the densities, the abundances must
# not have been provided because that would be redundant
# or contradictory information. The number density scaling
# factor might or might not have been provided. Have the
# number density scaling factor default to the total number
# of neutrals and ions across all elements and isotopes, if
# it was not provided. Then go ahead and calculate the
# abundances based on that. However, we need to be careful
# that the abundances are not overwritten during the
# instantiation of the class.
if inputs_have_quantities:
if np.isnan(self.n0):
new_n = 0 * u.m ** -3
for key in _elements_and_isotopes:
new_n += n_elems[key]
self.n0 = new_n
new_abundances = {}
for key in _elements_and_isotopes:
new_abundances[key] = float(n_elems[key] / self.n0)
self._pars["abundances"] = new_abundances
elif isinstance(inputs, (list, tuple)):
try:
_particle_instances = [Particle(particle) for particle in inputs]
except (InvalidParticleError, TypeError) as exc:
raise ParticleError(
"Invalid inputs to IonizationStateCollection."
) from exc
_particle_instances.sort(key=_atomic_number_and_mass_number)
_elements_and_isotopes = [
particle.symbol for particle in _particle_instances
]
new_ionic_fractions = {
particle.symbol: np.full(
particle.atomic_number + 1, fill_value=np.nan, dtype=float
)
for particle in _particle_instances
}
else:
raise TypeError("Incorrect inputs to set ionic_fractions.")
for i in range(1, len(_particle_instances)):
if _particle_instances[i - 1].element == _particle_instances[i].element:
if (
not _particle_instances[i - 1].isotope
and _particle_instances[i].isotope
):
raise ParticleError(
"Cannot have an element and isotopes of that element."
)
self._particle_instances = _particle_instances
self._base_particles = _elements_and_isotopes
self._ionic_fractions = new_ionic_fractions
def normalize(self) -> None:
"""
Normalize the ionic fractions so that the sum for each element
equals one.
"""
for particle in self.base_particles:
tot = np.sum(self.ionic_fractions[particle])
self.ionic_fractions[particle] = self.ionic_fractions[particle] / tot
@property
@validate_quantities
def n_e(self) -> u.m ** -3:
"""The electron number density under the assumption of quasineutrality."""
number_densities = self.number_densities
n_e = 0.0 * u.m ** -3
for elem in self.base_particles:
atomic_numb = atomic_number(elem)
number_of_ionization_states = atomic_numb + 1
charge_numbers = np.linspace(0, atomic_numb, number_of_ionization_states)
n_e += np.sum(number_densities[elem] * charge_numbers)
return n_e
@property
@validate_quantities
def n0(self) -> u.m ** -3:
"""The number density scaling factor."""
return self._pars["n"]
@n0.setter
@validate_quantities
def n0(self, n: u.m ** -3):
"""Set the number density scaling factor."""
try:
n = n.to(u.m ** -3)
except u.UnitConversionError as exc:
raise ParticleError("Units cannot be converted to u.m ** -3.") from exc
except Exception as exc:
raise ParticleError(f"{n} is not a valid number density.") from exc
if n < 0 * u.m ** -3:
raise ParticleError("Number density cannot be negative.")
self._pars["n"] = n.to(u.m ** -3)
@property
def number_densities(self) -> Dict[str, u.Quantity]:
"""
A `dict` containing the number densities for the elements and/or
isotopes composing the collection.
"""
return {
elem: self.n0 * self.abundances[elem] * self.ionic_fractions[elem]
for elem in self.base_particles
}
@property
def abundances(self) -> Optional[Dict[ParticleLike, Real]]:
"""The elemental abundances."""
return self._pars["abundances"]
@abundances.setter
def abundances(self, abundances_dict: Optional[Dict[ParticleLike, Real]]):
"""
Set the elemental (or isotopic) abundances. The elements and
isotopes must be the same as or a superset of the elements whose
ionization states are being tracked.
"""
if abundances_dict is None:
self._pars["abundances"] = {elem: np.nan for elem in self.base_particles}
elif not isinstance(abundances_dict, dict):
raise TypeError(
"The abundances attribute must be a dict with "
"elements or isotopes as keys and real numbers "
"representing relative abundances as values."
)
else:
old_keys = abundances_dict.keys()
try:
new_keys_dict = {}
for old_key in old_keys:
new_keys_dict[particle_symbol(old_key)] = old_key
except Exception:
raise ParticleError(
f"The key {repr(old_key)} in the abundances "
f"dictionary is not a valid element or isotope."
)
new_elements = new_keys_dict.keys()
old_elements_set = set(self.base_particles)
new_elements_set = set(new_elements)
if old_elements_set - new_elements_set:
raise ParticleError(
f"The abundances of the following particles are "
f"missing: {old_elements_set - new_elements_set}"
)
new_abundances_dict = {}
for element in new_elements:
inputted_abundance = abundances_dict[new_keys_dict[element]]
try:
inputted_abundance = float(inputted_abundance)
except Exception:
raise TypeError(
f"The abundance for {element} was provided as"
f"{inputted_abundance}, which cannot be "
f"converted to a real number."
) from None
if inputted_abundance < 0:
raise ParticleError(f"The abundance of {element} is negative.")
new_abundances_dict[element] = inputted_abundance
self._pars["abundances"] = new_abundances_dict
@property
def log_abundances(self) -> Dict[str, Real]:
"""
A `dict` with atomic or isotope symbols as keys and the base 10
logarithms of the relative abundances as the corresponding values.
"""
return {
atom: np.log10(abundance) for atom, abundance in self.abundances.items()
}
@log_abundances.setter
def log_abundances(self, value: Optional[Dict[str, Real]]):
"""Set the base 10 logarithm of the relative abundances."""
if value is not None:
try:
new_abundances_input = {
atom: 10 ** log_abundance for atom, log_abundance in value.items()
}
self.abundances = new_abundances_input
except Exception:
raise ParticleError("Invalid log_abundances.") from None
@property
def T_e(self) -> u.K:
"""The electron temperature."""
return self._pars["T_e"]
@T_e.setter
@validate_quantities(
electron_temperature=dict(equivalencies=u.temperature_energy())
)
def T_e(self, electron_temperature: u.K):
"""Set the electron temperature."""
try:
temperature = electron_temperature.to(
u.K, equivalencies=u.temperature_energy()
)
except (AttributeError, u.UnitsError):
raise ParticleError(
f"{electron_temperature} is not a valid temperature."
) from None
if temperature < 0 * u.K:
raise ParticleError("The electron temperature cannot be negative.")
self._pars["T_e"] = temperature
@property
def kappa(self) -> np.real:
"""
The κ parameter for a kappa distribution function for electrons.
The value of ``kappa`` must be greater than ``1.5`` in order to
have a valid distribution function. If ``kappa`` equals
`~numpy.inf`, then the distribution function reduces to a
Maxwellian.
"""
return self._pars["kappa"]
@kappa.setter
def kappa(self, value: Real):
"""
Set the kappa parameter for a kappa distribution function for
electrons. The value must be between ``1.5`` and `~numpy.inf`.
"""
kappa_errmsg = "kappa must be a real number greater than 1.5"
if not isinstance(value, Real):
raise TypeError(kappa_errmsg)
if value <= 1.5:
raise ValueError(kappa_errmsg)
self._pars["kappa"] = np.real(value)
@property
def base_particles(self) -> List[str]:
"""
A `list` of the elements and isotopes whose ionization states
are being kept track of.
"""
return self._base_particles
@property
def tol(self) -> np.real:
"""The absolute tolerance for comparisons."""
return self._tol
@tol.setter
def tol(self, atol: Real):
"""Set the absolute tolerance for comparisons."""
if not isinstance(atol, Real):
raise TypeError("The attribute tol must be a real number.")
if 0 <= atol <= 1.0:
self._tol = np.real(atol)
else:
raise ValueError("Need 0 <= tol <= 1.")
def average_ion(
self,
*,
include_neutrals: bool = True,
use_rms_charge: bool = False,
use_rms_mass: bool = False,
) -> CustomParticle:
"""
Return a |CustomParticle| representing the mean particle
included across all ionization states.
By default, this method will use the weighted mean to calculate
        the properties of the |CustomParticle|, where the weight for
each ionic level is given by its ionic fraction multiplied by
the abundance of the base element or isotope. If
``use_rms_charge`` or ``use_rms_mass`` is `True`, then this
method will return the root mean square of the charge or mass,
respectively.
Parameters
----------
include_neutrals : `bool`, optional, keyword-only
If `True`, include neutrals when calculating the mean values
of the different particles. If `False`, exclude neutrals.
Defaults to `True`.
use_rms_charge : `bool`, optional, keyword-only
If `True`, use the root mean square charge instead of the
mean charge. Defaults to `False`.
use_rms_mass : `bool`, optional, keyword-only
If `True`, use the root mean square mass instead of the mean
mass. Defaults to `False`.
Raises
------
`~plasmapy.particles.exceptions.ParticleError`
If the abundance of any of the elements or isotopes is not
defined and the |IonizationStateCollection| instance includes
more than one element or isotope.
Returns
-------
~plasmapy.particles.particle_class.CustomParticle
Examples
--------
>>> states = IonizationStateCollection(
... {"H": [0.1, 0.9], "He": [0, 0.1, 0.9]},
... abundances={"H": 1, "He": 0.1}
... )
>>> states.average_ion()
CustomParticle(mass=2.12498...e-27 kg, charge=1.5876...e-19 C)
>>> states.average_ion(include_neutrals=False, use_rms_charge=True, use_rms_mass=True)
CustomParticle(mass=2.633...e-27 kg, charge=1.805...e-19 C)
"""
min_charge = 0 if include_neutrals else 1
all_particles = ParticleList()
all_abundances = []
for base_particle in self.base_particles:
ionization_state = self[base_particle]
ionic_levels = ionization_state.to_list()[min_charge:]
all_particles.extend(ionic_levels)
base_particle_abundance = self.abundances[base_particle]
if np.isnan(base_particle_abundance):
if len(self) == 1:
base_particle_abundance = 1
else:
raise ParticleError(
"Unable to provide an average particle without abundances."
)
ionic_fractions = ionization_state.ionic_fractions[min_charge:]
ionic_abundances = base_particle_abundance * ionic_fractions
all_abundances.extend(ionic_abundances)
return all_particles.average_particle(
use_rms_charge=use_rms_charge,
use_rms_mass=use_rms_mass,
abundances=all_abundances,
)
def summarize(self, minimum_ionic_fraction: Real = 0.01) -> None:
"""
Print quicklook information for an
`~plasmapy.particles.IonizationStateCollection` instance.
Parameters
----------
minimum_ionic_fraction: `Real`
If the ionic fraction for a particular ionization state is
below this level, then information for it will not be
printed. Defaults to 0.01.
Examples
--------
>>> states = IonizationStateCollection(
... {'H': [0.1, 0.9], 'He': [0.95, 0.05, 0.0]},
... T_e = 12000 * u.K,
... n0 = 3e9 * u.cm ** -3,
... abundances = {'H': 1.0, 'He': 0.1},
... kappa = 3.4,
... )
>>> states.summarize()
IonizationStateCollection instance for: H, He
----------------------------------------------------------------
H 0+: 0.100 n_i = 3.00e+14 m**-3 T_i = 1.20e+04 K
H 1+: 0.900 n_i = 2.70e+15 m**-3 T_i = 1.20e+04 K
----------------------------------------------------------------
He 0+: 0.950 n_i = 2.85e+14 m**-3 T_i = 1.20e+04 K
He 1+: 0.050 n_i = 1.50e+13 m**-3 T_i = 1.20e+04 K
----------------------------------------------------------------
n_e = 2.71e+15 m**-3
T_e = 1.20e+04 K
kappa = 3.40
----------------------------------------------------------------
"""
separator_line = 64 * "-"
output = [
f"IonizationStateCollection instance for: {', '.join(self.base_particles)}"
]
# Get the ionic symbol with the corresponding ionic fraction and
# number density (if available), but only for the most abundant
# ionization levels for each element.
for ionization_state in self:
states_info = ionization_state._get_states_info(minimum_ionic_fraction)
if len(states_info) > 0:
output += states_info
output[-1] += "\n" + separator_line
attributes = []
if np.isfinite(self.n_e):
attributes.append(f"n_e = {self.n_e.value:.2e} m**-3")
if np.isfinite(self.T_e):
attributes.append(f"T_e = {self.T_e.value:.2e} K")
if np.isfinite(self.kappa):
attributes.append(f"kappa = {self.kappa:.2f}")
if attributes:
attributes.append(separator_line)
output.append("\n".join(attributes))
if len(output) > 1:
output[0] += "\n" + separator_line
output_string = "\n".join(output)
else:
output_string = output[0]
print(output_string.strip("\n"))
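# A minimal, hedged demonstration of the collection defined above. It mirrors
# the doctest in ``summarize`` and asserts nothing beyond what that docstring
# already shows; treat it as a sketch rather than part of the public API.
def _demo_ionization_state_collection():  # pragma: no cover
    states = IonizationStateCollection(
        {"H": [0.1, 0.9], "He": [0.95, 0.05, 0.0]},
        T_e=12000 * u.K,
        n0=3e9 * u.cm ** -3,
        abundances={"H": 1.0, "He": 0.1},
        kappa=3.4,
    )
    states.summarize()           # quicklook table, as in the docstring above
    return states.average_ion()  # CustomParticle with the mean mass and charge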
| 38.802372
| 94
| 0.568223
|
4b7cc2b46c19cd5ad1fdb65d12dd3e7ebd988ebb
| 6,258
|
py
|
Python
|
functions/system.py
|
codions-forks/flask-nginx-rtmp-manager
|
9088c44616a2e94f6771216af6f22c241064e321
|
[
"MIT"
] | 1
|
2021-09-26T05:32:00.000Z
|
2021-09-26T05:32:00.000Z
|
functions/system.py
|
codions-forks/flask-nginx-rtmp-manager
|
9088c44616a2e94f6771216af6f22c241064e321
|
[
"MIT"
] | null | null | null |
functions/system.py
|
codions-forks/flask-nginx-rtmp-manager
|
9088c44616a2e94f6771216af6f22c241064e321
|
[
"MIT"
] | null | null | null |
from threading import Thread
from functools import wraps
import subprocess
import os
import datetime
import smtplib
from flask import flash
from html.parser import HTMLParser
import ipaddress
import json
import secrets
import logging
from globals import globalvars
from classes.shared import db
from classes import settings
from classes import logs
from classes import RecordedVideo
from classes import Sec
from functions import cachedDbCalls
def asynch(func):
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
def check_existing_settings():
settingsQuery = settings.settings.query.all()
if settingsQuery != []:
db.session.close()
return True
else:
db.session.close()
return False
# Class Required for HTML Stripping in strip_html
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_html(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def videoupload_allowedExt(filename, allowedExtensions):
if not "." in filename:
return False
ext = filename.rsplit(".", 1)[1]
if ext.upper() in allowedExtensions:
return True
else:
return False
def formatSiteAddress(systemAddress):
try:
ipaddress.ip_address(systemAddress)
return systemAddress
except ValueError:
try:
ipaddress.ip_address(systemAddress.split(':')[0])
return systemAddress.split(':')[0]
except ValueError:
return systemAddress
def table2Dict(table):
exportedTableList = table.query.all()
dataList = []
for tbl in exportedTableList:
dataList.append(dict((column.name, str(getattr(tbl, column.name))) for column in tbl.__table__.columns))
return dataList
def sendTestEmail(smtpServer, smtpPort, smtpTLS, smtpSSL, smtpUsername, smtpPassword, smtpSender, smtpReceiver):
try:
server = smtplib.SMTP(smtpServer, int(smtpPort))
if smtpSSL is True:
server = smtplib.SMTP_SSL(smtpServer, int(smtpPort))
if smtpTLS is True:
server.starttls()
server.ehlo()
if smtpUsername and smtpPassword:
server.login(smtpUsername, smtpPassword)
msg = "Test Email - Your Instance of OSP has been successfully configured!"
server.sendmail(smtpSender, smtpReceiver, msg)
except Exception as e:
logging.error(e)
newLog(1, "Test Email Failed for " + str(smtpServer) + "Reason:" + str(e))
return False
server.quit()
newLog(1, "Test Email Successful for " + str(smtpServer))
return True
def newLog(logType, message):
newLogItem = logs.logs(datetime.datetime.utcnow(), str(message), logType)
db.session.add(newLogItem)
db.session.commit()
return True
def rebuildOSPEdgeConf():
f = open("/opt/osp/conf/osp-edge.conf", "w")
ospEdgeQuery = settings.edgeStreamer.query.filter_by(active=True).all()
f.write('split_clients "${remote_addr}AAA" $ospedge_node {\n')
if ospEdgeQuery != []:
for edge in ospEdgeQuery:
if edge.port == 80 or edge.port == 443:
f.write(str(edge.loadPct) + "% " + edge.address + ";\n")
else:
f.write(str(edge.loadPct) + "% " + edge.address + ":" + str(edge.port) +";\n" )
else:
f.write("100% 127.0.0.1;\n")
f.write("}")
f.close()
return True
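# Hedged sketch of the file rebuildOSPEdgeConf() writes to
# /opt/osp/conf/osp-edge.conf, assuming two active edge nodes (hostnames and
# load percentages below are illustrative):
#
#   split_clients "${remote_addr}AAA" $ospedge_node {
#       50% edge1.example.com;
#       50% edge2.example.com:8080;
#   }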
def systemFixes(app):
logging.info({"level": "info", "message": "Checking for 0.7.x Clips"})
# Fix for Beta 6 Switch from Fake Clips to real clips
clipQuery = RecordedVideo.Clips.query.filter_by(videoLocation=None).all()
videos_root = globalvars.videoRoot + 'videos/'
for clip in clipQuery:
originalVideo = videos_root + clip.recordedVideo.videoLocation
clipVideoLocation = clip.recordedVideo.channel.channelLoc + '/clips/' + 'clip-' + str(clip.id) + ".mp4"
fullvideoLocation = videos_root + clipVideoLocation
clip.videoLocation = clipVideoLocation
clipVideo = subprocess.run(['ffmpeg', '-ss', str(clip.startTime), '-i', originalVideo, '-c', 'copy', '-t', str(clip.length), '-avoid_negative_ts', '1', fullvideoLocation])
        db.session.commit()
# Create the Stickers directory if it does not exist
if not os.path.isdir(app.config['WEB_ROOT'] + "/images/stickers"):
try:
os.mkdir(app.config['WEB_ROOT'] + "/images/stickers")
except OSError:
flash("Unable to create <web-root>/images/stickers", "error")
# Create the stream-thumb directory if it does not exist
if not os.path.isdir(app.config['WEB_ROOT'] + "stream-thumb"):
try:
os.mkdir(app.config['WEB_ROOT'] + "stream-thumb")
except OSError:
flash("Unable to create <web-root>/stream-thumb", "error")
# Check fs_uniquifier
userQuery = Sec.User.query.filter_by(fs_uniquifier=None).all()
for user in userQuery:
user.fs_uniquifier = str(secrets.token_hex(nbytes=16))
db.session.commit()
return True
def initializeThemes():
sysSettings = cachedDbCalls.getSystemSettings()
logging.info({"level": "info", "message": "Importing Theme Data into Global Cache"})
# Import Theme Data into Theme Dictionary
with open('templates/themes/' + sysSettings.systemTheme + '/theme.json') as f:
globalvars.themeData = json.load(f)
return True
def checkOSPEdgeConf():
sysSettings = cachedDbCalls.getSystemSettings()
logging.info({"level": "info", "message": "Rebuilding OSP Edge Conf File"})
# Initialize the OSP Edge Configuration - Mostly for Docker
if sysSettings.buildEdgeOnRestart is True:
try:
rebuildOSPEdgeConf()
except:
logging.error("Error Rebuilding Edge Config")
return False
else:
logging.info({"level": "info", "message": "Skipping Rebuilding '/opt/osp/conf/osp-edge.conf' per System Setting"})
return True
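# Hedged usage notes for the helpers above (return values follow directly from
# the code; the inputs are illustrative):
#   videoupload_allowedExt("clip.MP4", ["MP4", "WEBM"])  -> True
#   formatSiteAddress("192.168.1.5:8080")                -> "192.168.1.5"
#   formatSiteAddress("osp.example.com")                 -> "osp.example.com"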
| 33.645161
| 179
| 0.654842
|
090eb1865b187c74496f41effefc6916f93a697d
| 11,420
|
py
|
Python
|
socketio/transports.py
|
fuhrysteve/gevent-socketio
|
ec5cd40a9eae06f16ac657a03656bc90f82cdf41
|
[
"BSD-3-Clause"
] | 1
|
2015-05-15T18:30:43.000Z
|
2015-05-15T18:30:43.000Z
|
socketio/transports.py
|
fuhrysteve/gevent-socketio
|
ec5cd40a9eae06f16ac657a03656bc90f82cdf41
|
[
"BSD-3-Clause"
] | null | null | null |
socketio/transports.py
|
fuhrysteve/gevent-socketio
|
ec5cd40a9eae06f16ac657a03656bc90f82cdf41
|
[
"BSD-3-Clause"
] | null | null | null |
import gevent
try:
from urllib.parse import unquote_plus, parse_qs
except ImportError:
    from urllib import unquote_plus
from urlparse import parse_qs
from geventwebsocket import WebSocketError
from gevent.queue import Empty
class BaseTransport(object):
"""Base class for all transports. Mostly wraps handler class functions."""
def __init__(self, handler, config, **kwargs):
"""Base transport class.
:param config: dict Should contain the config keys, like
``heartbeat_interval``, ``heartbeat_timeout`` and
``close_timeout``.
"""
self.content_type = ("Content-Type", "text/plain; charset=UTF-8")
self.headers = [
("Access-Control-Allow-Origin", "*"),
("Access-Control-Allow-Credentials", "true"),
("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
("Access-Control-Max-Age", 3600),
]
self.handler = handler
self.config = config
def write(self, data=""):
# Gevent v 0.13
if hasattr(self.handler, 'response_headers_list'):
if 'Content-Length' not in self.handler.response_headers_list:
self.handler.response_headers.append(('Content-Length', len(data)))
self.handler.response_headers_list.append('Content-Length')
elif not hasattr(self.handler, 'provided_content_length') or self.handler.provided_content_length is None:
# Gevent 1.0bX
l = len(data)
self.handler.provided_content_length = l
self.handler.response_headers.append(('Content-Length', l))
self.handler.write_smart(data)
def start_response(self, status, headers, **kwargs):
if "Content-Type" not in [x[0] for x in headers]:
headers.append(self.content_type)
headers.extend(self.headers)
self.handler.start_response(status, headers, **kwargs)
class XHRPollingTransport(BaseTransport):
def __init__(self, *args, **kwargs):
super(XHRPollingTransport, self).__init__(*args, **kwargs)
def options(self):
self.start_response("200 OK", ())
self.write()
return []
def get(self, socket):
socket.heartbeat()
heartbeat_interval = self.config['heartbeat_interval']
payload = self.get_messages_payload(socket, timeout=heartbeat_interval)
if not payload:
payload = "8::" # NOOP
self.start_response("200 OK", [])
self.write(payload)
def _request_body(self):
return self.handler.wsgi_input.readline()
def post(self, socket):
for message in self.decode_payload(self._request_body()):
socket.put_server_msg(message)
self.start_response("200 OK", [
("Connection", "close"),
("Content-Type", "text/plain")
])
self.write("1")
def get_messages_payload(self, socket, timeout=None):
"""This will fetch the messages from the Socket's queue, and if
there are many messes, pack multiple messages in one payload and return
"""
try:
msgs = socket.get_multiple_client_msgs(timeout=timeout)
data = self.encode_payload(msgs)
except Empty:
data = ""
return data
def encode_payload(self, messages):
"""Encode list of messages. Expects messages to be unicode.
``messages`` - List of raw messages to encode, if necessary
"""
if not messages or messages[0] is None:
return ''
if len(messages) == 1:
return messages[0].encode('utf-8')
payload = u''.join([(u'\ufffd%d\ufffd%s' % (len(p), p))
for p in messages if p is not None])
# FIXME: why is it so that we must filter None from here ? How
# is it even possible that a None gets in there ?
return payload.encode('utf-8')
def decode_payload(self, payload):
"""This function can extract multiple messages from one HTTP payload.
        Sometimes, the XHR/JSONP/... transports can pack more than one message
        in a single packet. They are encoded following the WebSocket
        semantics, which need to be reproduced here to unwrap the messages.
The semantics are:
\ufffd + [length as a string] + \ufffd + [payload as a unicode string]
This function returns a list of messages, even though there is only
one.
Inspired by socket.io/lib/transports/http.js
"""
payload = payload.decode('utf-8')
if payload[0] == u"\ufffd":
ret = []
while len(payload) != 0:
len_end = payload.find(u"\ufffd", 1)
length = int(payload[1:len_end])
msg_start = len_end + 1
msg_end = length + msg_start
message = payload[msg_start:msg_end]
ret.append(message)
payload = payload[msg_end:]
return ret
return [payload]
def do_exchange(self, socket, request_method):
if not socket.connection_established:
# Runs only the first time we get a Socket opening
self.start_response("200 OK", [
("Connection", "close"),
])
self.write("1::") # 'connect' packet
return
elif request_method in ("GET", "POST", "OPTIONS"):
return getattr(self, request_method.lower())(socket)
else:
raise Exception("No support for the method: " + request_method)
class JSONPolling(XHRPollingTransport):
def __init__(self, handler, config):
super(JSONPolling, self).__init__(handler, config)
self.content_type = ("Content-Type", "text/javascript; charset=UTF-8")
def _request_body(self):
data = super(JSONPolling, self)._request_body()
# resolve %20%3F's, take out wrapping d="...", etc..
data = unquote_plus(data)[3:-1] \
.replace(r'\"', '"') \
.replace(r"\\", "\\")
# For some reason, in case of multiple messages passed in one
# query, IE7 sends it escaped, not utf-8 encoded. This dirty
# hack handled it
if data[0] == "\\":
data = data.decode("unicode_escape").encode("utf-8")
return data
def write(self, data):
"""Just quote out stuff before sending it out"""
args = parse_qs(self.handler.environ.get("QUERY_STRING"))
if "i" in args:
i = args["i"]
else:
i = "0"
# TODO: don't we need to quote this data in here ?
super(JSONPolling, self).write("io.j[%s]('%s');" % (i, data))
class XHRMultipartTransport(XHRPollingTransport):
    def __init__(self, handler, config):
        super(XHRMultipartTransport, self).__init__(handler, config)
self.content_type = (
"Content-Type",
"multipart/x-mixed-replace;boundary=\"socketio\""
)
def do_exchange(self, socket, request_method):
if request_method == "GET":
return self.get(socket)
elif request_method == "POST":
return self.post(socket)
else:
raise Exception("No support for such method: " + request_method)
def get(self, socket):
header = "Content-Type: text/plain; charset=UTF-8\r\n\r\n"
self.start_response("200 OK", [("Connection", "keep-alive")])
self.write_multipart("--socketio\r\n")
self.write_multipart(header)
self.write_multipart(str(socket.sessid) + "\r\n")
self.write_multipart("--socketio\r\n")
def chunk():
while True:
payload = self.get_messages_payload(socket)
if not payload:
# That would mean the call to Queue.get() returned Empty,
# so it was in fact killed, since we pass no timeout=..
return
# See below
else:
try:
self.write_multipart(header)
self.write_multipart(payload)
self.write_multipart("--socketio\r\n")
except socket.error:
# The client might try to reconnect, even with a socket
# error, so let's just let it go, and not kill the
# socket completely. Other processes will ensure
# we kill everything if the user expires the timeouts.
#
# WARN: this means that this payload is LOST, unless we
# decide to re-inject it into the queue.
return
socket.spawn(chunk)
class WebsocketTransport(BaseTransport):
def do_exchange(self, socket, request_method):
websocket = self.handler.environ['wsgi.websocket']
websocket.send("1::") # 'connect' packet
def send_into_ws():
while True:
message = socket.get_client_msg()
if message is None:
break
try:
websocket.send(message)
except (WebSocketError, TypeError):
# We can't send a message on the socket
# it is dead, let the other sockets know
socket.disconnect()
def read_from_ws():
while True:
message = websocket.receive()
if message is None:
break
                else:
                    socket.put_server_msg(message)
socket.spawn(send_into_ws)
socket.spawn(read_from_ws)
class FlashSocketTransport(WebsocketTransport):
pass
class HTMLFileTransport(XHRPollingTransport):
"""Not tested at all!"""
def __init__(self, handler, config):
super(HTMLFileTransport, self).__init__(handler, config)
self.content_type = ("Content-Type", "text/html")
def write_packed(self, data):
self.write("<script>_('%s');</script>" % data)
def write(self, data):
l = 1024 * 5
super(HTMLFileTransport, self).write("%d\r\n%s%s\r\n" % (l, data, " " * (l - len(data))))
def do_exchange(self, socket, request_method):
return super(HTMLFileTransport, self).do_exchange(socket, request_method)
def get(self, socket):
self.start_response("200 OK", [
("Connection", "keep-alive"),
("Content-Type", "text/html"),
("Transfer-Encoding", "chunked"),
])
self.write("<html><body><script>var _ = function (msg) { parent.s._(msg, document); };</script>")
self.write_packed("1::") # 'connect' packet
def chunk():
while True:
payload = self.get_messages_payload(socket)
if not payload:
# That would mean the call to Queue.get() returned Empty,
# so it was in fact killed, since we pass no timeout=..
return
else:
try:
self.write_packed(payload)
except socket.error:
# See comments for XHRMultipart
return
socket.spawn(chunk)
| 35.576324
| 114
| 0.565762
|
43f5bbfe883669bf821bf99a6cb8bd21d72f8220
| 1,233
|
py
|
Python
|
python/pmmlserver/pmmlserver/test_model.py
|
ittus/kserve
|
922a9b7e8a9a86b5ae65faf4ce863927873fd456
|
[
"Apache-2.0"
] | 1,146
|
2019-03-27T21:14:34.000Z
|
2021-09-22T08:36:46.000Z
|
python/pmmlserver/pmmlserver/test_model.py
|
ittus/kserve
|
922a9b7e8a9a86b5ae65faf4ce863927873fd456
|
[
"Apache-2.0"
] | 1,803
|
2019-03-27T22:16:02.000Z
|
2021-09-22T15:27:44.000Z
|
python/pmmlserver/pmmlserver/test_model.py
|
ittus/kserve
|
922a9b7e8a9a86b5ae65faf4ce863927873fd456
|
[
"Apache-2.0"
] | 573
|
2019-03-27T21:14:58.000Z
|
2021-09-20T21:15:52.000Z
|
# Copyright 2021 The KServe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pmmlserver import PmmlModel
model_dir = os.path.join(os.path.dirname(__file__), "example_model", "model")
def test_model():
server = PmmlModel("model", model_dir)
server.load()
request = {"instances": [[5.1, 3.5, 1.4, 0.2]]}
response = server.predict(request)
expect_result = {'Species': 'setosa',
'Probability_setosa': 1.0,
'Probability_versicolor': 0.0,
'Probability_virginica': 0.0,
'Node_Id': '2'}
assert isinstance(response["predictions"][0], dict)
assert response["predictions"][0] == expect_result
| 34.25
| 89
| 0.675588
|
f51b11ec3975eca1ee743510963bc64358482ed9
| 3,432
|
py
|
Python
|
app/api/v2/schemas.py
|
webclinic017/ark-invest-api
|
3edc46421e7e9ed44465c264e24f359be48f8212
|
[
"MIT"
] | 1
|
2021-09-25T07:17:42.000Z
|
2021-09-25T07:17:42.000Z
|
app/api/v2/schemas.py
|
webclinic017/ark-invest-api
|
3edc46421e7e9ed44465c264e24f359be48f8212
|
[
"MIT"
] | null | null | null |
app/api/v2/schemas.py
|
webclinic017/ark-invest-api
|
3edc46421e7e9ed44465c264e24f359be48f8212
|
[
"MIT"
] | null | null | null |
import datetime
from typing import List, Optional
from pydantic import BaseModel
class V2_FundProfileData(BaseModel):
symbol: str
name: str
description: str
fund_type: str
inception_date: datetime.date
cusip: str
isin: str
website: str
class Config:
orm_mode = True
class V2_FundProfile(BaseModel):
symbol: str
profile: Optional[V2_FundProfileData] = {}
class Config:
orm_mode = True
class V2_FundHoldingData(BaseModel):
date: datetime.date
ticker: Optional[str]
company: str
cusip: str
shares: int
market_value: float
weight: float
weight_rank: int
class Config:
orm_mode = True
class V2_FundHolding(BaseModel):
symbol: str
date_from: Optional[datetime.date]
date_to: Optional[datetime.date]
holdings: List[V2_FundHoldingData] = []
class Config:
orm_mode = True
class V2_FundTradeData(BaseModel):
date: datetime.date
ticker: Optional[str]
company: str
direction: str
cusip: str
shares: int
etf_percent: float
class Config:
orm_mode = True
class V2_FundTrades(BaseModel):
symbol: str
date_from: datetime.date = None
date_to: datetime.date = None
trades: List[V2_FundTradeData] = []
class Config:
orm_mode = True
class V2_FundOwnershipData(BaseModel):
date: datetime.date
fund: str
weight: float
weight_rank: int
shares: int
market_value: float
class Config:
orm_mode = True
class V2_FundOwnershipTotals(BaseModel):
funds: int
shares: int
market_value: float
class V2_FundOwnershipList(BaseModel):
date: datetime.date
ownership: List[V2_FundOwnershipData]
totals: V2_FundOwnershipTotals
class Config:
orm_mode = True
class V2_FundOwnership(BaseModel):
symbol: str
date_from: Optional[datetime.date]
date_to: Optional[datetime.date]
data: List[V2_FundOwnershipList] = []
class Config:
orm_mode = True
class V2_FundNewsData(BaseModel):
id: int
datetime: datetime.datetime
related: str
source: str
headline: str
summary: str
url: str
image: str
class Config:
orm_mode = True
class V2_FundNews(BaseModel):
symbol: str
date_from: Optional[datetime.date]
date_to: Optional[datetime.date]
news: List[V2_FundNewsData] = []
class Config:
orm_mode = True
class V2_StockProfileData(BaseModel):
ticker: str
name: Optional[str]
country: Optional[str]
industry: Optional[str]
sector: Optional[str]
fullTimeEmployees: Optional[int]
summary: Optional[str]
website: Optional[str]
market: Optional[str]
exchange: Optional[str]
currency: Optional[str]
marketCap: Optional[float]
sharesOutstanding: Optional[int]
class Config:
orm_mode = True
class V2_StockProfile(BaseModel):
symbol: str
profile: Optional[V2_StockProfileData] = {}
class Config:
orm_mode = True
class V2_StockTradeData(BaseModel):
date: datetime.date
fund: str
direction: str
shares: int
etf_percent: float
class Config:
orm_mode = True
class V2_StockTrades(BaseModel):
symbol: str
date_from: Optional[datetime.date]
date_to: Optional[datetime.date]
trades: List[V2_StockTradeData] = []
class Config:
orm_mode = True
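# A minimal, hedged usage sketch of the response models above. The field
# values are illustrative only and are not taken from any real ARK data.
def _example_fund_holding() -> V2_FundHolding:
    holding = V2_FundHoldingData(
        date=datetime.date(2021, 1, 4),
        ticker="TSLA",
        company="TESLA INC",
        cusip="88160R101",
        shares=1000,
        market_value=1.0e6,
        weight=9.5,
        weight_rank=1,
    )
    # With orm_mode enabled, the same model can also be built from an ORM row
    # via V2_FundHoldingData.from_orm(row).
    return V2_FundHolding(symbol="ARKK", holdings=[holding])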
| 18.754098
| 47
| 0.668415
|
4de430bb582372dbc051d46c086b4cb9c0f0ecac
| 28,385
|
py
|
Python
|
nipype/interfaces/mrtrix/preprocess.py
|
eort/nipype
|
04d0159686a8d656905e9e06110287c6c60c1523
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/mrtrix/preprocess.py
|
eort/nipype
|
04d0159686a8d656905e9e06110287c6c60c1523
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/mrtrix/preprocess.py
|
eort/nipype
|
04d0159686a8d656905e9e06110287c6c60c1523
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os.path as op
from ...utils.filemanip import split_filename
from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,
File, InputMultiPath, isdefined)
class MRConvertInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='voxel-order data filename')
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output filename')
extract_at_axis = traits.Enum(
1,
2,
3,
argstr='-coord %s',
position=1,
desc=
'"Extract data only at the coordinates specified. This option specifies the Axis. Must be used in conjunction with extract_at_coordinate.'
)
extract_at_coordinate = traits.List(
traits.Float,
argstr='%s',
sep=',',
position=2,
minlen=1,
maxlen=3,
desc=
'"Extract data only at the coordinates specified. This option specifies the coordinates. Must be used in conjunction with extract_at_axis. Three comma-separated numbers giving the size of each voxel in mm.'
)
voxel_dims = traits.List(
traits.Float,
argstr='-vox %s',
sep=',',
position=3,
minlen=3,
maxlen=3,
desc=
'Three comma-separated numbers giving the size of each voxel in mm.')
output_datatype = traits.Enum(
"nii",
"float",
"char",
"short",
"int",
"long",
"double",
argstr='-output %s',
position=2,
desc=
'"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"'
) # , usedefault=True)
extension = traits.Enum(
"mif",
"nii",
"float",
"char",
"short",
"int",
"long",
"double",
position=2,
desc=
'"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"',
usedefault=True)
layout = traits.Enum(
"nii",
"float",
"char",
"short",
"int",
"long",
"double",
argstr='-output %s',
position=2,
desc=
'specify the layout of the data in memory. The actual layout produced will depend on whether the output image format can support it.'
)
resample = traits.Float(
argstr='-scale %d',
position=3,
units='mm',
desc='Apply scaling to the intensity values.')
offset_bias = traits.Float(
argstr='-scale %d',
position=3,
units='mm',
desc='Apply offset to the intensity values.')
replace_NaN_with_zero = traits.Bool(
argstr='-zero', position=3, desc="Replace all NaN values with zero.")
prs = traits.Bool(
argstr='-prs',
position=3,
desc=
"Assume that the DW gradients are specified in the PRS frame (Siemens DICOM only)."
)
class MRConvertOutputSpec(TraitedSpec):
converted = File(exists=True, desc='path/name of 4D volume in voxel order')
class MRConvert(CommandLine):
"""
Perform conversion between different file types and optionally extract a subset of the input image.
If used correctly, this program can be a very useful workhorse.
In addition to converting images between different formats, it can
be used to extract specific studies from a data set, extract a specific
region of interest, flip the images, or to scale the intensity of the images.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> mrconvert = mrt.MRConvert()
>>> mrconvert.inputs.in_file = 'dwi_FA.mif'
>>> mrconvert.inputs.out_filename = 'dwi_FA.nii'
>>> mrconvert.run() # doctest: +SKIP
"""
_cmd = 'mrconvert'
input_spec = MRConvertInputSpec
output_spec = MRConvertOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['converted'] = self.inputs.out_filename
if not isdefined(outputs['converted']):
outputs['converted'] = op.abspath(self._gen_outfilename())
else:
outputs['converted'] = op.abspath(outputs['converted'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
if isdefined(self.inputs.out_filename):
outname = self.inputs.out_filename
else:
outname = name + '_mrconvert.' + self.inputs.extension
return outname
class DWI2TensorInputSpec(CommandLineInputSpec):
in_file = InputMultiPath(
File(exists=True),
argstr='%s',
mandatory=True,
position=-2,
desc='Diffusion-weighted images')
out_filename = File(
name_template="%s_tensor.mif",
name_source="in_file",
output_name="tensor",
argstr='%s',
desc='Output tensor filename',
position=-1)
encoding_file = File(
argstr='-grad %s',
position=2,
desc=('Encoding file supplied as a 4xN text file with '
'each line is in the format [ X Y Z b ], where '
'[ X Y Z ] describe the direction of the applied '
'gradient, and b gives the b-value in units '
'(1000 s/mm^2). See FSL2MRTrix()'))
ignore_slice_by_volume = traits.List(
traits.Int,
argstr='-ignoreslices %s',
sep=' ',
position=2,
minlen=2,
maxlen=2,
desc=('Requires two values (i.e. [34 '
'1] for [Slice Volume] Ignores '
'the image slices specified '
'when computing the tensor. '
'Slice here means the z '
'coordinate of the slice to be '
'ignored.'))
ignore_volumes = traits.List(
traits.Int,
argstr='-ignorevolumes %s',
sep=' ',
position=2,
minlen=1,
        desc=('Requires a list of volumes (i.e. [2 5 6]). '
              'Ignores the image volumes specified when '
              'computing the tensor.'))
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc=("Do not display information messages or progress "
"status."))
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class DWI2TensorOutputSpec(TraitedSpec):
tensor = File(
exists=True, desc='path/name of output diffusion tensor image')
class DWI2Tensor(CommandLine):
"""
Converts diffusion-weighted images to tensor images.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> dwi2tensor = mrt.DWI2Tensor()
>>> dwi2tensor.inputs.in_file = 'dwi.mif'
>>> dwi2tensor.inputs.encoding_file = 'encoding.txt'
>>> dwi2tensor.cmdline
'dwi2tensor -grad encoding.txt dwi.mif dwi_tensor.mif'
>>> dwi2tensor.run() # doctest: +SKIP
"""
_cmd = 'dwi2tensor'
input_spec = DWI2TensorInputSpec
output_spec = DWI2TensorOutputSpec
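# Hedged example of the gradient encoding text file passed via ``-grad`` above
# (one row per DW volume, in the order X Y Z b; the numbers are illustrative):
#   0 0 0 0
#   1 0 0 1000
#   0 1 0 1000
#   0 0 1 1000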
class Tensor2VectorInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Diffusion tensor image')
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output vector filename')
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class Tensor2VectorOutputSpec(TraitedSpec):
vector = File(
exists=True,
desc=
'the output image of the major eigenvectors of the diffusion tensor image.'
)
class Tensor2Vector(CommandLine):
"""
Generates a map of the major eigenvectors of the tensors in each voxel.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> tensor2vector = mrt.Tensor2Vector()
>>> tensor2vector.inputs.in_file = 'dwi_tensor.mif'
>>> tensor2vector.run() # doctest: +SKIP
"""
_cmd = 'tensor2vector'
input_spec = Tensor2VectorInputSpec
output_spec = Tensor2VectorOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['vector'] = self.inputs.out_filename
if not isdefined(outputs['vector']):
outputs['vector'] = op.abspath(self._gen_outfilename())
else:
outputs['vector'] = op.abspath(outputs['vector'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_vector.mif'
class Tensor2FractionalAnisotropyInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Diffusion tensor image')
out_filename = File(
genfile=True,
argstr='%s',
position=-1,
desc='Output Fractional Anisotropy filename')
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class Tensor2FractionalAnisotropyOutputSpec(TraitedSpec):
FA = File(
exists=True,
desc=
        'the output image of the fractional anisotropy values of the diffusion tensor image.'
)
class Tensor2FractionalAnisotropy(CommandLine):
"""
Generates a map of the fractional anisotropy in each voxel.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> tensor2FA = mrt.Tensor2FractionalAnisotropy()
>>> tensor2FA.inputs.in_file = 'dwi_tensor.mif'
>>> tensor2FA.run() # doctest: +SKIP
"""
_cmd = 'tensor2FA'
input_spec = Tensor2FractionalAnisotropyInputSpec
output_spec = Tensor2FractionalAnisotropyOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['FA'] = self.inputs.out_filename
if not isdefined(outputs['FA']):
outputs['FA'] = op.abspath(self._gen_outfilename())
else:
outputs['FA'] = op.abspath(outputs['FA'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_FA.mif'
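# Hedged sketch: wiring the interfaces above into a nipype pipeline (node and
# workflow names are illustrative; requires nipype.pipeline.engine):
#   import nipype.pipeline.engine as pe
#   tensor = pe.Node(DWI2Tensor(in_file='dwi.mif', encoding_file='encoding.txt'),
#                    name='tensor')
#   fa = pe.Node(Tensor2FractionalAnisotropy(), name='fa')
#   wf = pe.Workflow(name='dti')
#   wf.connect(tensor, 'tensor', fa, 'in_file')
#   wf.run()  # doctest: +SKIP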
class Tensor2ApparentDiffusionInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Diffusion tensor image')
out_filename = File(
genfile=True,
argstr='%s',
position=-1,
desc='Output Fractional Anisotropy filename')
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class Tensor2ApparentDiffusionOutputSpec(TraitedSpec):
ADC = File(
exists=True,
desc=
        'the output image of the apparent diffusion coefficient (ADC) values of the diffusion tensor image.'
)
class Tensor2ApparentDiffusion(CommandLine):
"""
Generates a map of the apparent diffusion coefficient (ADC) in each voxel
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> tensor2ADC = mrt.Tensor2ApparentDiffusion()
>>> tensor2ADC.inputs.in_file = 'dwi_tensor.mif'
>>> tensor2ADC.run() # doctest: +SKIP
"""
_cmd = 'tensor2ADC'
input_spec = Tensor2ApparentDiffusionInputSpec
output_spec = Tensor2ApparentDiffusionOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['ADC'] = self.inputs.out_filename
if not isdefined(outputs['ADC']):
outputs['ADC'] = op.abspath(self._gen_outfilename())
else:
outputs['ADC'] = op.abspath(outputs['ADC'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_ADC.mif'
class MRMultiplyInputSpec(CommandLineInputSpec):
in_files = InputMultiPath(
File(exists=True),
argstr='%s',
mandatory=True,
position=-2,
desc='Input images to be multiplied')
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output image filename')
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class MRMultiplyOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output image of the multiplication')
class MRMultiply(CommandLine):
"""
Multiplies two images.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> MRmult = mrt.MRMultiply()
>>> MRmult.inputs.in_files = ['dwi.mif', 'dwi_WMProb.mif']
>>> MRmult.run() # doctest: +SKIP
"""
_cmd = 'mrmult'
input_spec = MRMultiplyInputSpec
output_spec = MRMultiplyOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_filename
if not isdefined(outputs['out_file']):
outputs['out_file'] = op.abspath(self._gen_outfilename())
else:
outputs['out_file'] = op.abspath(outputs['out_file'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_files[0])
return name + '_MRMult.mif'
class MRTrixViewerInputSpec(CommandLineInputSpec):
in_files = InputMultiPath(
File(exists=True),
argstr='%s',
mandatory=True,
position=-2,
desc='Input images to be viewed')
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class MRTrixViewerOutputSpec(TraitedSpec):
pass
class MRTrixViewer(CommandLine):
"""
Loads the input images in the MRTrix Viewer.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> MRview = mrt.MRTrixViewer()
>>> MRview.inputs.in_files = 'dwi.mif'
>>> MRview.run() # doctest: +SKIP
"""
_cmd = 'mrview'
input_spec = MRTrixViewerInputSpec
output_spec = MRTrixViewerOutputSpec
def _list_outputs(self):
return
class MRTrixInfoInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Input images to be read')
class MRTrixInfoOutputSpec(TraitedSpec):
pass
class MRTrixInfo(CommandLine):
"""
Prints out relevant header information found in the image specified.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> MRinfo = mrt.MRTrixInfo()
>>> MRinfo.inputs.in_file = 'dwi.mif'
>>> MRinfo.run() # doctest: +SKIP
"""
_cmd = 'mrinfo'
input_spec = MRTrixInfoInputSpec
output_spec = MRTrixInfoOutputSpec
def _list_outputs(self):
return
class GenerateWhiteMatterMaskInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-3,
desc='Diffusion-weighted images')
binary_mask = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Binary brain mask')
out_WMProb_filename = File(
genfile=True,
argstr='%s',
position=-1,
desc='Output WM probability image filename')
encoding_file = File(
exists=True,
argstr='-grad %s',
mandatory=True,
position=1,
desc=
'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix'
)
noise_level_margin = traits.Float(
argstr='-margin %s',
desc=
'Specify the width of the margin on either side of the image to be used to estimate the noise level (default = 10)'
)
class GenerateWhiteMatterMaskOutputSpec(TraitedSpec):
WMprobabilitymap = File(exists=True, desc='WMprobabilitymap')
class GenerateWhiteMatterMask(CommandLine):
"""
Generates a white matter probability mask from the DW images.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> genWM = mrt.GenerateWhiteMatterMask()
>>> genWM.inputs.in_file = 'dwi.mif'
>>> genWM.inputs.encoding_file = 'encoding.txt'
>>> genWM.run() # doctest: +SKIP
"""
_cmd = 'gen_WM_mask'
input_spec = GenerateWhiteMatterMaskInputSpec
output_spec = GenerateWhiteMatterMaskOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['WMprobabilitymap'] = op.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_WMProb_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_WMProb.mif'
class ErodeInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Input mask image to be eroded')
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output image filename')
number_of_passes = traits.Int(
argstr='-npass %s', desc='the number of passes (default: 1)')
dilate = traits.Bool(
argstr='-dilate',
position=1,
desc="Perform dilation rather than erosion")
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class ErodeOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output image')
class Erode(CommandLine):
"""
    Erode (or dilate) a mask (i.e. binary) image
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> erode = mrt.Erode()
>>> erode.inputs.in_file = 'mask.mif'
>>> erode.run() # doctest: +SKIP
"""
_cmd = 'erode'
input_spec = ErodeInputSpec
output_spec = ErodeOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_filename
if not isdefined(outputs['out_file']):
outputs['out_file'] = op.abspath(self._gen_outfilename())
else:
outputs['out_file'] = op.abspath(outputs['out_file'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_erode.mif'
class ThresholdInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='The input image to be thresholded')
out_filename = File(
genfile=True,
argstr='%s',
position=-1,
desc='The output binary image mask.')
absolute_threshold_value = traits.Float(
argstr='-abs %s',
desc='Specify threshold value as absolute intensity.')
percentage_threshold_value = traits.Float(
argstr='-percent %s',
desc=
'Specify threshold value as a percentage of the peak intensity in the input image.'
)
invert = traits.Bool(
argstr='-invert', position=1, desc="Invert output binary mask")
replace_zeros_with_NaN = traits.Bool(
argstr='-nan', position=1, desc="Replace all zero values with NaN")
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class ThresholdOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='The output binary image mask.')
class Threshold(CommandLine):
"""
Create bitwise image by thresholding image intensity.
By default, the threshold level is determined using a histogram analysis
to cut out the background. Otherwise, the threshold intensity can be
specified using command line options.
Note that only the first study is used for thresholding.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> thresh = mrt.Threshold()
>>> thresh.inputs.in_file = 'wm_mask.mif'
>>> thresh.run() # doctest: +SKIP
"""
_cmd = 'threshold'
input_spec = ThresholdInputSpec
output_spec = ThresholdOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_filename
if not isdefined(outputs['out_file']):
outputs['out_file'] = op.abspath(self._gen_outfilename())
else:
outputs['out_file'] = op.abspath(outputs['out_file'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_thresh.mif'
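# A minimal, hypothetical sketch (not part of the original interface set) of how the
# Erode and Threshold interfaces defined above might be chained in a nipype Workflow;
# the node names and the input file 'mask.mif' are illustrative assumptions only:
#
#   import nipype.pipeline.engine as pe
#   import nipype.interfaces.mrtrix as mrt
#   erode = pe.Node(mrt.Erode(), name='erode_mask')
#   erode.inputs.in_file = 'mask.mif'
#   thresh = pe.Node(mrt.Threshold(), name='binarize')
#   wf = pe.Workflow(name='mask_prep')
#   wf.connect(erode, 'out_file', thresh, 'in_file')
#   wf.run()  # doctest: +SKIP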
class MedianFilter3DInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
argstr='%s',
mandatory=True,
position=-2,
desc='Input images to be smoothed')
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output image filename')
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class MedianFilter3DOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output image')
class MedianFilter3D(CommandLine):
"""
Smooth images using a 3x3x3 median filter.
Example
-------
>>> import nipype.interfaces.mrtrix as mrt
>>> median3d = mrt.MedianFilter3D()
>>> median3d.inputs.in_file = 'mask.mif'
>>> median3d.run() # doctest: +SKIP
"""
_cmd = 'median3D'
input_spec = MedianFilter3DInputSpec
output_spec = MedianFilter3DOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_filename
if not isdefined(outputs['out_file']):
outputs['out_file'] = op.abspath(self._gen_outfilename())
else:
outputs['out_file'] = op.abspath(outputs['out_file'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_file)
return name + '_median3D.mif'
class MRTransformInputSpec(CommandLineInputSpec):
in_files = InputMultiPath(
File(exists=True),
argstr='%s',
mandatory=True,
position=-2,
desc='Input images to be transformed')
out_filename = File(
genfile=True, argstr='%s', position=-1, desc='Output image')
invert = traits.Bool(
argstr='-inverse',
position=1,
desc="Invert the specified transform before using it")
replace_transform = traits.Bool(
argstr='-replace',
position=1,
desc=
"replace the current transform by that specified, rather than applying it to the current transform"
)
transformation_file = File(
exists=True,
argstr='-transform %s',
position=1,
desc='The transform to apply, in the form of a 4x4 ascii file.')
template_image = File(
exists=True,
argstr='-template %s',
position=1,
desc='Reslice the input image to match the specified template image.')
reference_image = File(
exists=True,
argstr='-reference %s',
position=1,
desc=
'in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. Note that this implicitly sets the -replace option.'
)
flip_x = traits.Bool(
argstr='-flipx',
position=1,
desc=
"assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). This is required to handle transform matrices produced by FSL's FLIRT command. This is only used in conjunction with the -reference option."
)
quiet = traits.Bool(
argstr='-quiet',
position=1,
desc="Do not display information messages or progress status.")
debug = traits.Bool(
argstr='-debug', position=1, desc="Display debugging messages.")
class MRTransformOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output image of the transformation')
class MRTransform(CommandLine):
"""
Apply spatial transformations or reslice images
Example
-------
    >>> import nipype.interfaces.mrtrix as mrt
    >>> MRxform = mrt.MRTransform()
>>> MRxform.inputs.in_files = 'anat_coreg.mif'
>>> MRxform.run() # doctest: +SKIP
"""
_cmd = 'mrtransform'
input_spec = MRTransformInputSpec
output_spec = MRTransformOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = self.inputs.out_filename
if not isdefined(outputs['out_file']):
outputs['out_file'] = op.abspath(self._gen_outfilename())
else:
outputs['out_file'] = op.abspath(outputs['out_file'])
return outputs
def _gen_filename(self, name):
if name == 'out_filename':
return self._gen_outfilename()
else:
return None
def _gen_outfilename(self):
_, name, _ = split_filename(self.inputs.in_files[0])
return name + '_MRTransform.mif'
avg_line_length: 30.488722 | max_line_length: 309 | alphanum_fraction: 0.606799

hexsha: ab3f94dae95bd31ba97f85594ef76e1fe21cace3 | size: 9,543 | ext: py | lang: Python
max_stars_repo_path: nexus/examples/qmcpack/graphene/graphene.py | max_stars_repo_name: markdewing/qmcpack | max_stars_repo_head_hexsha: 4bd3e10ceb0faf8d2b3095338da5a56eda0dc1ba | max_stars_repo_licenses: ["NCSA"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: nexus/examples/qmcpack/graphene/graphene.py | max_issues_repo_name: markdewing/qmcpack | max_issues_repo_head_hexsha: 4bd3e10ceb0faf8d2b3095338da5a56eda0dc1ba | max_issues_repo_licenses: ["NCSA"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: nexus/examples/qmcpack/graphene/graphene.py | max_forks_repo_name: markdewing/qmcpack | max_forks_repo_head_hexsha: 4bd3e10ceb0faf8d2b3095338da5a56eda0dc1ba | max_forks_repo_licenses: ["NCSA"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2019-07-23T17:44:39.000Z | max_forks_repo_forks_event_max_datetime: 2019-07-23T17:44:39.000Z
#! /usr/bin/env python
from nexus import settings,job,run_project
from nexus import generate_physical_system
from nexus import generate_pwscf
from nexus import generate_pw2qmcpack
from nexus import generate_qmcpack
from nexus import loop,linear,vmc,dmc
# general settings for nexus
settings(
pseudo_dir = '../pseudopotentials',# directory with all pseudopotentials
sleep = 3, # check on runs every 'sleep' seconds
generate_only = 0, # only make input files
status_only = 0, # only show status of runs
machine = 'ws16', # local machine is 16 core workstation
)
# generate the graphene physical system
graphene = generate_physical_system(
lattice = 'hexagonal', # hexagonal cell shape
cell = 'primitive', # primitive cell
centering = 'P', # primitive basis centering
constants = (2.462,10.0), # a,c constants
units = 'A', # in Angstrom
atoms = ('C','C'), # C primitive atoms
basis = [[ 0 , 0 , 0], # basis vectors
[2./3,1./3, 0]],
tiling = (2,2,1), # tiling of primitive cell
kgrid = (1,1,1), # Monkhorst-Pack grid
kshift = (.5,.5,.5), # and shift
C = 4 # C has 4 valence electrons
)
# list of simulations in workflow
sims = []
# scf run produces charge density
scf = generate_pwscf(
# nexus inputs
identifier = 'scf', # identifier/file prefix
path = 'graphene/scf', # directory for scf run
job = job(cores=16), # run on 16 cores
pseudos = ['C.BFD.upf'], # pwscf PP file
system = graphene, # run graphene
# input format selector
input_type = 'scf', # scf, nscf, relax, or generic
# pwscf input parameters
input_dft = 'lda', # dft functional
ecut = 150 , # planewave energy cutoff (Ry)
conv_thr = 1e-6, # scf convergence threshold (Ry)
mixing_beta = .7, # charge mixing factor
kgrid = (8,8,8), # MP grid of primitive cell
kshift = (1,1,1), # to converge charge density
wf_collect = False, # don't collect orbitals
use_folded = True, # use primitive rep of graphene
)
sims.append(scf)
# nscf run to produce orbitals for jastrow optimization
nscf_opt = generate_pwscf(
# nexus inputs
identifier = 'nscf', # identifier/file prefix
path = 'graphene/nscf_opt', # directory for nscf run
job = job(cores=16), # run on 16 cores
pseudos = ['C.BFD.upf'], # pwscf PP file
system = graphene, # run graphene
# input format selector
input_type = 'nscf', # scf, nscf, relax, or generic
# pwscf input parameters
input_dft = 'lda', # dft functional
ecut = 150 , # planewave energy cutoff (Ry)
conv_thr = 1e-6, # scf convergence threshold (Ry)
mixing_beta = .7, # charge mixing factor
nosym = True, # don't symmetrize k-points
use_folded = True, # use primitive rep of graphene
wf_collect = True, # write out orbitals
kgrid = (1,1,1), # single k-point for opt
kshift = (0,0,0), # gamma point
# workflow dependencies
dependencies = (scf,'charge_density'),
)
sims.append(nscf_opt)
# orbital conversion job for jastrow optimization
p2q_opt = generate_pw2qmcpack(
# nexus inputs
identifier = 'p2q',
path = 'graphene/nscf_opt',
job = job(cores=1),
# pw2qmcpack input parameters
write_psir = False,
# workflow dependencies
dependencies = (nscf_opt,'orbitals'),
)
sims.append(p2q_opt)
# Jastrow optimization
opt = generate_qmcpack(
# nexus inputs
identifier = 'opt', # identifier/file prefix
path = 'graphene/opt', # directory for opt run
job = job(cores=16,app='qmcpack'),
pseudos = ['C.BFD.xml'], # qmcpack PP file
system = graphene, # run graphene
# input format selector
input_type = 'basic',
# qmcpack input parameters
corrections = [],
jastrows = [('J1','bspline',8), # 1 body bspline jastrow
('J2','bspline',8)], # 2 body bspline jastrow
calculations = [
loop(max = 6, # No. of loop iterations
qmc = linear( # linearized optimization method
energy = 0.0, # cost function
unreweightedvariance = 1.0, # is all unreweighted variance
reweightedvariance = 0.0, # no energy or r.w. var.
timestep = 0.5, # vmc timestep (1/Ha)
warmupsteps = 100, # MC steps before data collected
samples = 16000,# samples used for cost function
stepsbetweensamples = 10, # steps between uncorr. samples
blocks = 10, # ignore this
minwalkers = 0.1,# and this
bigchange = 15.0,# and this
alloweddifference = 1e-4 # and this, for now
)
)
],
# workflow dependencies
dependencies = (p2q_opt,'orbitals'),
)
sims.append(opt)
# nscf run to produce orbitals for final dmc
nscf = generate_pwscf(
# nexus inputs
identifier = 'nscf', # identifier/file prefix
path = 'graphene/nscf', # directory for nscf run
job = job(cores=16), # run on 16 cores
pseudos = ['C.BFD.upf'], # pwscf PP file
system = graphene, # run graphene
# input format selector
input_type = 'nscf', # scf, nscf, relax, or generic
# pwscf input parameters
input_dft = 'lda', # dft functional
ecut = 150 , # planewave energy cutoff (Ry)
conv_thr = 1e-6, # scf convergence threshold (Ry)
mixing_beta = .7, # charge mixing factor
nosym = True, # don't symmetrize k-points
use_folded = True, # use primitive rep of graphene
wf_collect = True, # write out orbitals
# workflow dependencies
dependencies = (scf,'charge_density'),
)
sims.append(nscf)
# orbital conversion job for final dmc
p2q = generate_pw2qmcpack(
# nexus inputs
identifier = 'p2q',
path = 'graphene/nscf',
job = job(cores=1),
# pw2qmcpack input parameters
write_psir = False,
# workflow dependencies
dependencies = (nscf,'orbitals'),
)
sims.append(p2q)
# final dmc run
qmc = generate_qmcpack(
# nexus inputs
identifier = 'qmc', # identifier/file prefix
path = 'graphene/qmc', # directory for dmc run
job = job(cores=16,app='qmcpack'),
pseudos = ['C.BFD.xml'], # qmcpack PP file
system = graphene, # run graphene
# input format selector
input_type = 'basic',
# qmcpack input parameters
corrections = [], # no finite size corrections
jastrows = [], # overwritten from opt
calculations = [ # qmcpack input parameters for qmc
vmc( # vmc parameters
timestep = 0.5, # vmc timestep (1/Ha)
warmupsteps = 100, # No. of MC steps before data is collected
blocks = 200, # No. of data blocks recorded in scalar.dat
steps = 10, # No. of steps per block
substeps = 3, # MC steps taken w/o computing E_local
samplesperthread = 40 # No. of dmc walkers per thread
),
dmc( # dmc parameters
timestep = 0.01, # dmc timestep (1/Ha)
warmupsteps = 50, # No. of MC steps before data is collected
blocks = 400, # No. of data blocks recorded in scalar.dat
steps = 5, # No. of steps per block
nonlocalmoves = True # use Casula's T-moves
), # (retains variational principle for NLPP's)
],
# workflow dependencies
dependencies = [(p2q,'orbitals'),
(opt,'jastrow')],
)
# nexus monitors all runs
run_project(sims)
# print out the total energy
performed_runs = not settings.generate_only and not settings.status_only
if performed_runs:
# get the qmcpack analyzer object
# it contains all of the statistically analyzed data from the run
qa = qmc.load_analyzer_image()
# get the local energy from dmc.dat
le = qa.dmc[1].dmc.LocalEnergy # dmc series 1, dmc.dat, local energy
# print the total energy for the 8 atom system
    print('The DMC ground state energy for graphene is:')
    print('    {0} +/- {1} Ha'.format(le.mean, le.error))
#end if
avg_line_length: 42.039648 | max_line_length: 81 | alphanum_fraction: 0.529184

hexsha: d21e6ed26c7087518edd8a20cecc1c634d7382b1 | size: 8,637 | ext: py | lang: Python
max_stars_repo_path: python/cm/unit_tests/test_config_group.py | max_stars_repo_name: arenadata/adcm | max_stars_repo_head_hexsha: a499caa30adc2a53e7b3f46c96a865f9e4079e4e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 16 | max_stars_repo_stars_event_min_datetime: 2019-11-28T18:05:21.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-08T18:09:18.000Z
max_issues_repo_path: python/cm/unit_tests/test_config_group.py | max_issues_repo_name: arenadata/adcm | max_issues_repo_head_hexsha: a499caa30adc2a53e7b3f46c96a865f9e4079e4e | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 1,127 | max_issues_repo_issues_event_min_datetime: 2019-11-29T08:57:25.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T20:21:32.000Z
max_forks_repo_path: python/cm/unit_tests/test_config_group.py | max_forks_repo_name: arenadata/adcm | max_forks_repo_head_hexsha: a499caa30adc2a53e7b3f46c96a865f9e4079e4e | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 10 | max_forks_repo_forks_event_min_datetime: 2019-11-28T18:05:06.000Z | max_forks_repo_forks_event_max_datetime: 2022-01-13T06:16:40.000Z
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from cm.models import GroupConfig, ConfigLog
from cm.unit_tests import utils
class GroupConfigTest(TestCase):
"""Tests `GroupConfig` model"""
def setUp(self) -> None:
self.cluster_config = {'group': {'string': 'string'}, 'activatable_group': {'integer': 1}}
self.cluster_attr = {'activatable_group': {'active': True}}
self.cluster = utils.gen_cluster()
utils.gen_prototype_config(
prototype=self.cluster.prototype, name='group', field_type='group', display_name='group'
)
utils.gen_prototype_config(
prototype=self.cluster.prototype,
name='group',
field_type='string',
subname='string',
display_name='string',
)
utils.gen_prototype_config(
prototype=self.cluster.prototype,
name='activatable_group',
field_type='group',
display_name='activatable_group',
)
utils.gen_prototype_config(
prototype=self.cluster.prototype,
name='activatable_group',
field_type='integer',
subname='integer',
display_name='integer',
)
self.cluster.config = utils.gen_config(config=self.cluster_config, attr=self.cluster_attr)
self.cluster.save()
@staticmethod
def create_group(name, object_id, model_name):
return GroupConfig.objects.create(
object_id=object_id, object_type=ContentType.objects.get(model=model_name), name=name
)
def test_create_group(self):
"""Test create groups for objects"""
group = self.create_group('group', self.cluster.id, 'cluster')
parent_cl = ConfigLog.objects.get(id=self.cluster.config.current)
parent_cl.save()
cl = ConfigLog.objects.get(id=group.config.current)
self.assertDictEqual(parent_cl.config, cl.config)
self.assertDictEqual(parent_cl.attr, {'activatable_group': {'active': True}})
cl_attr = {
'activatable_group': {'active': True},
'group_keys': {'group': {'string': False}, 'activatable_group': {'integer': False}},
'custom_group_keys': {
'group': {'string': False},
'activatable_group': {'integer': False},
},
}
self.assertDictEqual(cl.attr, cl_attr)
def test_get_group_config(self):
"""Test get_group_config() method"""
group = self.create_group('group', self.cluster.id, 'cluster')
self.assertDictEqual(group.get_diff_config(), {})
cl = ConfigLog.objects.get(id=group.config.current)
cl.config = {'group': {'string': 'str'}, 'activatable_group': {'integer': 1}}
cl.attr = {
'activatable_group': {'active': True},
'group_keys': {'group': {'string': True}, 'activatable_group': {'integer': False}},
}
cl.save()
self.assertDictEqual(group.get_diff_config(), {'group': {'string': 'str'}})
def test_get_config_spec(self):
"""Test get_config_spec() method"""
group = self.create_group('group', self.cluster.id, 'cluster')
spec = {
'group': {
'type': 'group',
'group_customization': False,
'fields': {
'string': {
'type': 'string',
'group_customization': False,
}
},
},
'activatable_group': {
'type': 'group',
'group_customization': False,
'fields': {
'integer': {
'type': 'integer',
'group_customization': False,
}
},
},
}
self.assertDictEqual(group.get_config_spec(), spec)
def test_create_group_keys(self):
"""Test create_group_keys() method"""
group = self.create_group('group', self.cluster.id, 'cluster')
utils.gen_prototype_config(
prototype=self.cluster.prototype,
name='level1_1',
field_type='string',
group_customization=True,
)
utils.gen_prototype_config(
prototype=self.cluster.prototype,
name='level1_2',
field_type='integer',
group_customization=False,
)
utils.gen_prototype_config(
prototype=self.cluster.prototype,
name='level1_3',
field_type='group',
group_customization=False,
)
utils.gen_prototype_config(
prototype=self.cluster.prototype,
name='level1_3',
subname='level2_1',
field_type='list',
group_customization=True,
)
test_group_keys = {
'activatable_group': {'integer': False},
'group': {'string': False},
'level1_1': False,
'level1_2': False,
'level1_3': {'level2_1': False},
}
test_custom_group_keys = {
'activatable_group': {'integer': False},
'group': {'string': False},
'level1_1': True,
'level1_2': False,
'level1_3': {'level2_1': True},
}
group_keys, custom_group_keys = group.create_group_keys(group.get_config_spec())
self.assertDictEqual(test_group_keys, group_keys)
self.assertDictEqual(test_custom_group_keys, custom_group_keys)
def test_update_parent_config(self):
"""Test update parent config for group"""
group = self.create_group('group', self.cluster.id, 'cluster')
cl = ConfigLog.objects.get(id=group.config.current)
cl.config = {'group': {'string': 'str'}, 'activatable_group': {'integer': 1}}
cl.attr = {
'activatable_group': {'active': True},
'group_keys': {'group': {'string': True}, 'activatable_group': {'integer': False}},
}
cl.save()
parent_cl = ConfigLog.objects.get(id=self.cluster.config.current)
parent_cl.config = {'group': {'string': 'string'}, 'activatable_group': {'integer': 100}}
parent_cl.save()
group.refresh_from_db()
cl = ConfigLog.objects.get(id=group.config.current)
self.assertDictEqual(
cl.config, {'group': {'string': 'str'}, 'activatable_group': {'integer': 100}}
)
parent_cl.config = {'group': {'string': 'string'}, 'activatable_group': {'integer': 100}}
parent_cl.attr = {'activatable_group': {'active': False}}
parent_cl.save()
group.refresh_from_db()
cl = ConfigLog.objects.get(id=group.config.current)
self.assertDictEqual(
cl.config, {'group': {'string': 'str'}, 'activatable_group': {'integer': 100}}
)
self.assertDictEqual(
cl.attr,
{
'activatable_group': {'active': True},
'group_keys': {'group': {'string': True}, 'activatable_group': {'integer': False}},
'custom_group_keys': {
'group': {'string': False},
'activatable_group': {'integer': False},
},
},
)
def test_create_config_for_group(self):
"""Test create new config for GroupConfig"""
group = self.create_group('group', self.cluster.id, 'cluster')
cl_current = ConfigLog.objects.get(id=group.config.current)
attr = copy.deepcopy(cl_current.attr)
attr.update(
{
'custom_group_keys': {
'group': {'string': True},
'activatable_group': {'integer': True},
}
}
)
cl_new = ConfigLog.objects.create(
obj_ref=cl_current.obj_ref, config=cl_current.config, attr=attr
)
self.assertDictEqual(cl_current.attr, cl_new.attr)
avg_line_length: 38.905405 | max_line_length: 100 | alphanum_fraction: 0.569179

hexsha: 0c6a664f45e026f0f3f989ce2b33e292675e978c | size: 4,190 | ext: py | lang: Python
max_stars_repo_path: litex_things/deps/litex_boards/litex_boards/official/targets/genesys2.py | max_stars_repo_name: bjonnh/fomu-playground | max_stars_repo_head_hexsha: 9f95ed7b28d15ce219d09c16c2c8d6b5594adceb | max_stars_repo_licenses: ["0BSD"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: litex_things/deps/litex_boards/litex_boards/official/targets/genesys2.py | max_issues_repo_name: bjonnh/fomu-playground | max_issues_repo_head_hexsha: 9f95ed7b28d15ce219d09c16c2c8d6b5594adceb | max_issues_repo_licenses: ["0BSD"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: litex_things/deps/litex_boards/litex_boards/official/targets/genesys2.py | max_forks_repo_name: bjonnh/fomu-playground | max_forks_repo_head_hexsha: 9f95ed7b28d15ce219d09c16c2c8d6b5594adceb | max_forks_repo_licenses: ["0BSD"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python3
# This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
import argparse
from migen import *
from litex_boards.platforms import genesys2
from litex.soc.cores.clock import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.modules import MT41J256M16
from litedram.phy import s7ddrphy
from liteeth.phy.s7rgmii import LiteEthPHYRGMII
from liteeth.mac import LiteEthMAC
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
# # #
self.cd_sys.clk.attr.add("keep")
self.cd_sys4x.clk.attr.add("keep")
self.submodules.pll = pll = S7MMCM(speedgrade=-2)
self.comb += pll.reset.eq(~platform.request("cpu_reset_n"))
pll.register_clkin(platform.request("clk200"), 200e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
pll.create_clkout(self.cd_clk200, 200e6)
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCSDRAM):
def __init__(self, sys_clk_freq=int(125e6), integrated_rom_size=0x8000, **kwargs):
platform = genesys2.Platform()
SoCSDRAM.__init__(self, platform, clk_freq=sys_clk_freq,
integrated_rom_size=integrated_rom_size,
integrated_sram_size=0x8000,
**kwargs)
self.submodules.crg = _CRG(platform, sys_clk_freq)
# sdram
self.submodules.ddrphy = s7ddrphy.K7DDRPHY(platform.request("ddram"), sys_clk_freq=sys_clk_freq)
self.add_csr("ddrphy")
sdram_module = MT41J256M16(self.clk_freq, "1:4")
self.register_sdram(self.ddrphy,
sdram_module.geom_settings,
sdram_module.timing_settings)
# EthernetSoC ------------------------------------------------------------------------------------------
class EthernetSoC(BaseSoC):
mem_map = {
"ethmac": 0xb0000000,
}
mem_map.update(BaseSoC.mem_map)
def __init__(self, **kwargs):
BaseSoC.__init__(self, integrated_rom_size=0x10000, **kwargs)
self.submodules.ethphy = LiteEthPHYRGMII(self.platform.request("eth_clocks"),
self.platform.request("eth"))
self.add_csr("ethphy")
self.submodules.ethmac = LiteEthMAC(phy=self.ethphy, dw=32,
interface="wishbone", endianness=self.cpu.endianness)
self.add_wb_slave(self.mem_map["ethmac"], self.ethmac.bus, 0x2000)
self.add_memory_region("ethmac", self.mem_map["ethmac"], 0x2000, io_region=True)
self.add_csr("ethmac")
self.add_interrupt("ethmac")
self.ethphy.crg.cd_eth_rx.clk.attr.add("keep")
self.ethphy.crg.cd_eth_tx.clk.attr.add("keep")
self.platform.add_period_constraint(self.ethphy.crg.cd_eth_rx.clk, 1e9/125e6)
self.platform.add_period_constraint(self.ethphy.crg.cd_eth_tx.clk, 1e9/125e6)
self.platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.ethphy.crg.cd_eth_rx.clk,
self.ethphy.crg.cd_eth_tx.clk)
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Genesys2")
builder_args(parser)
soc_sdram_args(parser)
parser.add_argument("--with-ethernet", action="store_true",
help="enable Ethernet support")
args = parser.parse_args()
cls = EthernetSoC if args.with_ethernet else BaseSoC
soc = cls(**soc_sdram_argdict(args))
builder = Builder(soc, **builder_argdict(args))
builder.build()
if __name__ == "__main__":
main()
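# Illustrative invocation only (assumes LiteX and its dependencies are installed);
# the Ethernet variant is selected with the flag defined above:
#   python3 genesys2.py --with-ethernet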
avg_line_length: 37.410714 | max_line_length: 104 | alphanum_fraction: 0.608831

hexsha: 94442a6d5035396531eddb30b9580ab4f6026b49 | size: 27,999 | ext: py | lang: Python
max_stars_repo_path: interfaces/python/test/nodes_test.py | max_stars_repo_name: shawncal/ELL | max_stars_repo_head_hexsha: aa491f053692f985c79335d2f3f95de754ff1349 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-09-18T04:38:45.000Z | max_stars_repo_stars_event_max_datetime: 2020-09-18T04:38:45.000Z
max_issues_repo_path: interfaces/python/test/nodes_test.py | max_issues_repo_name: awesomemachinelearning/ELL | max_issues_repo_head_hexsha: cb897e3aec148a1e9bd648012b5f53ab9d0dd20c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: interfaces/python/test/nodes_test.py | max_forks_repo_name: awesomemachinelearning/ELL | max_forks_repo_head_hexsha: cb897e3aec148a1e9bd648012b5f53ab9d0dd20c | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-07-30T11:34:56.000Z | max_forks_repo_forks_event_max_datetime: 2020-07-30T11:34:56.000Z
import json
import math
import os
from testing import Testing
import numpy as np
import ell_helper # noqa: F401
import ell
script_path = os.path.dirname(os.path.abspath(__file__))
def test_with_serialization(testing, map, test_name, callback, context):
result = []
for i in range(3):
if i > 0:
filename = "{}{}.json".format(test_name, i)
map.Save(filename)
try:
with open(filename, "r") as f:
json.load(f)
except Exception as e:
raise Exception("### ELL model is not valid json: {}".format(e))
map = ell.model.Map(filename)
result += [callback(testing, map, i, context)]
return result
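# Illustrative usage (names are placeholders): a callback with the signature
# callback(testing, map, iteration, context) is invoked once per pass, and its return
# values are collected across the three passes, e.g.
#   results = test_with_serialization(testing, map, "my_test", my_callback, None)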
def test_reorder(testing):
# Test a model that reorders the input data
model = ell.model.Model()
x = np.array([1, 2, 3, 4, 5, 6]).reshape((2, 1, 3)).astype(np.float32)
order = [2, 0, 1]
expected = np.transpose(x, tuple(order))
layout = ell.model.PortMemoryLayout(list(x.shape))
input = model.AddInput(layout, ell.nodes.PortType.real)
reorder = model.AddReorderData(input, order)
layout = ell.model.PortMemoryLayout(list(expected.shape))
output = model.AddOutput(layout, reorder)
map = ell.model.Map(model, input, output)
result = map.Compute(x)
testing.ProcessTest("Testing ReorderDataNode Compute",
testing.IsEqual(np.array(result), expected.ravel()))
compiled = map.Compile("host", "reorder_test", "predict")
result2 = compiled.Compute(x)
testing.ProcessTest("Testing ReorderDataNode Compiled",
testing.IsEqual(np.array(result2), expected.ravel()))
def test_multiply(testing):
# Test a model that multiplies input vector by a constant matrix
model = ell.model.Model()
x = np.array([1, 2, 3])
layout = ell.model.PortMemoryLayout([int(3)])
input = model.AddInput(layout, ell.nodes.PortType.real)
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]).astype(np.float).reshape((3, 3))
matrix = model.AddConstant(a.ravel(), ell.model.PortMemoryLayout([3, 3]), ell.nodes.PortType.real)
multiply = model.AddMatrixMultiply(matrix, input)
output = model.AddOutput(layout, multiply)
map = ell.model.Map(model, input, output)
expected = a.dot(x)
result = map.Compute(x)
testing.ProcessTest("Testing AddMatrixMultiply Compute",
testing.IsEqual(np.array(result), expected))
compiled = map.Compile("host", "multiply_test", "predict")
result2 = compiled.Compute(x)
testing.ProcessTest("Testing AddMatrixMultiply Compiled",
testing.IsEqual(np.array(result2), expected))
class UnaryTest:
def __init__(self, name, op, func):
self.name = name
self.op = op
self.func = func
def hard_sigmoid(x):
scale = 0.2
bias = 0.5
if x < -bias / scale:
return 0
if x > (1 - bias) / scale:
return 1
return (scale * x) + bias
def hard_tanh(x):
if x < -1:
return -1
elif x > 1:
return 1
return x
def sigmoid(x):
return 1.0 / (math.exp(-x) + 1)
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def sign(x):
if x >= 0:
return 1
return -1
def test_unary(testing):
# Test a model that performs a unary operation
for test in [UnaryTest("abs", ell.nodes.UnaryOperationType.abs, lambda x: abs(x)),
UnaryTest("cos", ell.nodes.UnaryOperationType.cos, lambda x: math.cos(x)),
UnaryTest("exp", ell.nodes.UnaryOperationType.exp, lambda x: math.exp(x)),
UnaryTest("hardSigmoid", ell.nodes.UnaryOperationType.hardSigmoid, lambda x: hard_sigmoid(x)),
UnaryTest("hardTanh", ell.nodes.UnaryOperationType.hardTanh, lambda x: hard_tanh(x)),
UnaryTest("log", ell.nodes.UnaryOperationType.log, lambda x: math.log(x)),
UnaryTest("sigmoid", ell.nodes.UnaryOperationType.sigmoid, lambda x: sigmoid(x)),
UnaryTest("sign", ell.nodes.UnaryOperationType.sign, lambda x: sign(x)),
UnaryTest("sin", ell.nodes.UnaryOperationType.sin, lambda x: math.sin(x)),
UnaryTest("softmax", ell.nodes.UnaryOperationType.softmax, lambda x: softmax(x)),
UnaryTest("sqrt", ell.nodes.UnaryOperationType.sqrt, lambda x: math.sqrt(x)),
UnaryTest("square", ell.nodes.UnaryOperationType.square, lambda x: x * x),
UnaryTest("tanh", ell.nodes.UnaryOperationType.tanh, lambda x: math.tanh(x))]:
model = ell.model.Model()
layout = ell.model.PortMemoryLayout([int(10)])
input = model.AddInput(layout, ell.nodes.PortType.smallReal)
a = np.array(range(10)).astype(np.float32) + 1
multiply = model.AddUnaryOperation(input, test.op)
output = model.AddOutput(layout, multiply)
expected = np.array([test.func(x) for x in a])
if test.name == "softmax":
expected = softmax(a)
map = ell.model.Map(model, input, output)
result = map.Compute(a)
testing.ProcessTest("Testing AddUnaryOperation {}".format(test.name),
testing.IsEqual(np.array(result), expected, tol=1e-6))
compiled = map.Compile("host", "test", "predict")
result = compiled.Compute(a)
testing.ProcessTest("Testing AddUnaryOperation Compiled {}".format(test.name),
testing.IsEqual(np.array(result), expected, tol=1e-6))
def load_vad_data():
vad_data = os.path.join(script_path, "..", "..", "..", "libraries", "dsp", "VadData.txt")
if not os.path.isfile(vad_data):
vad_data = os.path.join(script_path, "..", "..", "..", "libraries", "dsp", "test", "src", "VadData.txt")
dataset = ell.data.AutoSupervisedDataset()
dataset.Load(vad_data)
return dataset
def add_vad_node(builder, ell_model, input_node):
sample_rate = 8000 # this is the same rate used to generate VadData.txt
frame_duration = 0.032
tau_up = 1.54
tau_down = 0.074326
large_input = 2.400160
gain_att = 0.002885
threshold_up = 3.552713
threshold_down = 0.931252
level_threshold = 0.007885
return builder.AddVoiceActivityDetectorNode(
ell_model, ell.nodes.PortElements(input_node.GetOutputPort("output")), sample_rate, frame_duration,
tau_up, tau_down, large_input, gain_att, threshold_up, threshold_down, level_threshold)
def add_vad_node2(model, input_node):
sample_rate = 8000 # this is the same rate used to generate VadData.txt
frame_duration = 0.032
tau_up = 1.54
tau_down = 0.074326
large_input = 2.400160
gain_att = 0.002885
threshold_up = 3.552713
threshold_down = 0.931252
level_threshold = 0.007885
return model.AddVoiceActivityDetector(input_node, sample_rate, frame_duration, tau_up, tau_down, large_input,
gain_att, threshold_up, threshold_down, level_threshold)
def test_voice_activity_node(testing):
builder = ell.model.ModelBuilder()
ell_model = ell.model.Model()
dataset = load_vad_data()
size = dataset.NumFeatures()
input_shape = ell.model.PortMemoryLayout([int(size)])
output_shape = ell.model.PortMemoryLayout([1])
input_node = builder.AddInputNode(ell_model, input_shape, ell.nodes.PortType.real)
vad_node = add_vad_node(builder, ell_model, input_node)
# cast the integer output of VAD to double since our CompiledMap doesn't yet support having
# different input and output types.
cast_node = builder.AddTypeCastNode(ell_model, ell.nodes.PortElements(vad_node.GetOutputPort("output")),
ell.nodes.PortType.real)
output_node = builder.AddOutputNode(ell_model, output_shape,
ell.nodes.PortElements(cast_node.GetOutputPort("output")))
map = ell.model.Map(ell_model, input_node, ell.nodes.PortElements(output_node.GetOutputPort("output")))
compiler_settings = ell.model.MapCompilerOptions()
compiler_settings.useBlas = False # not resolvable on our Linux test machines...
optimizer_options = ell.model.ModelOptimizerOptions()
compiled_map = map.Compile("host", "vadtest", "predict", compiler_settings, optimizer_options)
line = 1
errors = 0
for i in range(dataset.NumExamples()):
row = dataset.GetExample(i)
expected = row.GetLabel()
data = row.GetData().ToArray()
# watch out for AutoDataVector compression
if len(data) < size:
data.resize(size)
value = compiled_map.Compute(data)[0]
if value != expected:
print("### error on line {}, signam={}, expected={}".format(line, value, expected))
errors += 1
line += 1
testing.ProcessTest("test_voice_activity_node, errors={}".format(errors), errors == 0)
def create_tensor(value, size, rows, columns, channels):
a = np.ones(size) * value
a = a.reshape(rows, columns, channels)
return ell.math.DoubleTensor(a)
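# For example (illustrative values only), create_tensor(0.5, 12, 2, 2, 3) would build
# a 2x2x3 DoubleTensor with every element equal to 0.5.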
def test_gru_node_with_vad_reset(testing):
hidden_units = 10
errors = 0
builder = ell.model.ModelBuilder()
ell_model = ell.model.Model()
dataset = load_vad_data()
input_size = dataset.NumFeatures()
input_shape = ell.model.PortMemoryLayout([int(input_size)])
output_shape = ell.model.PortMemoryLayout([hidden_units])
dataType = ell.nodes.PortType.smallReal
input_node = builder.AddInputNode(ell_model, input_shape, dataType)
vad_node = add_vad_node(builder, ell_model, input_node)
numRows = hidden_units * 3
numCols = input_size
input_weights = np.ones(numRows * numCols) * 0.01
numCols = hidden_units
hidden_weights = np.ones(numRows * numCols) * 0.02
input_bias = np.ones(numRows) * 0.01
hidden_bias = np.ones(numRows) * 0.02
input_weights_node = builder.AddConstantNode(ell_model, input_weights, ell.nodes.PortType.smallReal)
hidden_weights_node = builder.AddConstantNode(ell_model, hidden_weights, ell.nodes.PortType.smallReal)
input_bias_node = builder.AddConstantNode(ell_model, input_bias, ell.nodes.PortType.smallReal)
hidden_bias_node = builder.AddConstantNode(ell_model, hidden_bias, ell.nodes.PortType.smallReal)
# now create a gru_node that takes the same input as the vad_node, and also takes
# the output of the vad_node as a reset signal.
gru_node = builder.AddGRUNode(
ell_model,
ell.nodes.PortElements(input_node.GetOutputPort("output")),
ell.nodes.PortElements(vad_node.GetOutputPort("output")),
hidden_units,
ell.nodes.PortElements(input_weights_node.GetOutputPort("output")),
ell.nodes.PortElements(hidden_weights_node.GetOutputPort("output")),
ell.nodes.PortElements(input_bias_node.GetOutputPort("output")),
ell.nodes.PortElements(hidden_bias_node.GetOutputPort("output")),
ell.neural.ActivationType.tanh,
ell.neural.ActivationType.sigmoid)
output_node = builder.AddOutputNode(
ell_model, output_shape,
ell.nodes.PortElements(gru_node.GetOutputPort("output")))
# test we can access GetMemoryLayout information on the ports.
output_size = list(gru_node.GetOutputPort("output").GetMemoryLayout().size)
expected_size = [hidden_units]
if output_size != expected_size:
print("The output port on the gru_node has size {}, we are expecting {}".format(output_size, expected_size))
errors += 1
map = ell.model.Map(ell_model, input_node, ell.nodes.PortElements(output_node.GetOutputPort("output")))
compiler_settings = ell.model.MapCompilerOptions()
compiler_settings.useBlas = False # not resolvable on our Linux test machines...
optimizer_options = ell.model.ModelOptimizerOptions()
compiled_map = map.Compile("host", "gruvadtest", "predict", compiler_settings, optimizer_options)
last_signal = 0
was_reset = False
for i in range(dataset.NumExamples()):
row = dataset.GetExample(i)
expected = row.GetLabel()
data = row.GetData().ToArray()
# watch out for AutoDataVector compression
if len(data) < input_size:
data.resize(input_size)
value = np.array(compiled_map.Compute(data))
total = np.sum(value)
if was_reset and total > 0.1:
errors += 1
if last_signal == 1 and expected == 0:
# reset should have happened which means the next sum must be close to zero.
was_reset = True
else:
was_reset = False
testing.ProcessTest("test_gru_node_with_vad_reset, errors={}".format(errors), errors == 0)
def fastgrnn_serialization_callback(testing, map, iteration, dataset):
compiler_settings = ell.model.MapCompilerOptions()
compiler_settings.useBlas = False # not resolvable on our Linux test machines...
optimizer_options = ell.model.ModelOptimizerOptions()
compiled_map = map.Compile("host", "test_fastgrnn_node", "predict", compiler_settings, optimizer_options)
compiled_map.WriteIR("test_fastgrnn_node.ll")
errors = 0
input_size = dataset.NumFeatures()
for i in range(dataset.NumExamples()):
row = dataset.GetExample(i)
data = row.GetData().ToArray()
# watch out for AutoDataVector compression
if len(data) < input_size:
data.resize(input_size)
computed_value = np.array(map.Compute(data))
compiled_value = np.array(compiled_map.Compute(data))
if not testing.IsEqual(computed_value, compiled_value, tol=1e-6):
errors += 1
if errors == 1:
print("### mismatch between compiled and computed value of FastGRNN node")
print("computed:", list(computed_value))
print("compiled:", list(compiled_value))
testing.ProcessTest("test_fastgrnn_node, iteration={}, errors={}".format(iteration, errors), errors == 0)
return compiled_value
def test_fastgrnn_node(testing):
hidden_units = 10
wRank = 0
uRank = 0
model = ell.model.Model()
dataset = load_vad_data()
input_size = dataset.NumFeatures()
input_shape = ell.model.PortMemoryLayout([int(input_size)])
output_shape = ell.model.PortMemoryLayout([hidden_units])
dataType = ell.nodes.PortType.smallReal
input_node = model.AddInput(input_shape, dataType)
numRows = hidden_units
numCols = input_size
input_weights1 = np.ones(numRows * numCols) * 0.01
input_weights2 = np.ones(1) * 0.01
numCols = hidden_units
hidden_weights1 = np.ones(numRows * numCols) * 0.1
hidden_weights2 = np.ones(1) * 0.1
bias_gate = np.ones(hidden_units) * 0.01
bias_update = np.ones(hidden_units) * 0.02
zeta = 1
nu = 0.5
reset_node = add_vad_node2(model, input_node)
input_weights1_node = model.AddConstant(input_weights1, dataType)
input_weights2_node = model.AddConstant(input_weights2, dataType)
hidden_weights1_node = model.AddConstant(hidden_weights1, dataType)
hidden_weights2_node = model.AddConstant(hidden_weights2, dataType)
bias_gate_node = model.AddConstant(bias_gate, dataType)
bias_update_node = model.AddConstant(bias_update, dataType)
zeta_node = model.AddConstant([zeta], dataType)
nu_node = model.AddConstant([nu], dataType)
fast_grnn = model.AddFastGRNN(input_node, reset_node, hidden_units, wRank, uRank, input_weights1_node,
input_weights2_node, hidden_weights1_node, hidden_weights2_node, bias_gate_node,
bias_update_node, zeta_node, nu_node, ell.neural.ActivationType.sigmoid,
ell.neural.ActivationType.tanh)
output_node = model.AddOutput(output_shape, fast_grnn)
map = ell.model.Map(model, input_node, ell.nodes.PortElements(output_node.GetOutputPort("output")))
result = test_with_serialization(testing, map, "test_fastgrnn_node", fastgrnn_serialization_callback, dataset)
testing.ProcessTest("test_fastgrnn_node iterations match",
np.allclose(result[0], result[1]) and np.allclose(result[1], result[2]))
def hamming_callback(testing, map, iteration, context):
size = map.GetInputShape().Size()
expected = np.hamming(size)
input = np.ones(size)
output = map.Compute(input)
testing.ProcessTest("test_hamming_node compute iteration {}".format(iteration), np.allclose(output, expected))
compiler_settings = ell.model.MapCompilerOptions()
compiler_settings.useBlas = False # not resolvable on our Linux test machines...
optimizer_options = ell.model.ModelOptimizerOptions()
compiled_map = map.Compile("host", "hammingtest", "predict", compiler_settings, optimizer_options)
compiled_output = compiled_map.Compute(input)
testing.ProcessTest("test_hamming_node compiled iteration {}".format(iteration),
np.allclose(compiled_output, expected))
return compiled_output
def test_hamming_node(testing):
mb = ell.model.ModelBuilder()
model = ell.model.Model()
size = 400
input_shape = ell.model.PortMemoryLayout([size])
output_shape = ell.model.PortMemoryLayout([size])
input_node = mb.AddInputNode(model, input_shape, ell.nodes.PortType.real)
hamming_node = mb.AddHammingWindowNode(model, ell.nodes.PortElements(input_node.GetOutputPort("output")))
outputNode = mb.AddOutputNode(model, output_shape, ell.nodes.PortElements(hamming_node.GetOutputPort("output")))
map = ell.model.Map(model, input_node, ell.nodes.PortElements(outputNode.GetOutputPort("output")))
test_with_serialization(testing, map, "test_hamming_node", hamming_callback, None)
def mel_filterbank_callback(testing, map, iteration, context):
size, num_filters, sample_rate = context
try:
from python_speech_features import get_filterbanks
    except ImportError:
        print("### skipping test_mel_filterbank because 'python_speech_features' module is not available")
return
fbanks = get_filterbanks(num_filters, size, sample_rate)
input = np.array(range(size)).astype(np.float)
chopped = input[0:fbanks.shape[1]]
expected = np.dot(chopped, fbanks.T)
output = map.Compute(input)
testing.ProcessTest("test_mel_filterbank compute iteration {}".format(iteration), np.allclose(output, expected))
compiler_settings = ell.model.MapCompilerOptions()
compiler_settings.useBlas = False # not resolvable on our Linux test machines...
optimizer_options = ell.model.ModelOptimizerOptions()
compiled_map = map.Compile("host", "hammingtest", "predict", compiler_settings, optimizer_options)
compiled_output = compiled_map.Compute(input)
testing.ProcessTest("test_mel_filterbank compiled iteration {}".format(iteration),
np.allclose(compiled_output, expected))
return compiled_output
def test_mel_filterbank(testing):
mb = ell.model.ModelBuilder()
model = ell.model.Model()
size = 512
num_filters = 13
sample_rate = 16000
input_shape = ell.model.PortMemoryLayout([size])
output_shape = ell.model.PortMemoryLayout([num_filters])
input_node = mb.AddInputNode(model, input_shape, ell.nodes.PortType.real)
filterbank_node = mb.AddMelFilterBankNode(model, ell.nodes.PortElements(input_node.GetOutputPort("output")),
sample_rate, size, num_filters, num_filters)
outputNode = mb.AddOutputNode(model, output_shape, ell.nodes.PortElements(filterbank_node.GetOutputPort("output")))
map = ell.model.Map(model, input_node, ell.nodes.PortElements(outputNode.GetOutputPort("output")))
test_with_serialization(testing, map, "test_mel_filterbank", mel_filterbank_callback,
(size, num_filters, sample_rate))
def fftnode_callback(testing, map, iteration, context):
inputSize = int(map.GetMetadataValue("inputSize"))
fftSize = int(map.GetMetadataValue("fftSize"))
if fftSize == 0:
fftSize = int(math.pow(2, math.ceil(math.log2(inputSize))))
a = np.array([float(i) * math.pi / 180 for i in range(inputSize)])
y1 = np.sin(a * 10)
y2 = np.sin(a * 20)
y3 = np.sin(a * 50)
signal = y1 + y2 + y3
expected = np.absolute(np.fft.rfft(signal, n=fftSize))
output = map.Compute(signal)
expected = expected[0:len(output)] # ell returns size/2, numpy returns (size/2)+1
filename = "ffttest_{}_{}_{}.npz".format(inputSize, fftSize, iteration)
np.savez(filename, output=np.array(output), expected=np.array(expected))
testing.ProcessTest("test_fftnode compute iteration {}".format(iteration), np.allclose(output, expected))
compiler_settings = ell.model.MapCompilerOptions()
compiler_settings.useBlas = False # not resolvable on our Linux test machines...
optimizer_options = ell.model.ModelOptimizerOptions()
compiled_map = map.Compile("host", "ffttest", "predict", compiler_settings, optimizer_options)
compiled_output = compiled_map.Compute(signal)
testing.ProcessTest("test_fftnode compiled iteration {}".format(iteration), np.allclose(compiled_output, output))
return compiled_output
def test_fftnode_size(testing, inputSize, fftSize):
mb = ell.model.ModelBuilder()
model = ell.model.Model()
input_shape = ell.model.PortMemoryLayout([inputSize])
input_node = mb.AddInputNode(model, input_shape, ell.nodes.PortType.real)
if (fftSize == 0):
fft_node = mb.AddFFTNode(model, ell.nodes.PortElements(input_node.GetOutputPort("output")))
else:
fft_node = mb.AddFFTNode(model, ell.nodes.PortElements(input_node.GetOutputPort("output")), fftSize)
output_size = fft_node.GetOutputPort("output").Size()
output_shape = ell.model.PortMemoryLayout([output_size])
outputNode = mb.AddOutputNode(model, output_shape, ell.nodes.PortElements(fft_node.GetOutputPort("output")))
map = ell.model.Map(model, input_node, ell.nodes.PortElements(outputNode.GetOutputPort("output")))
map.SetMetadataValue("inputSize", str(inputSize))
map.SetMetadataValue("fftSize", str(fftSize))
test_with_serialization(testing, map, "test_fftnode ({})".format(fftSize), fftnode_callback, None)
def cast_to_port_type(a, portType):
if portType == ell.nodes.PortType.real:
return a.astype(np.float)
elif portType == ell.nodes.PortType.smallReal:
return a.astype(np.float32)
elif portType == ell.nodes.PortType.integer:
return a.astype(np.int32)
elif portType == ell.nodes.PortType.bigInt:
return a.astype(np.int64)
raise Exception("Unsupported type")
class TypeCastInfo:
def __init__(self, name, t):
self.name = name
self.t = t
def create_vector(self, len):
if self.t == ell.nodes.PortType.real:
return ell.math.DoubleVector(len)
elif self.t == ell.nodes.PortType.smallReal:
return ell.math.FloatVector(len)
elif self.t == ell.nodes.PortType.integer:
return ell.math.IntVector(len)
elif self.t == ell.nodes.PortType.bigInt:
return ell.math.Int64Vector(len)
raise Exception("Unsupported type")
def cast_vector(self, a):
return cast_to_port_type(a, self.t)
def test_typecast(testing):
    # Test a model that casts its input to a different port type.
for t in [TypeCastInfo("real", ell.nodes.PortType.real),
TypeCastInfo("smallReal", ell.nodes.PortType.smallReal),
TypeCastInfo("integer", ell.nodes.PortType.integer),
TypeCastInfo("bigInt", ell.nodes.PortType.bigInt)
]:
model = ell.model.Model()
        # build a simple model: input -> typecast -> output
layout = ell.model.PortMemoryLayout([int(10)])
input = model.AddInput(layout, ell.nodes.PortType.real)
cast = model.AddTypeCast(input, t.t)
output = model.AddOutput(layout, cast)
map = ell.model.Map(model, input, output)
a = np.array(range(10)).astype(np.float) / 2
expected = np.array(a)
i = ell.math.DoubleVector(a)
o = t.create_vector(len(a))
expected = t.cast_vector(a)
map.ComputeMultiple([i], [o])
testing.ProcessTest("Testing TypeCast to type {} Compute".format(t.name),
testing.IsEqual(np.array(o), expected))
compiled = map.Compile("host", "test", "predict")
o = t.create_vector(len(a))
compiled.ComputeMultiple([i], [o])
testing.ProcessTest("Testing TypeCast to type {} Compiled".format(t.name),
testing.IsEqual(np.array(o), expected))
def get_porttype_name(portType):
if portType == ell.nodes.PortType.smallReal:
return "smallReal"
if portType == ell.nodes.PortType.real:
return "real"
if portType == ell.nodes.PortType.integer:
return "integer"
if portType == ell.nodes.PortType.bigInt:
return "bigInt"
if portType == ell.nodes.PortType.categorical:
return "categorical"
if portType == ell.nodes.PortType.boolean:
return "boolean"
return "unknown"
def test_buffer(testing, portType):
model = ell.model.Model()
input_size = 10
output_size = 57
input = model.AddInput(ell.model.PortMemoryLayout([input_size]), portType)
buffer = model.AddBuffer(input, output_size)
output = model.AddOutput(buffer)
map = ell.model.Map(model, input, output)
compiled = map.Compile("host", "test", "predict")
portTypeName = get_porttype_name(portType)
expected = np.zeros((output_size))
for i in range(10):
input = cast_to_port_type(np.array(range(10)) + (i * input_size), portType)
out1 = map.Compute(input)
out2 = compiled.Compute(input)
expected = np.concatenate((expected[input_size:], input))
testing.ProcessTest("Testing test_buffer on {}, iteration {} compute".format(portTypeName, i),
testing.IsEqual(out1, expected, verbose=True))
testing.ProcessTest("Testing test_buffer on {}, iteration {} compiled".format(portTypeName, i),
testing.IsEqual(out2, expected, verbose=True))
# test reset works
map.Reset()
compiled.Reset()
input = cast_to_port_type(np.array(range(10)), portType)
out1 = map.Compute(input)
out2 = compiled.Compute(input)
expected = np.concatenate((np.zeros(output_size - input_size), input))
testing.ProcessTest("Testing test_buffer on {}, reset compute".format(portTypeName),
testing.IsEqual(out1, expected))
testing.ProcessTest("Testing test_buffer on {}, reset compiled".format(portTypeName),
testing.IsEqual(out2, expected))
def test_fftnode(testing):
try:
test_fftnode_size(testing, 100, 100)
testing.ProcessTest("test_fftnode needs updating for sizes that are not a power of 2.", False)
except Exception as e:
testing.ProcessTest("test_fftnode: {}".format(e), True)
test_fftnode_size(testing, 64, 64)
test_fftnode_size(testing, 100, 128)
test_fftnode_size(testing, 20, 16)
test_fftnode_size(testing, 20, 0)
def test():
testing = Testing()
test_buffer(testing, ell.nodes.PortType.smallReal)
test_buffer(testing, ell.nodes.PortType.real)
test_buffer(testing, ell.nodes.PortType.integer)
test_buffer(testing, ell.nodes.PortType.bigInt)
test_reorder(testing)
test_typecast(testing)
test_unary(testing)
# test_multiply(testing) # bugbug: crashing on Linux...
test_voice_activity_node(testing)
test_gru_node_with_vad_reset(testing)
test_hamming_node(testing)
test_mel_filterbank(testing)
test_fftnode(testing)
test_fastgrnn_node(testing)
return 0
if __name__ == "__main__":
test()
avg_line_length: 38.941586 | max_line_length: 119 | alphanum_fraction: 0.674203

hexsha: 3f147fc74aec5b868a4015b4bc99ca7f6003c542 | size: 5,379 | ext: py | lang: Python
max_stars_repo_path: django_api_example/settings_base.py | max_stars_repo_name: amrox/django-api-example | max_stars_repo_head_hexsha: 6c68e43078bb5e858ddea84d44a943ec9d7808b4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 17 | max_stars_repo_stars_event_min_datetime: 2015-03-31T20:23:08.000Z | max_stars_repo_stars_event_max_datetime: 2021-06-08T00:46:57.000Z
max_issues_repo_path: django_api_example/settings_base.py | max_issues_repo_name: amrox/django-api-example | max_issues_repo_head_hexsha: 6c68e43078bb5e858ddea84d44a943ec9d7808b4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: django_api_example/settings_base.py | max_forks_repo_name: amrox/django-api-example | max_forks_repo_head_hexsha: 6c68e43078bb5e858ddea84d44a943ec9d7808b4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2015-05-18T14:24:52.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-18T06:52:52.000Z
# Django settings for django_api_example project.
import os.path
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
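# A hedged illustration, not this project's actual configuration: a minimal sqlite3
# setup could look like the commented block below (the file name 'dev.sqlite3' is an
# assumption).
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(PROJECT_ROOT, 'dev.sqlite3'),
#     }
# }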
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '+&4x8flqhd=#koe817n47ayb=7r%4g1#a&zg(lk6n(hs_qpyr!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'middleware.CloudMiddleware',
)
ROOT_URLCONF = 'django_api_example.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'oauth_provider',
'tasks',
'api'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# OAuth Settings
OAUTH_BLACKLISTED_HOSTNAMES = ['localhost', '127.0.0.1']
OAUTH_SIGNATURE_METHODS = ['hmac-sha1',]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'oauth_provider.backends.XAuthAuthenticationBackend',
)
| 33 | 115 | 0.693252 |
8a2dee5bfed3d8d61e54a95a785b30048285a1a5 | 6,407 | py | Python | cppguts/dumpcpp.py | tierra-colada/cppguts | 473e1462e11a1d081e9a4dad7efb3867ba02355c | ["MIT"] | 1 | 2021-09-19T21:58:08.000Z | 2021-09-19T21:58:08.000Z | cppguts/dumpcpp.py | tierra-colada/cppguts | 473e1462e11a1d081e9a4dad7efb3867ba02355c | ["MIT"] | null | null | null | cppguts/dumpcpp.py | tierra-colada/cppguts | 473e1462e11a1d081e9a4dad7efb3867ba02355c | ["MIT"] | null | null | null |
import argparse
import os
from clang.cindex import Cursor, Index
from pprint import pprint
def get_diag_info(diag):
return {'severity': diag.severity,
'location': diag.location,
'category_name': diag.category_name,
'spelling': diag.spelling,
'ranges': diag.ranges,
'fixits': diag.fixits}
def get_node_info(node: Cursor, children = None) -> dict:
return {'access_specifier': node.access_specifier,
'availability': node.availability,
'brief_comment': node.brief_comment,
'canonical': node.canonical,
'data': node.data,
'displayname': node.displayname,
# 'enum_type' : node.enum_type,
# 'enum_value' : node.enum_value,
'exception_specification_kind': node.exception_specification_kind,
'extent': node.extent,
'from_cursor_result': node.from_cursor_result,
'from_location': node.from_location,
'from_result': node.from_result,
'get_arguments': node.get_arguments(),
'get_bitfield_width': node.get_bitfield_width(),
'get_children': node.get_children(),
'get_definition': node.get_definition(),
'get_field_offsetof': node.get_field_offsetof(),
# 'get_included_file' : node.get_included_file(),
'get_num_template_arguments': node.get_num_template_arguments(),
# 'get_template_argument_kind' : node.get_template_argument_kind(),
# 'get_template_argument_type' : node.get_template_argument_type(),
# 'get_template_argument_unsigned_value' : node.get_template_argument_unsigned_value(),
# 'get_template_argument_value' : node.get_template_argument_value(),
'get_tokens': node.get_tokens(),
'get_usr': node.get_usr(),
'hash': node.hash,
'is_abstract_record': node.is_abstract_record(),
'is_anonymous': node.is_anonymous(),
'is_bitfield': node.is_bitfield(),
'is_const_method': node.is_const_method(),
'is_converting_constructor': node.is_converting_constructor(),
'is_copy_constructor': node.is_copy_constructor(),
'is_default_constructor': node.is_default_constructor(),
'is_default_method': node.is_default_method(),
'is_definition': node.is_definition(),
'is_move_constructor': node.is_move_constructor(),
'is_mutable_field': node.is_mutable_field(),
'is_pure_virtual_method': node.is_pure_virtual_method(),
'is_scoped_enum': node.is_scoped_enum(),
'is_static_method': node.is_static_method(),
'is_virtual_method': node.is_virtual_method(),
'kind': node.kind,
'lexical_parent.displayname': node.lexical_parent.displayname if node.lexical_parent else None,
'linkage': node.linkage,
'location': node.location,
# 'mangled_name': node.mangled_name if node.mangled_name else None,
# 'objc_type_encoding': node.objc_type_encoding,
# 'raw_comment': node.raw_comment,
# 'referenced': node.referenced,
'result_type spelling': node.result_type.spelling,
'semantic_parent.displayname': node.semantic_parent.displayname if node.semantic_parent else None,
'spelling': node.spelling,
'storage_class': node.storage_class,
# 'tls_kind': node.tls_kind,
'translation_unit spelling': node.translation_unit.spelling if node.translation_unit else None,
'type spelling': node.type.spelling,
# 'underlying_typedef_type spelling': node.underlying_typedef_type.spelling if node.underlying_typedef_type else None,
# 'walk_preorder': node.walk_preorder,
# 'xdata': node.xdata,
'children' : children}
def find_nodes(node: Cursor, nodes_found: list, objname: str):
if objname and objname == node.spelling:
nodes_found.append(node)
for child in node.get_children():
find_nodes(child, nodes_found, objname)
def get_info(node: Cursor, maxdepth: int = None, depth: int = 0) -> dict:
if maxdepth is not None and depth >= maxdepth:
children = None
else:
children = [get_info(c, maxdepth, depth+1)
for c in node.get_children()]
return get_node_info(node, children)
def main():
    parser = argparse.ArgumentParser(description=
                                     'Dump a C++ file or dump only specified names. '
                                     'After passing `dumpcpp` flags you are allowed to pass clang '
                                     'commands like `-I` (to include a dir), `-std=c++17` and others. '
                                     "Don't pass a file to clang without a flag! Use `--file=` instead.")
    parser.add_argument('--file', dest='file', action='store',
                        type=str, required=True, default=None,
                        help='file to be dumped')
    parser.add_argument("--max-depth", dest="maxdepth", action='store',
                        metavar="N", type=int, required=False, default=None,
                        help="limit cursor expansion to depth N",)
    parser.add_argument("--object-name", dest="objname", action='store',
                        type=str, required=False, default=None,
                        help="parse only specified names (spelling)")
args, clangcmd = parser.parse_known_args()
if not os.path.isfile(args.file):
parser.error(f"specified file doesn't exist:\n{args.file}")
clangcmd.append(args.file)
index = Index.create()
tu = index.parse(None, clangcmd)
if not tu:
parser.error(f"unable to load input:\n{args.file}")
pprint(('diagnostics:', [get_diag_info(d) for d in tu.diagnostics]))
if args.objname:
nodes_found = []
find_nodes(tu.cursor, nodes_found, args.objname)
for node in nodes_found:
pprint(('found node', get_node_info(node)), indent=10)
else:
pprint(('nodes', get_info(tu.cursor)))
if __name__ == '__main__':
main()
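# Minimal usage sketch (assumed invocation; the header path, object name and
# include/standard flags below are placeholders, not part of this repo):
#
#   python dumpcpp.py --file=include/point.hpp --object-name=Point -Iinclude -std=c++17
#
# Anything argparse does not recognize is forwarded to clang unchanged via
# parse_known_args().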
| 46.766423 | 131 | 0.60309 |
3a9984bd826a0ce3b8534176ddb02bec546e7833 | 5,527 | py | Python | configs/representation/ssb/ssb_r18_nc_sgd_cos_30e_r2_1x8x2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | ["Apache-2.0"] | null | null | null | configs/representation/ssb/ssb_r18_nc_sgd_cos_30e_r2_1x8x2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | ["Apache-2.0"] | null | null | null | configs/representation/ssb/ssb_r18_nc_sgd_cos_30e_r2_1x8x2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | ["Apache-2.0"] | null | null | null |
# model settings
temperature = 0.2
with_norm = True
query_dim = 128
model = dict(
type='SimSiamBaseTracker',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
# cls_head=None,
# patch_head=None,
img_head=dict(
type='SimSiamHead',
in_channels=512,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=512,
projection_out_channels=512,
num_predictor_fcs=2,
predictor_mid_channels=128,
predictor_out_channels=512,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg'))
# model training and testing settings
train_cfg = dict(intra_video=False)
test_cfg = dict(
precede_frames=20,
topk=10,
temperature=0.2,
strides=(1, 2, 1, 1),
out_indices=(2, 3),
neighbor_range=24,
with_first=True,
with_first_neighbor=True,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=1, frame_interval=8, num_clips=2),
# dict(type='DuplicateFrames', times=2),
dict(type='DecordDecode'),
dict(
type='RandomResizedCrop',
area_range=(0.2, 1.),
same_across_clip=False,
same_on_clip=False),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(
type='Flip',
flip_ratio=0.5,
same_across_clip=False,
same_on_clip=False),
# dict(
# type='ColorJitter',
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.1,
# p=0.8,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGrayScale',
# p=0.2,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGaussianBlur',
# p=0.5,
# same_across_clip=False,
# same_on_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=128,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline)),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 30
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['ssb'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
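# Launch sketch (assumed, following the usual mmaction2 tooling; the GPU count
# is a placeholder):
#   bash tools/dist_train.sh configs/representation/ssb/ssb_r18_nc_sgd_cos_30e_r2_1x8x2_k400.py 8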
| 30.535912 | 78 | 0.619323 |
8b33edc80fef290692a878d9a4b1dbc2a57acf8c | 25,404 | py | Python | model/ner_model.py | riedlma/sequence_tagging | abe8c4b7ec38e5e4f2ce64d4ec6a33f3dbb87d62 | ["Apache-2.0"] | 22 | 2018-05-09T14:12:10.000Z | 2021-03-21T15:24:55.000Z | model/ner_model.py | riedlma/sequence_tagging | abe8c4b7ec38e5e4f2ce64d4ec6a33f3dbb87d62 | ["Apache-2.0"] | null | null | null | model/ner_model.py | riedlma/sequence_tagging | abe8c4b7ec38e5e4f2ce64d4ec6a33f3dbb87d62 | ["Apache-2.0"] | 5 | 2018-05-11T08:29:47.000Z | 2021-03-04T06:13:12.000Z |
import numpy as np
import os, sys
import tensorflow as tf
from .data_utils import minibatches, pad_sequences, get_chunks,get_oov_embeddings
from .data_utils import load_vocab_rev, load_vocab
from .general_utils import Progbar
from .base_model import BaseModel
class NERModel(BaseModel):
"""Specialized class of Model for NER"""
def __init__(self, config):
super(NERModel, self).__init__(config)
self.idx_to_tag = {idx: tag for tag, idx in
self.config.vocab_tags.items()}
def add_placeholders(self):
"""Define placeholders = entries to computational graph"""
# shape = (batch size, max length of sentence in batch)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None],
name="word_ids")
# shape = (batch size)
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],
name="sequence_lengths")
# shape = (batch size, max length of sentence, max length of word)
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None],
name="char_ids")
# shape = (batch_size, max_length of sentence)
self.word_lengths = tf.placeholder(tf.int32, shape=[None, None],
name="word_lengths")
# shape = (batch size, max length of sentence in batch)
self.labels = tf.placeholder(tf.int32, shape=[None, None],
name="labels")
# hyper parameters
self.dropout = tf.placeholder(dtype=tf.float32, shape=[],
name="dropout")
self.lr = tf.placeholder(dtype=tf.float32, shape=[],
name="lr")
def extract_identifiers(self,sentences):
res = []
for sentence in sentences:
s = []
for word in sentence:
s.append(word.identifier)
res.append(s)
return res
def extract_labels(self,sentences):
res = []
for sentence in sentences:
s = []
for word in sentence:
s.append(word)
res.append(s)
return res
def get_feed_dict(self, words, labels=None, lr=None, dropout=None):
"""Given some data, pad it and build a feed dictionary
Args:
words: list of sentences. A sentence is a list of ids of a list of
words. A word is a list of ids
labels: list of ids
lr: (float) learning rate
dropout: (float) keep prob
Returns:
dict {placeholder: value}
"""
# perform padding of the given data
if self.config.use_chars:
char_ids, word_ids = zip(*words)
word_ids, sequence_lengths = pad_sequences(word_ids, self.config.pad_token)
char_ids, word_lengths = pad_sequences(char_ids, pad_tok=0,
nlevels=2)
else:
word_ids, sequence_lengths = pad_sequences(words, self.config.pad_token)
word_ids_word = word_ids
word_ids = self.extract_identifiers(word_ids_word)
labels_word = labels
        if labels is not None:
labels = self.extract_labels(labels_word)
feed = {
self.word_ids: word_ids,
self.sequence_lengths: sequence_lengths
}
if self.config.use_large_embeddings:
feed[self.word_embeddings_values]= self.config.embeddings
feed[self.backoff_embeddings_change_values] = self.config.oov_embeddings
if self.config.use_chars:
feed[self.char_ids] = char_ids
feed[self.word_lengths] = word_lengths
if labels is not None:
labels, _ = pad_sequences(labels, 0)
feed[self.labels] = labels
if lr is not None:
feed[self.lr] = lr
if dropout is not None:
feed[self.dropout] = dropout
return feed, sequence_lengths
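    # Padding sketch (toy batch with assumed integer ids): [[4, 7, 2], [9]] is
    # padded to word_ids=[[4, 7, 2], [9, pad, pad]] using config.pad_token and
    # sequence_lengths=[3, 1], so every fed tensor is rectangular while the
    # true lengths travel alongside it.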
def add_word_embeddings_op(self):
"""Defines self.word_embeddings
If self.config.embeddings is not None and is a np array initialized
with pre-trained word vectors, the word embeddings is just a look-up
and we don't train the vectors. Otherwise, a random matrix with
the correct shape is initialized.
"""
with tf.variable_scope("words"):
# Embeddings are trained from scratch
if self.config.embeddings is None:
self.logger.info("WARNING: randomly initializing word vectors")
                if self.config.use_large_embeddings and not self.config.use_pretrained:
                    sys.stderr.write("Using large embeddings without pre-trained embeddings is not valid\n")
                    sys.exit(1)
if self.config.use_large_embeddings==True:
_word_embeddings = tf.placeholder(
name="word_embeddings_values",
dtype=tf.float32,
shape=[self.config.nwords, self.config.dim_word])
else:
_word_embeddings = tf.get_variable(
name="_word_embeddings",
dtype=tf.float32,
shape=[self.config.nwords, self.config.dim_word])
else:
if self.config.use_large_embeddings==True:
_word_embeddings = tf.placeholder(
name="word_embeddings_values",
dtype=tf.float32,
shape=(self.config.nwords, self.config.dim_word))
else:
_word_embeddings = tf.Variable(
self.config.embeddings,
name="_word_embeddings",
dtype=tf.float32,
trainable=self.config.train_embeddings)
# check if random or OOV embeddings are added
if self.config.oov_size>0:
embeddings_oov = get_oov_embeddings(self.config)
if self.config.use_large_embeddings==True:
backoff_embeddings_change=tf.placeholder(
name="backoff_embeddings_change_values",
dtype=tf.float32,
shape=[self.config.oov_words, self.config.dim_word])
self.config.oov_embeddings = embeddings_oov
else:
backoff_embeddings_change = tf.Variable(
embeddings_oov,
name="backoff_embeddings_change",
dtype=tf.float32,
trainable=self.config.train_embeddings)
_new_word_embeddings = tf.concat([_word_embeddings,backoff_embeddings_change], axis=0)
if self.config.use_large_embeddings:
self.word_embeddings_values = _word_embeddings
self.backoff_embeddings_change_values = backoff_embeddings_change
if self.config.oov_size>0:
word_embeddings = tf.nn.embedding_lookup(_new_word_embeddings,
self.word_ids, name="word_embeddings")
else:
word_embeddings = tf.nn.embedding_lookup(_word_embeddings,
self.word_ids, name="word_embeddings")
with tf.variable_scope("chars"):
if self.config.use_chars:
# get char embeddings matrix
_char_embeddings = tf.get_variable(
name="_char_embeddings",
dtype=tf.float32,
shape=[self.config.nchars, self.config.dim_char])
char_embeddings = tf.nn.embedding_lookup(_char_embeddings,
self.char_ids, name="char_embeddings")
# put the time dimension on axis=1
s = tf.shape(char_embeddings)
char_embeddings = tf.reshape(char_embeddings,
shape=[s[0]*s[1], s[-2], self.config.dim_char])
word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])
# bi lstm on chars
cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
_output = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, char_embeddings,
sequence_length=word_lengths, dtype=tf.float32)
# read and concat output
_, ((_, output_fw), (_, output_bw)) = _output
output = tf.concat([output_fw, output_bw], axis=-1)
# shape = (batch size, max sentence length, char hidden size)
output = tf.reshape(output,
shape=[s[0], s[1], 2*self.config.hidden_size_char])
word_embeddings = tf.concat([word_embeddings, output], axis=-1)
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)
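    # Shape sketch (assumed batch of B sentences, max length T): word_ids of
    # shape (B, T) is looked up to (B, T, dim_word); with use_chars the char
    # bi-LSTM output of size 2*hidden_size_char is concatenated, giving
    # (B, T, dim_word + 2*hidden_size_char) before dropout.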
def add_logits_op(self):
"""Defines self.logits
For each word in each sentence of the batch, it corresponds to a vector
of scores, of dimension equal to the number of tags.
"""
with tf.variable_scope("bi-lstm"):
cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)
cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, self.word_embeddings,
sequence_length=self.sequence_lengths, dtype=tf.float32)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.nn.dropout(output, self.dropout)
with tf.variable_scope("proj"):
W = tf.get_variable("W", dtype=tf.float32,
shape=[2*self.config.hidden_size_lstm, self.config.ntags])
b = tf.get_variable("b", shape=[self.config.ntags],
dtype=tf.float32, initializer=tf.zeros_initializer())
nsteps = tf.shape(output)[1]
output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])
pred = tf.matmul(output, W) + b
self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])
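    # Shape sketch (assumed B x T input): the concatenated bi-LSTM output is
    # (B, T, 2*hidden_size_lstm); projecting with W of shape
    # (2*hidden_size_lstm, ntags) and reshaping yields self.logits of shape
    # (B, T, ntags).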
def add_pred_op(self):
"""Defines self.labels_pred
This op is defined only in the case where we don't use a CRF since in
that case we can make the prediction "in the graph" (thanks to tf
        functions in other words). With the CRF, as the inference is coded
        in python and not in pure tensorflow, we have to make the prediction
        outside the graph.
"""
if not self.config.use_crf:
self.labels_pred = tf.cast(tf.argmax(self.logits, axis=-1),
tf.int32)
def add_loss_op(self):
"""Defines the loss"""
if self.config.use_crf:
log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
self.logits, self.labels, self.sequence_lengths)
self.trans_params = trans_params # need to evaluate it for decoding
self.loss = tf.reduce_mean(-log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.labels)
mask = tf.sequence_mask(self.sequence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
# for tensorboard
tf.summary.scalar("loss", self.loss)
def build(self):
# NER specific functions
self.add_placeholders()
self.add_word_embeddings_op()
self.add_logits_op()
self.add_pred_op()
self.add_loss_op()
# Generic functions that add training op and initialize session
self.add_train_op(self.config.lr_method, self.lr, self.loss,
self.config.clip)
self.initialize_session() # now self.sess is defined and vars are init
def predict_batch(self, words):
"""
Args:
words: list of sentences
Returns:
labels_pred: list of labels for each sentence
sequence_length
"""
fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)
if self.config.use_crf:
# get tag scores and transition params of CRF
viterbi_sequences = []
logits, trans_params = self.sess.run(
[self.logits, self.trans_params], feed_dict=fd)
            # iterate over the sentences because there is no batching in viterbi_decode
for logit, sequence_length in zip(logits, sequence_lengths):
logit = logit[:sequence_length] # keep only the valid steps
viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(
logit, trans_params)
viterbi_sequences += [viterbi_seq]
return viterbi_sequences, sequence_lengths
else:
            labels_pred = self.sess.run(self.labels_pred, feed_dict=fd)
return labels_pred, sequence_lengths
def run_epoch(self, train, dev, epoch):
"""Performs one complete pass over the train set and evaluate on dev
Args:
train: dataset that yields tuple of sentences, tags
dev: dataset
epoch: (int) index of the current epoch
Returns:
f1: (python float), score to select model on, higher is better
"""
# progbar stuff for logging
batch_size = self.config.batch_size
nbatches = (len(train) + batch_size - 1) // batch_size
prog = Progbar(target=nbatches)
# iterate over dataset
for i, (words, labels) in enumerate(minibatches(train, batch_size)):
fd, _ = self.get_feed_dict(words, labels, self.config.lr,
self.config.dropout)
_, train_loss, summary = self.sess.run(
[self.train_op, self.loss, self.merged], feed_dict=fd)
prog.update(i + 1, [("train loss", train_loss)])
# tensorboard
if i % 10 == 0:
self.file_writer.add_summary(summary, epoch*nbatches + i)
metrics = self.run_evaluate(dev)
msg = " - ".join(["{} {:04.2f}".format(k, v)
for k, v in metrics.items()])
self.logger.info(msg)
return metrics["f1"]
def run_evaluate(self, test):
"""Evaluates performance on test set
Args:
test: dataset that yields tuple of (sentences, tags)
Returns:
metrics: (dict) metrics["acc"] = 98.4, ...
"""
accs = []
correct_preds, total_correct, total_preds = 0., 0., 0.
for words, labels in minibatches(test, self.config.batch_size):
labels_pred, sequence_lengths = self.predict_batch(words)
for lab, lab_pred, length in zip(labels, labels_pred,
sequence_lengths):
lab = lab[:length]
lab_pred = lab_pred[:length]
accs += [a==b for (a, b) in zip(lab, lab_pred)]
lab_chunks = set(get_chunks(lab, self.config.vocab_tags))
lab_pred_chunks = set(get_chunks(lab_pred,self.config.vocab_tags))
correct_preds += len(lab_chunks & lab_pred_chunks)
total_preds += len(lab_pred_chunks)
total_correct += len(lab_chunks)
p = correct_preds / total_preds if correct_preds > 0 else 0
r = correct_preds / total_correct if correct_preds > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
acc = np.mean(accs)
return {"acc": 100*acc, "f1": 100*f1}
def write_output(self,text, output,write_binary):
if write_binary:
output.write(bytes(text, 'utf-8'))
else:
output.write(text)
def predict_test(self,test,separate = "\t", output = sys.stdout,write_binary=False):
#idx2word = load_vocab_rev(self.config.filename_words)
idx2tag = load_vocab_rev(self.config.filename_tags)
tag2idx = load_vocab(self.config.filename_tags)
for words, labels in minibatches(test, self.config.batch_size):
labels_pred, sequence_lengths = self.predict_batch(words)
for lab, lab_pred, length, word in zip(labels, labels_pred, sequence_lengths, words):
if self.config.use_chars:
for i in range(len(word[1])):
we = word[1][i]
unk = we.unknown.name
w = we.word+separate+we.processed_word+separate+unk
t = "O"
t = lab[i]
t2 = "O"
if lab_pred[i] in idx2tag:
t2=idx2tag[lab_pred[i]]
if t in tag2idx:
self.write_output(w+separate+t+separate+t2+"\n",output,write_binary)
else:
self.write_output(w+separate+t2+"\n",output,write_binary)
else:
for i in range(len(word)):
we = word[i]
unk = we.unknown.name
w = we.word+separate+we.processed_word+separate+unk
t = "O"
t = lab[i]
t2 = "O"
if lab_pred[i] in idx2tag:
t2=idx2tag[lab_pred[i]]
if t in tag2idx:
self.write_output(w+separate+t+separate+t2+"\n",output,write_binary)
else:
self.write_output(w+separate+t2+"\n",output,write_binary)
self.write_output("\n",output,write_binary)
def predict(self, words_raw):
"""Returns list of tags
Args:
words_raw: list of words (string), just one sentence (no batch)
Returns:
preds: list of tags (string), one for each word in the sentence
"""
words = [self.config.processing_word(w) for w in words_raw]
if type(words[0]) == tuple:
words = zip(*words)
pred_ids, _ = self.predict_batch([words])
preds = [self.idx_to_tag[idx] for idx in list(pred_ids[0])]
return preds
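# Minimal usage sketch (assumes a populated `config` object and the
# `restore_session` helper from the accompanying BaseModel, both of which live
# outside this file):
#
#   model = NERModel(config)
#   model.build()
#   model.restore_session(config.dir_model)
#   print(model.predict(["John", "lives", "in", "Berlin"]))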
| 52.271605 | 3,156 | 0.41458 |
45eb30b727474e06ad7de8f47bece42b357feb19 | 68,596 | py | Python | benchmarks/SimResults/micro_pinned_train_combos/cmpA_astarlbmtontoh264ref/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | ["Unlicense"] | null | null | null | benchmarks/SimResults/micro_pinned_train_combos/cmpA_astarlbmtontoh264ref/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | ["Unlicense"] | null | null | null | benchmarks/SimResults/micro_pinned_train_combos/cmpA_astarlbmtontoh264ref/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | ["Unlicense"] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.148091,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.319006,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.04574,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.268999,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.465808,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.267154,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.00196,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.105566,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.79587,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.197563,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00975141,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.115955,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0721177,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.313518,
'Execution Unit/Register Files/Runtime Dynamic': 0.0818691,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.320766,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.728094,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.53631,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000148537,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000148537,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000128592,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 4.93517e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00103598,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00146164,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00145214,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0693285,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.40989,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.139797,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.235471,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.8457,
'Instruction Fetch Unit/Runtime Dynamic': 0.447511,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.179189,
'L2/Runtime Dynamic': 0.109643,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.8143,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.02244,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0510256,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0510257,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.05624,
'Load Store Unit/Runtime Dynamic': 1.32511,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.12582,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.251641,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0446541,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0473303,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.274191,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0229629,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.509887,
'Memory Management Unit/Runtime Dynamic': 0.0702932,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 21.9486,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.689251,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0220491,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.123536,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.834836,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.3237,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.133904,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.215982,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.10902,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.458906,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.153146,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.12249,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00561652,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0406144,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0415376,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0406144,
'Execution Unit/Register Files/Runtime Dynamic': 0.0471542,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0855632,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.244862,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.35899,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00130406,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00130406,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00119353,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00049359,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000596691,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00439835,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0104419,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0399312,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.53997,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.103132,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.135624,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.88175,
'Instruction Fetch Unit/Runtime Dynamic': 0.293528,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0179347,
'L2/Runtime Dynamic': 0.00412594,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.45962,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.592373,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0395509,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0395508,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.64639,
'Load Store Unit/Runtime Dynamic': 0.826975,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0975257,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.195051,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0346122,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0348809,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.157926,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0169084,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.373492,
'Memory Management Unit/Runtime Dynamic': 0.0517893,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.6315,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00604137,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.069518,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0755594,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.61097,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0684343,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.25644,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.313254,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.216965,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.349956,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.176646,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.743568,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.200119,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.79873,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0591804,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00910049,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0937184,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0673037,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.152899,
'Execution Unit/Register Files/Runtime Dynamic': 0.0764042,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.214551,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.545431,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.02722,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00131772,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00131772,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00118858,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000482458,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000966823,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00479084,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0111749,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0647007,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.11552,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.182521,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.219753,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.53377,
'Instruction Fetch Unit/Runtime Dynamic': 0.48294,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0164893,
'L2/Runtime Dynamic': 0.00395448,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.60185,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.14314,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0765046,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0765047,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.96312,
'Load Store Unit/Runtime Dynamic': 1.59694,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.188647,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.377295,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0669515,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0671128,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.255888,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0301776,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.527008,
'Memory Management Unit/Runtime Dynamic': 0.0972903,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.4286,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.155677,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0116834,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.108384,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.275744,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.48409,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 4.15664e-05,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202721,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.000273244,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.315513,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.508911,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.256881,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.0813,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.360813,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.52957,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 5.16216e-05,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.013234,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0957123,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0978738,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0957639,
'Execution Unit/Register Files/Runtime Dynamic': 0.111108,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.201649,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.634113,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.43462,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00151715,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00151715,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00133974,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000528652,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00140596,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00578,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.013892,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0940885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.98484,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.25818,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.319567,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.49381,
'Instruction Fetch Unit/Runtime Dynamic': 0.691508,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0171255,
'L2/Runtime Dynamic': 0.00564793,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.71436,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.67758,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.112497,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.112497,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 5.2456,
'Load Store Unit/Runtime Dynamic': 2.34487,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.277399,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.554798,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0984497,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0986844,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.372115,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0423912,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.697344,
'Memory Management Unit/Runtime Dynamic': 0.141076,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.5729,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.000135479,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0142367,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.166599,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.180971,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.7987,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 6.432711566366688,
'Runtime Dynamic': 6.432711566366688,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.294712,
'Runtime Dynamic': 0.197683,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 79.8763,
'Peak Power': 112.989,
'Runtime Dynamic': 18.4151,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 79.5816,
'Total Cores/Runtime Dynamic': 18.2175,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.294712,
'Total L3s/Runtime Dynamic': 0.197683,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.050328
| 124
| 0.681979
|
92705ecb114f5e0c4f32e4be35fb292b5120fb75
| 602
|
py
|
Python
|
01-algorithmic-design-and-techniques/week-2/fibonacci-partial-sum.py
|
andrewnachtigal/UCSD-Algorithms
|
25acae36752e37fab74b8e331db554af704ccf4c
|
[
"MIT"
] | null | null | null |
01-algorithmic-design-and-techniques/week-2/fibonacci-partial-sum.py
|
andrewnachtigal/UCSD-Algorithms
|
25acae36752e37fab74b8e331db554af704ccf4c
|
[
"MIT"
] | null | null | null |
01-algorithmic-design-and-techniques/week-2/fibonacci-partial-sum.py
|
andrewnachtigal/UCSD-Algorithms
|
25acae36752e37fab74b8e331db554af704ccf4c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''Fibonacci Partial Sum - Last Digit
Given two non-negative integers, find the last digit of a partial sum
of Fibonacci numbers, i.e.,
find last digit of: Fm + Fm+1 + ... + Fn
'''
import sys
def fibonacci_partial_sum_naive(from_, to):
sum = 0
current = 0
next = 1
for i in range(to + 1):
if i >= from_:
sum += current
current, next = next, current + next
return sum % 10
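# Hedged sketch, not part of the original solution: the naive loop above is O(to),
# which is slow for large inputs. Because last digits of Fibonacci numbers repeat
# with the Pisano period of 60 (mod 10), the indices can be reduced first. The
# name `fibonacci_partial_sum_fast` is illustrative only.
def fibonacci_partial_sum_fast(from_, to):
    def fib_last_digit(k):
        # F(k) mod 10, using the Pisano period of 60 for modulus 10
        k %= 60
        current, nxt = 0, 1
        for _ in range(k):
            current, nxt = nxt, (current + nxt) % 10
        return current
    # F(m) + ... + F(n) = F(n + 2) - F(m + 1), so only two terms are needed mod 10
    return (fib_last_digit(to + 2) - fib_last_digit(from_ + 1)) % 10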
if __name__ == '__main__':
    text = sys.stdin.read()
    from_, to = map(int, text.split())
print(fibonacci_partial_sum_naive(from_, to))
| 18.8125
| 69
| 0.611296
|
f6ef363ba118ca050db9b2fdcfa2f3e9df13dbe9
| 31
|
py
|
Python
|
thanakan/slip/__init__.py
|
CircleOnCircles/thanakan
|
459e798029143187aa2f2fb5580d0aeb5352b12e
|
[
"MIT"
] | 8
|
2021-03-09T09:41:34.000Z
|
2022-03-05T05:54:42.000Z
|
thanakan/slip/__init__.py
|
CircleOnCircles/thanakan
|
459e798029143187aa2f2fb5580d0aeb5352b12e
|
[
"MIT"
] | 60
|
2021-03-12T07:35:19.000Z
|
2022-03-31T05:29:23.000Z
|
thanakan/slip/__init__.py
|
CircleOnCircles/thanakan
|
459e798029143187aa2f2fb5580d0aeb5352b12e
|
[
"MIT"
] | 3
|
2021-07-11T15:25:40.000Z
|
2021-12-01T10:53:01.000Z
|
from .qrcode import SlipQRData
| 15.5
| 30
| 0.83871
|
0a2a1b7b050fb15735b93cbcd742eba216962b0e
| 939
|
py
|
Python
|
bot/cogs/manage_classes.py
|
svengeance/ClemBot
|
42e739f64fe8b3ad1daa72fda0e5bef0de59519f
|
[
"MIT"
] | null | null | null |
bot/cogs/manage_classes.py
|
svengeance/ClemBot
|
42e739f64fe8b3ad1daa72fda0e5bef0de59519f
|
[
"MIT"
] | null | null | null |
bot/cogs/manage_classes.py
|
svengeance/ClemBot
|
42e739f64fe8b3ad1daa72fda0e5bef0de59519f
|
[
"MIT"
] | null | null | null |
import logging
import discord
import discord.ext.commands as commands
log = logging.getLogger(__name__)
class ManageClasses(commands.Cog):
def __init__(self, bot):
self.bot = bot
self._last_member = None
@commands.command()
async def hello(self, ctx, *, member: discord.Member = None):
member = member or ctx.author
if self._last_member is None or self._last_member.id != member.id:
await ctx.send('Hello {0.name}~'.format(member))
else:
await ctx.send('Hello {0.name}... This feels familiar.'.format(member))
self._last_member = member
@commands.command()
async def echo(self, ctx, *message, member: discord.Member = None):
embed = discord.Embed(title="Echo", color=0x522D80)
embed.add_field(name=ctx.author, value=' '.join(message))
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(ManageClasses(bot))
| 30.290323
| 83
| 0.652822
|
46d47a992972098d735fe0b79d3b7341e7701307
| 22,212
|
py
|
Python
|
07_train/archive/extras/bert/pytorch-finetune/train_scripts/utils_glue.py
|
dpai/workshop
|
d4936da77dac759ba2bac95a9584fde8e86c6b2b
|
[
"Apache-2.0"
] | 2,327
|
2020-03-01T09:47:34.000Z
|
2021-11-25T12:38:42.000Z
|
07_train/archive/extras/bert/pytorch-finetune/train_scripts/utils_glue.py
|
trideau/Data-Science-with-AWS-Workshop
|
7dbe7989fa99e88544da8bf262beec907c536093
|
[
"Apache-2.0"
] | 209
|
2020-03-01T17:14:12.000Z
|
2021-11-08T20:35:42.000Z
|
07_train/archive/extras/bert/pytorch-finetune/train_scripts/utils_glue.py
|
trideau/Data-Science-with-AWS-Workshop
|
7dbe7989fa99e88544da8bf262beec907c536093
|
[
"Apache-2.0"
] | 686
|
2020-03-03T17:24:51.000Z
|
2021-11-25T23:39:12.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
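# Illustrative only (not in the original file): a single MRPC-style sentence-pair
# example built from the fields described in the docstring above.
# ex = InputExample(guid="train-1", text_a="The cat sat.", text_b="A cat was sitting.", label="1")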
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
                if sys.version_info[0] >= 3:  ## modified for SageMaker use.
                    line = list(cell for cell in line)
lines.append(line)
return lines
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False,
cls_token='[CLS]',
cls_token_segment_id=1,
sep_token='[SEP]',
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3". " -4" for RoBERTa.
special_tokens_count = 4 if sep_token_extra else 3
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
else:
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens_a) > max_seq_length - special_tokens_count:
tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
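# Hedged usage sketch (not part of the original file): how the processors and
# convert_examples_to_features are typically combined. The tokenizer line is an
# assumption -- any BERT-style tokenizer exposing .tokenize() and
# .convert_tokens_to_ids() will do.
#
#   from pytorch_transformers import BertTokenizer
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   processor = MrpcProcessor()
#   examples = processor.get_dev_examples("/path/to/glue/MRPC")
#   features = convert_examples_to_features(
#       examples, processor.get_labels(), max_seq_length=128,
#       tokenizer=tokenizer, output_mode="classification")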
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
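# Worked illustration (not in the original file): with max_length=6,
# tokens_a = ["a", "b", "c", "d", "e"] and tokens_b = ["x", "y", "z"] end up as
# ["a", "b", "c"] and ["x", "y", "z"] -- only the longer sequence loses tokens.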
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
| 35.768116
| 106
| 0.58617
|
222aa63dd3280e2f15b4bb5aac8ca37523cd63b4
| 3,542
|
py
|
Python
|
models/vgg_nobn_quant.py
|
ShihuiYin/QNN
|
b12223e5617e64509a9269228dec4d1a45342274
|
[
"MIT"
] | null | null | null |
models/vgg_nobn_quant.py
|
ShihuiYin/QNN
|
b12223e5617e64509a9269228dec4d1a45342274
|
[
"MIT"
] | null | null | null |
models/vgg_nobn_quant.py
|
ShihuiYin/QNN
|
b12223e5617e64509a9269228dec4d1a45342274
|
[
"MIT"
] | null | null | null |
'''VGG11/13/16/19 in Pytorch. Always precede the activation layer with batch norm so that the two can later be fused.'''
import torch
import torch.nn as nn
from .quant import QuantizeConv2d, QuantizeLinear, QuantizeActLayer, BatchNorm2d, BatchNorm1d
cfg = {
'VGG': [128, 128, 'M', 256, 256, 'M', 512, 512, 'M'],
'VGGS': [128, 128, 'M', 256, 256, 'M', 256, 256, 'M'],
'VGGT': [128, 128, 'M', 128, 256, 'M', 256, 256, 'M'],
'VGGA': [128, 128, 'M', 128, 256, 'M', 256, 256, 'A'],
'VGGD': [128, 128, 'M', 256, 256, 'M', 512, 512, 'M'],
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG_nobn_quant(nn.Module):
def __init__(self, vgg_name, a_bits=2, w_bits=2, fc=1024, a_H=1., w_H=1.):
super(VGG_nobn_quant, self).__init__()
self.a_bits = a_bits
self.w_bits = w_bits
self.a_H = a_H
self.w_H = w_H
self.features = self._make_layers(cfg[vgg_name])
num_maxpooling_layers = cfg[vgg_name].count('M')
if 'S' in vgg_name or 'T' in vgg_name:
last_conv_layer_output_dim = 256 * (4 ** (5 - num_maxpooling_layers))
elif 'A' in vgg_name:
last_conv_layer_output_dim = 256
else:
last_conv_layer_output_dim = 512 * (4 ** (5 - num_maxpooling_layers))
self.classifier = nn.Sequential(
QuantizeLinear(last_conv_layer_output_dim, fc, n_bits=w_bits, H=w_H),
BatchNorm1d(fc, affine=False),
QuantizeActLayer(n_bits=a_bits, H=a_H),
QuantizeLinear(fc, fc, n_bits=w_bits, H=w_H),
BatchNorm1d(fc, affine=False),
QuantizeActLayer(n_bits=a_bits, H=a_H),
QuantizeLinear(fc, 10, n_bits=w_bits, H=w_H),
)
#self.regime = {
# 0: {'optimizer': 'Adam', 'betas': (0.9, 0.999),'lr': 5e-3},
# 40: {'lr': 1e-3},
# 80: {'lr': 5e-4},
# 100: {'lr': 1e-4},
# 120: {'lr': 5e-5},
# 140: {'lr': 1e-5}
#}
def forward(self, x):
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _make_layers(self, cfg):
layers = []
in_channels = 3
for x in cfg:
if in_channels == 3:
layers += [QuantizeConv2d(in_channels, x, kernel_size=3, padding=1, n_bits=self.w_bits, H=self.w_H)]
in_channels = x
else:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif x == 'A':
layers += [nn.AvgPool2d(kernel_size=8)]
else:
layers += [BatchNorm2d(in_channels), QuantizeActLayer(n_bits=self.a_bits, H=self.a_H)]
layers += [QuantizeConv2d(in_channels, x, kernel_size=3, padding=1, n_bits=self.w_bits, H=self.w_H)]
in_channels = x
layers += [BatchNorm2d(in_channels, affine=False), QuantizeActLayer(n_bits=self.a_bits, H=self.a_H)]
return nn.Sequential(*layers)
def test():
    net = VGG_nobn_quant('VGG11')
x = torch.randn(2,3,32,32)
y = net(x)
print(y.size())
# test()
| 41.670588
| 120
| 0.530774
|
aee8eb337c0339fc53c6c7c9b926b3eb0e057ae3
| 1,720
|
py
|
Python
|
setup.py
|
sommersoft/Adafruit_CircuitPython_BLE
|
f50290be4cfb40b56ed10b99b9111bde27cf4923
|
[
"MIT"
] | null | null | null |
setup.py
|
sommersoft/Adafruit_CircuitPython_BLE
|
f50290be4cfb40b56ed10b99b9111bde27cf4923
|
[
"MIT"
] | null | null | null |
setup.py
|
sommersoft/Adafruit_CircuitPython_BLE
|
f50290be4cfb40b56ed10b99b9111bde27cf4923
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='adafruit-circuitpython-ble',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description='Bluetooth Low Energy (BLE) library for CircuitPython',
long_description=long_description,
long_description_content_type='text/x-rst',
# The project's main homepage.
url='https://github.com/adafruit/Adafruit_CircuitPython_BLE',
# Author details
author='Adafruit Industries',
author_email='circuitpython@adafruit.com',
install_requires=[
'adafruit-blinka',
'adafruit-blinka-bleio'
],
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Hardware',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='adafruit blinka circuitpython micropython ble bluetooth',
packages=find_packages(include=["adafruit_ble", "adafruit_ble.*"]),
)
| 28.196721
| 71
| 0.683721
|
ff00d063978daddf4e8c9aba9e0d3a536ab2a132
| 178
|
py
|
Python
|
biocoder/routing.py
|
east301/biocoder-server
|
e7b937e99608fa18675c972ec4377d946896cbb1
|
[
"Apache-2.0"
] | null | null | null |
biocoder/routing.py
|
east301/biocoder-server
|
e7b937e99608fa18675c972ec4377d946896cbb1
|
[
"Apache-2.0"
] | null | null | null |
biocoder/routing.py
|
east301/biocoder-server
|
e7b937e99608fa18675c972ec4377d946896cbb1
|
[
"Apache-2.0"
] | null | null | null |
#
# (c) 2016 biocoder developers
#
# This file is part of biocoder,
# released under Apache License Version 2.0 (http://www.apache.org/licenses/LICENCE).
#
channel_routing = []
| 19.777778
| 85
| 0.719101
|
c0f9f45dd68c58852456825b235538a3813bdb5f
| 2,752
|
py
|
Python
|
pyredner/transform.py
|
pableeto/redner
|
0d76e5a77bf1cc64d83571c6c92a64a64e901efb
|
[
"MIT"
] | null | null | null |
pyredner/transform.py
|
pableeto/redner
|
0d76e5a77bf1cc64d83571c6c92a64a64e901efb
|
[
"MIT"
] | null | null | null |
pyredner/transform.py
|
pableeto/redner
|
0d76e5a77bf1cc64d83571c6c92a64a64e901efb
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
import tensorflow as tf
import torch
def radians(deg):
return (math.pi / 180.0) * deg
def normalize(v):
return v / torch.norm(v)
def gen_look_at_matrix(pos, look, up):
d = normalize(look - pos)
right = normalize(torch.cross(d, normalize(up)))
new_up = normalize(torch.cross(right, d))
z = torch.zeros([1], dtype=torch.float32)
o = torch.ones([1], dtype=torch.float32)
return torch.transpose(torch.stack([torch.cat([right , z], 0),
torch.cat([new_up, z], 0),
torch.cat([d , z], 0),
torch.cat([pos , o], 0)]), 0, 1).contiguous()
def gen_scale_matrix(scale):
o = torch.ones([1], dtype=torch.float32)
return torch.diag(torch.cat([scale, o], 0))
def gen_translate_matrix(translate):
z = torch.zeros([1], dtype=torch.float32)
o = torch.ones([1], dtype=torch.float32)
return torch.stack([torch.cat([o, z, z, translate[0:1]], 0),
torch.cat([z, o, z, translate[1:2]], 0),
torch.cat([z, z, o, translate[2:3]], 0),
torch.cat([z, z, z, o], 0)])
def gen_perspective_matrix(fov, clip_near, clip_far):
clip_dist = clip_far - clip_near
cot = 1 / torch.tan(radians(fov / 2.0))
z = torch.zeros([1], dtype=torch.float32)
o = torch.ones([1], dtype=torch.float32)
return torch.stack([torch.cat([cot, z, z, z], 0),
torch.cat([ z, cot, z, z], 0),
torch.cat([ z, z, 1 / clip_dist, - clip_near / clip_dist], 0),
torch.cat([ z, z, o, z], 0)])
def gen_rotate_matrix(angles):
theta = angles[0]
phi = angles[1]
psi = angles[2]
rot_x = torch.zeros((3, 3), dtype=torch.float32)
rot_y = torch.zeros((3, 3), dtype=torch.float32)
rot_z = torch.zeros((3, 3), dtype=torch.float32)
rot_x[0, 0] = 1
rot_x[0, 1] = 0
rot_x[0, 2] = 0
rot_x[1, 0] = 0
rot_x[1, 1] = theta.cos()
rot_x[1, 2] = theta.sin()
rot_x[2, 0] = 0
rot_x[2, 1] = -theta.sin()
rot_x[2, 2] = theta.cos()
rot_y[0, 0] = phi.cos()
rot_y[0, 1] = 0
rot_y[0, 2] = -phi.sin()
rot_y[1, 0] = 0
rot_y[1, 1] = 1
rot_y[1, 2] = 0
rot_y[2, 0] = phi.sin()
rot_y[2, 1] = 0
rot_y[2, 2] = phi.cos()
rot_z[0, 0] = psi.cos()
rot_z[0, 1] = -psi.sin()
rot_z[0, 2] = 0
rot_z[1, 0] = psi.sin()
rot_z[1, 1] = psi.cos()
rot_z[1, 2] = 0
rot_z[2, 0] = 0
rot_z[2, 1] = 0
rot_z[2, 2] = 1
return rot_z @ (rot_y @ rot_x)
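# Illustrative usage sketch (not part of the original module; the values below are
# arbitrary assumptions): composing a camera-to-world matrix with a perspective
# projection built from the helpers above.
if __name__ == '__main__':
    cam_pos = torch.tensor([0.0, 0.0, -5.0])
    cam_look = torch.tensor([0.0, 0.0, 0.0])
    cam_up = torch.tensor([0.0, 1.0, 0.0])
    cam_to_world = gen_look_at_matrix(cam_pos, cam_look, cam_up)  # 4x4 rigid transform
    proj = gen_perspective_matrix(torch.tensor([45.0]),   # vertical fov in degrees
                                  torch.tensor([1e-2]),   # near clip distance
                                  torch.tensor([1e2]))    # far clip distance
    print(cam_to_world.shape, proj.shape)  # both torch.Size([4, 4])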
| 33.560976
| 90
| 0.49782
|
adcb40b51f54d78c62e5118ddeece7fe0dec36b8
| 660
|
py
|
Python
|
Scripts/rst2pseudoxml.py
|
akshat0703/Topsis-Akshat-101917081
|
c2c4e6598eeff7666d0c325df2a0bdb7c279a172
|
[
"MIT"
] | null | null | null |
Scripts/rst2pseudoxml.py
|
akshat0703/Topsis-Akshat-101917081
|
c2c4e6598eeff7666d0c325df2a0bdb7c279a172
|
[
"MIT"
] | null | null | null |
Scripts/rst2pseudoxml.py
|
akshat0703/Topsis-Akshat-101917081
|
c2c4e6598eeff7666d0c325df2a0bdb7c279a172
|
[
"MIT"
] | null | null | null |
#!C:\Users\hp\Desktop\Assignment-4\Topsis-Akshat-101917081\Scripts\python.exe
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
| 27.5
| 77
| 0.74697
|
a8391f883dfeeecffd335f24b60be82612615d10
| 8,906
|
py
|
Python
|
Code/Igor&Kostia/tfidf_by_st_features.py
|
ChenglongChen/Kaggle_Homedepot
|
55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445
|
[
"MIT"
] | 465
|
2016-04-27T13:17:36.000Z
|
2020-05-15T11:05:13.000Z
|
Code/Igor&Kostia/tfidf_by_st_features.py
|
CharlotteSean/Kaggle_HomeDepot
|
55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445
|
[
"MIT"
] | 1
|
2016-10-15T04:33:54.000Z
|
2016-10-15T04:33:54.000Z
|
Code/Igor&Kostia/tfidf_by_st_features.py
|
CharlotteSean/Kaggle_HomeDepot
|
55c1033d0af3b6cf2f033fe4bcf3e1e0ffda3445
|
[
"MIT"
] | 230
|
2016-04-30T06:35:17.000Z
|
2019-12-04T08:23:22.000Z
|
# -*- coding: utf-8 -*-
"""
Code for calculating some similar to TFIDF features for all docs related to the same unique search term.
Competition: HomeDepot Search Relevance
Author: Kostia Omelianchuk
Team: Turing test
"""
from config_IgorKostia import *
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor, GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from nltk.stem.snowball import SnowballStemmer, PorterStemmer
import nltk
from time import time
import re
import os
import math as m
#data loading
df_all=pd.read_csv(PROCESSINGTEXT_DIR+"/df_train_and_test_processed.csv", encoding="ISO-8859-1")
df_all1=pd.read_csv(PROCESSINGTEXT_DIR+"/df_product_descriptions_processed.csv", encoding="ISO-8859-1")
df_all2 = pd.merge(df_all, df_all1, how="left", on="product_uid")
df_all = df_all2
p = df_all.keys()
for i in range(len(p)):
print p[i]
for var in df_all.keys():
df_all[var]=df_all[var].fillna("")
#building base of the documents related to unique search term
st = df_all["search_term_stemmed"]
pt = df_all["product_title_stemmed"]
pd = df_all["product_description_stemmed"]
st_l = list(st)
st_lu = list(set(st))
t=list()
another_t=list()
for i in range(len(st_lu)):
t.append("")
another_t.append("")
for i in range(len(st_lu)):
another_t[i] = list(pt[st==st_lu[i]])
t[i]= list(pd[st==st_lu[i]])
# Damerau-Levenshtein distance
def dld(s1, s2):
d = {}
lenstr1 = len(s1)
lenstr2 = len(s2)
for i in xrange(-1,lenstr1+1):
d[(i,-1)] = i+1
for j in xrange(-1,lenstr2+1):
d[(-1,j)] = j+1
for i in xrange(lenstr1):
for j in xrange(lenstr2):
if s1[i] == s2[j]:
cost = 0
else:
cost = 1
d[(i,j)] = min(
d[(i-1,j)] + 1, # deletion
d[(i,j-1)] + 1, # insertion
d[(i-1,j-1)] + cost, # substitution
)
if i and j and s1[i]==s2[j-1] and s1[i-1] == s2[j]:
d[(i,j)] = min (d[(i,j)], d[i-2,j-2] + cost) # transposition
return d[lenstr1-1,lenstr2-1]
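# Illustrative note (not in the original script): dld() is the Damerau-Levenshtein
# edit distance, e.g. dld("ab", "ba") == 1 (one transposition) and
# dld("abc", "abcd") == 1 (one insertion).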
# merge words whose Damerau-Levenshtein distance is small and collect the replaced and the canonical (unique) words
def dld_replacer(s1):
s=s1
uwords=list()
allwords=list()
for i in range(len(s)):
words = s[i].split(" ")
for j in range(len(words)):
for i1 in range(len(s)):
words2 = s[i1].split(" ")
for j1 in range(len(words2)):
# print i,j,i1,j1,len(s)
if dld(words[j], words2[j1])<min(3,max(len(words2[j1])-3,1)):
allwords.append(words2[j1])
words2[j1]=words[j]
uwords.append(words[j])
d=""
#print d
for t in range(len(words2)):
d=d+words2[t]+" "
#print d
# print d
d=d[0:len(d)-1]
s[i1]=d
d=""
for t1 in range(len(words)):
d=d+words[t1]+" "
#print d
d=d[0:len(d)-1]
s[i]=d
allwords = list(set(allwords))
uwords=list(set(uwords))
return s, allwords, uwords
# make a simple list of unique words
def simple_replacer(s1):
s=s1
uwords=list()
for i in range(len(s)):
words = s[i].split(" ")
for j in range(len(words)):
uwords.append(words[j])
uwords=list(set(uwords))
return uwords
new_t=t
new_t1=list()
alwords1=list()
uwords=list()
uwords1=list()
for i in range(len(t)):
tmp1,tmp2,tmp3= dld_replacer(another_t[i])
uwords1.append(tmp3)
new_t1.append(tmp1)
alwords1.append(tmp2)
tmp4= simple_replacer(t[i])
uwords.append(tmp4)
idf_list=list()
t_length=list()
d_length=list()
idf_list1=list()
t_length1=list()
d_length1=list()
#calculate count of unique words for each document related to unique search term
def idf_from_list(key_word,new_t,uwords):
j = st_lu.index(key_word)
idf_list=list()
d = 0
l = len(new_t[j])
for t in range(l):
tmp_list=list()
words=new_t[j][t].split(" ")
for i in range(len(uwords[j])):
d = d + words.count(uwords[j][i])
tmp_list.append(words.count(uwords[j][i]))
idf_list.append(tmp_list)
return idf_list, d, l
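# Note (added for clarity, not in the original script): idf_list holds, for every
# document tied to this search term, the per-word counts of its unique words;
# d is the total count over all of those documents and l is the number of documents.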
for i in range(len(st_lu)) :
tmp1,tmp2,tmp3 = idf_from_list(st_lu[i],new_t,uwords)
idf_list.append(tmp1)
d_length.append(tmp2)
t_length.append(tmp3)
tmp1,tmp2,tmp3 = idf_from_list(st_lu[i],new_t1,uwords1)
idf_list1.append(tmp1)
d_length1.append(tmp2)
t_length1.append(tmp3)
if (i%1000)==0:
print i
st = df_all["search_term_stemmed"]
pt = df_all["product_title_stemmed"]
pd = df_all["product_description_stemmed"]
for i in range(len(st_lu)):
another_t[i] = list(pt[st==st_lu[i]])
t[i]= list(pd[st==st_lu[i]])
if (i%1000)==0:
print i
list1=list()
list2=list()
list3=list()
list4=list()
list5=list()
list6=list()
list7=list()
list8=list()
list9=list()
list10=list()
list11=list()
list12=list()
#calculate features using st=search_term and pd=product_description
for i in range(len(df_all)):
df_parsed=pd
#j = st_lu.index(df_all["search_term_parsed"][i])
#k=t[j].index(df_all["product_title_parsed"][i])
j = st_lu.index(df_all["search_term_stemmed"][i])
k=t[j].index(df_parsed[i])
if d_length[j]==0:
d_length[j]=1
f1=(sum(idf_list[j][k])+0.0)/d_length[j]
f2=(sum(idf_list[j][k])+0.0)/t_length[j]
f3=d_length[j]
f4=t_length[j]
#f5=len(df_all["product_title_parsed"][i].split(" "))/len(list(set(new_t[j][k].split(" "))))
f5=len(df_parsed[i].split(" "))/len(list(set(new_t[j][k].split(" "))))
f6=(sum(idf_list[j][k])+0.0)*m.log(d_length[j])
f7=(sum(idf_list[j][k])+0.0)*m.log(t_length[j])
f8=(sum(idf_list[j][k])+0.0)/((d_length[j]+0.0)/len(idf_list[j]))
f9=(sum(idf_list[j][k])+0.0)/((t_length[j]+0.0)/len(idf_list[j]))
f10=(len(list(set(list(st_lu[j].split(" "))))) +0.0)/d_length[j]
f11=(len(list(set(list(st_lu[j].split(" "))))) +0.0)/t_length[j]
f12=len(list(set(list(st_lu[j].split(" "))))) /((d_length[j]+0.0)/len(idf_list[j]))
if (i%1000)==0:
print i
list1.append(f1)
list2.append(f2)
list3.append(f3)
list4.append(f4)
list5.append(f5)
list6.append(f6)
list7.append(f7)
list8.append(f8)
list9.append(f9)
list10.append(f10)
list11.append(f11)
list12.append(f12)
list_of_list=[list1,list2,list3,list4,list5,list6,list7,list8,list9,list10,list11,list12]
st_names=["id"]
for j in range(12):
df_all["st_tfidf_"+str(j)]=list_of_list[j]
st_names.append("st_tfidf_"+str(j))
list1=list()
list2=list()
list3=list()
list4=list()
list5=list()
list6=list()
list7=list()
list8=list()
list9=list()
list10=list()
list11=list()
list12=list()
new_t=new_t1
idf_list=idf_list1
d_length=d_length1
t_length=t_length1
t=another_t
#calculate features using st=search_term and pt=product_title
for i in range(len(df_all)):
df_parsed=pt
#j = st_lu.index(df_all["search_term_parsed"][i])
#k=t[j].index(df_all["product_title_parsed"][i])
j = st_lu.index(df_all["search_term_stemmed"][i])
k=t[j].index(df_parsed[i])
if d_length[j]==0:
d_length[j]=1
f1=(sum(idf_list[j][k])+0.0)/d_length[j]
f2=(sum(idf_list[j][k])+0.0)/t_length[j]
f3=d_length[j]
f4=t_length[j]
#f5=len(df_all["product_title_parsed"][i].split(" "))/len(list(set(new_t[j][k].split(" "))))
f5=len(df_parsed[i].split(" "))/len(list(set(new_t[j][k].split(" "))))
f6=(sum(idf_list[j][k])+0.0)*m.log(d_length[j])
f7=(sum(idf_list[j][k])+0.0)*m.log(t_length[j])
f8=(sum(idf_list[j][k])+0.0)/((d_length[j]+0.0)/len(idf_list[j]))
f9=(sum(idf_list[j][k])+0.0)/((t_length[j]+0.0)/len(idf_list[j]))
f10=(len(list(set(list(st_lu[j].split(" "))))) +0.0)/d_length[j]
f11=(len(list(set(list(st_lu[j].split(" "))))) +0.0)/t_length[j]
f12=len(list(set(list(st_lu[j].split(" "))))) /((d_length[j]+0.0)/len(idf_list[j]))
if (i%1000)==0:
print i
list1.append(f1)
list2.append(f2)
list3.append(f3)
list4.append(f4)
list5.append(f5)
list6.append(f6)
list7.append(f7)
list8.append(f8)
list9.append(f9)
list10.append(f10)
list11.append(f11)
list12.append(f12)
list_of_list=[list1,list2,list3,list4,list5,list6,list7,list8,list9,list10,list11,list12]
for j in range(12):
    df_all["st_tfidf_"+str(j)+".1"]=list_of_list[j]
    st_names.append("st_tfidf_"+str(j)+".1")
#save features
b=df_all[st_names]
b.to_csv(FEATURES_DIR+"/df_st_tfidf.csv", index=False)
| 26.4273
| 104
| 0.588817
|
37b897768f815c01fd1959412b4cef509944d10d
| 12,179
|
py
|
Python
|
electrum_mars/gui/kivy/uix/dialogs/tx_dialog.py
|
marscoin/electrum-mars
|
e95274b0ad959bdb02226c988303339a24acb8bf
|
[
"MIT"
] | 3
|
2021-08-15T08:05:00.000Z
|
2021-11-21T21:35:10.000Z
|
electrum_mars/gui/kivy/uix/dialogs/tx_dialog.py
|
marscoin/electrum-mars
|
e95274b0ad959bdb02226c988303339a24acb8bf
|
[
"MIT"
] | 1
|
2021-12-02T08:04:05.000Z
|
2021-12-02T08:04:05.000Z
|
electrum_mars/gui/kivy/uix/dialogs/tx_dialog.py
|
marscoin/electrum-mars
|
e95274b0ad959bdb02226c988303339a24acb8bf
|
[
"MIT"
] | null | null | null |
import copy
from datetime import datetime
from typing import NamedTuple, Callable, TYPE_CHECKING
from functools import partial
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from electrum_mars.util import InvalidPassword
from electrum_mars.address_synchronizer import TX_HEIGHT_LOCAL
from electrum_mars.wallet import CannotBumpFee, CannotDoubleSpendTx
from electrum_mars.transaction import Transaction, PartialTransaction
from electrum_mars.network import NetworkException
from electrum_mars.gui.kivy.i18n import _
from electrum_mars.gui.kivy.util import address_colors
from ..actiondropdown import ActionDropdown, ActionButtonOption
from .question import Question
if TYPE_CHECKING:
from ...main_window import ElectrumWindow
Builder.load_string('''
#:import KIVY_GUI_PATH electrum_mars.gui.kivy.KIVY_GUI_PATH
<TxDialog>
id: popup
title: _('Transaction')
is_mine: True
can_sign: False
can_broadcast: False
can_rbf: False
fee_str: ''
feerate_str: ''
date_str: ''
date_label:''
amount_str: ''
tx_hash: ''
status_str: ''
description: ''
outputs_str: ''
BoxLayout:
orientation: 'vertical'
ScrollView:
scroll_type: ['bars', 'content']
bar_width: '25dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
text: _('Status')
value: root.status_str
BoxLabel:
text: _('Description') if root.description else ''
value: root.description
BoxLabel:
text: root.date_label
value: root.date_str
BoxLabel:
text: _('Amount sent') if root.is_mine else _('Amount received')
value: root.amount_str
BoxLabel:
text: _('Transaction fee') if root.fee_str else ''
value: root.fee_str
BoxLabel:
text: _('Transaction fee rate') if root.feerate_str else ''
value: root.feerate_str
TopLabel:
text: _('Transaction ID') + ':' if root.tx_hash else ''
TxHashLabel:
data: root.tx_hash
name: _('Transaction ID')
TopLabel:
text: _('Outputs') + ':'
OutputList:
id: output_list
Widget:
size_hint: 1, 0.1
BoxLayout:
size_hint: 1, None
height: '48dp'
ActionDropdown:
id: action_dropdown
size_hint: 0.5, None
height: '48dp'
IconButton:
size_hint: 0.5, None
height: '48dp'
icon: f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/qrcode'
on_release: root.show_qr()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Label')
on_release: root.label_dialog()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Close')
on_release: root.dismiss()
''')
class TxDialog(Factory.Popup):
def __init__(self, app, tx):
Factory.Popup.__init__(self)
self.app = app # type: ElectrumWindow
self.wallet = self.app.wallet
self.tx = tx # type: Transaction
# If the wallet can populate the inputs with more info, do it now.
# As a result, e.g. we might learn an imported address tx is segwit,
# or that a beyond-gap-limit address is is_mine.
# note: this might fetch prev txs over the network.
# note: this is a no-op for complete txs
tx.add_info_from_wallet(self.wallet)
def on_open(self):
self.update()
def update(self):
format_amount = self.app.format_amount_and_units
tx_details = self.wallet.get_tx_info(self.tx)
tx_mined_status = tx_details.tx_mined_status
exp_n = tx_details.mempool_depth_bytes
amount, fee = tx_details.amount, tx_details.fee
self.status_str = tx_details.status
self.description = tx_details.label
self.can_broadcast = tx_details.can_broadcast
self.can_rbf = tx_details.can_bump
self.can_dscancel = tx_details.can_dscancel
self.tx_hash = tx_details.txid or ''
if tx_mined_status.timestamp:
self.date_label = _('Date')
self.date_str = datetime.fromtimestamp(tx_mined_status.timestamp).isoformat(' ')[:-3]
elif exp_n is not None:
self.date_label = _('Mempool depth')
self.date_str = _('{} from tip').format('%.2f MB'%(exp_n/1000000))
else:
self.date_label = ''
self.date_str = ''
self.can_sign = self.wallet.can_sign(self.tx)
if amount is None:
self.amount_str = _("Transaction unrelated to your wallet")
elif amount > 0:
self.is_mine = False
self.amount_str = format_amount(amount)
else:
self.is_mine = True
self.amount_str = format_amount(-amount)
risk_of_burning_coins = (isinstance(self.tx, PartialTransaction)
and self.can_sign
and fee is not None
and bool(self.wallet.get_warning_for_risk_of_burning_coins_as_fees(self.tx)))
if fee is not None and not risk_of_burning_coins:
self.fee_str = format_amount(fee)
fee_per_kb = fee / self.tx.estimated_size() * 1000
self.feerate_str = self.app.format_fee_rate(fee_per_kb)
else:
self.fee_str = _('unknown')
self.feerate_str = _('unknown')
self.ids.output_list.update(self.tx.outputs())
for dict_entry in self.ids.output_list.data:
dict_entry['color'], dict_entry['background_color'] = address_colors(self.wallet, dict_entry['address'])
self.can_remove_tx = tx_details.can_remove
self.update_action_dropdown()
def update_action_dropdown(self):
action_dropdown = self.ids.action_dropdown # type: ActionDropdown
# note: button texts need to be short; there is only horizontal space for ~13 chars
options = (
ActionButtonOption(text=_('Sign'), func=lambda btn: self.do_sign(), enabled=self.can_sign),
ActionButtonOption(text=_('Broadcast'), func=lambda btn: self.do_broadcast(), enabled=self.can_broadcast),
ActionButtonOption(text=_('Bump fee'), func=lambda btn: self.do_rbf(), enabled=self.can_rbf),
ActionButtonOption(text=_('Cancel') + '\n(double-spend)', func=lambda btn: self.do_dscancel(), enabled=self.can_dscancel),
ActionButtonOption(text=_('Remove'), func=lambda btn: self.remove_local_tx(), enabled=self.can_remove_tx),
)
action_dropdown.update(options=options)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
# FIXME network code in gui thread...
tx.add_info_from_wallet(self.wallet, ignore_network_issues=False)
except NetworkException as e:
self.app.show_error(repr(e))
return False
return True
def do_rbf(self):
from .bump_fee_dialog import BumpFeeDialog
tx = self.tx
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
size = tx.estimated_size()
cb = partial(self._do_rbf, tx=tx, txid=txid)
d = BumpFeeDialog(self.app, fee, size, cb)
d.open()
def _do_rbf(
self,
new_fee_rate,
is_final,
*,
tx: PartialTransaction,
txid: str,
):
if new_fee_rate is None:
return
try:
new_tx = self.wallet.bump_fee(
tx=tx,
txid=txid,
new_fee_rate=new_fee_rate,
)
except CannotBumpFee as e:
self.app.show_error(str(e))
return
new_tx.set_rbf(not is_final)
self.tx = new_tx
self.update()
self.do_sign()
def do_dscancel(self):
from .dscancel_dialog import DSCancelDialog
tx = self.tx
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
size = tx.estimated_size()
cb = partial(self._do_dscancel, tx=tx)
d = DSCancelDialog(self.app, fee, size, cb)
d.open()
def _do_dscancel(
self,
new_fee_rate,
*,
tx: PartialTransaction,
):
if new_fee_rate is None:
return
try:
new_tx = self.wallet.dscancel(
tx=tx,
new_fee_rate=new_fee_rate,
)
except CannotDoubleSpendTx as e:
self.app.show_error(str(e))
return
self.tx = new_tx
self.update()
self.do_sign()
def do_sign(self):
self.app.protected(_("Sign this transaction?"), self._do_sign, ())
def _do_sign(self, password):
self.status_str = _('Signing') + '...'
Clock.schedule_once(lambda dt: self.__do_sign(password), 0.1)
def __do_sign(self, password):
try:
self.app.wallet.sign_transaction(self.tx, password)
except InvalidPassword:
self.app.show_error(_("Invalid PIN"))
self.update()
def do_broadcast(self):
self.app.broadcast(self.tx)
def show_qr(self):
original_raw_tx = str(self.tx)
qr_data = self.tx.to_qr_data()
self.app.qr_dialog(_("Raw Transaction"), qr_data, text_for_clipboard=original_raw_tx)
def remove_local_tx(self):
txid = self.tx.txid()
num_child_txs = len(self.wallet.get_depending_transactions(txid))
question = _("Are you sure you want to remove this transaction?")
if num_child_txs > 0:
question = (_("Are you sure you want to remove this transaction and {} child transactions?")
.format(num_child_txs))
def on_prompt(b):
if b:
self.wallet.remove_transaction(txid)
self.wallet.save_db()
self.app._trigger_update_wallet() # FIXME private...
self.dismiss()
d = Question(question, on_prompt)
d.open()
def label_dialog(self):
from .label_dialog import LabelDialog
key = self.tx.txid()
text = self.app.wallet.get_label_for_txid(key)
def callback(text):
self.app.wallet.set_label(key, text)
self.update()
self.app.history_screen.update()
d = LabelDialog(_('Enter Transaction Label'), text, callback)
d.open()
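# Illustrative usage sketch (assumption, not part of the original module): the dialog
# is constructed with the running kivy app and a wallet Transaction, then opened:
#
#     d = TxDialog(app, tx)
#     d.open()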
| 36.032544
| 134
| 0.579112
|
3eaf0ab922df10034e59896da1d5bcdb6a5efb45
| 3,403
|
py
|
Python
|
tests/conftest.py
|
bcsummers/falcon-provider-audit
|
abb0d6ce85a60d345f6bb2b8ecf845a134ddd1b8
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
bcsummers/falcon-provider-audit
|
abb0d6ce85a60d345f6bb2b8ecf845a134ddd1b8
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
bcsummers/falcon-provider-audit
|
abb0d6ce85a60d345f6bb2b8ecf845a134ddd1b8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Testing conf module."""
# standard library
import os
import threading
# third-party
import pytest
from falcon import testing
from .Custom.app import app_db_1, app_dual_1, app_dual_2
from .Syslog.syslog_server import TestSyslogServers
# the log directory for all test cases
_LOG_DIRECTORY = os.path.join(os.getcwd(), 'log')
test_syslog = TestSyslogServers(address='0.0.0.0', log_directory=_LOG_DIRECTORY)
tcp_server = test_syslog.start_tcp_server(port=5141)
udp_server = test_syslog.start_udp_server(port=5140)
@pytest.fixture
def client_db_1() -> testing.TestClient:
"""Create testing client"""
return testing.TestClient(app_db_1)
@pytest.fixture
def client_dual_1() -> testing.TestClient:
"""Create testing client"""
return testing.TestClient(app_dual_1)
@pytest.fixture
def client_dual_2() -> testing.TestClient:
"""Create testing client"""
return testing.TestClient(app_dual_2)
@pytest.fixture
def client_rotating_logger_1() -> testing.TestClient:
"""Create testing client"""
from .Rotating_Logger.app import ( # pylint: disable=import-outside-toplevel
app_rotating_logger_1,
)
return testing.TestClient(app_rotating_logger_1)
@pytest.fixture
def client_rotating_logger_2() -> testing.TestClient:
"""Create testing client"""
from .Rotating_Logger.app import ( # pylint: disable=import-outside-toplevel
app_rotating_logger_2,
)
return testing.TestClient(app_rotating_logger_2)
@pytest.fixture
def client_tcp_logger_1() -> testing.TestClient:
"""Create testing client"""
from .Syslog.app import app_tcp_syslog_logger_1 # pylint: disable=import-outside-toplevel
return testing.TestClient(app_tcp_syslog_logger_1)
@pytest.fixture
def client_tcp_logger_2() -> testing.TestClient:
"""Create testing client"""
from .Syslog.app import app_tcp_syslog_logger_2 # pylint: disable=import-outside-toplevel
return testing.TestClient(app_tcp_syslog_logger_2)
@pytest.fixture
def client_udp_logger_1() -> testing.TestClient:
"""Create testing client"""
from .Syslog.app import app_udp_syslog_logger_1 # pylint: disable=import-outside-toplevel
return testing.TestClient(app_udp_syslog_logger_1)
@pytest.fixture
def client_udp_logger_2() -> testing.TestClient:
"""Create testing client"""
from .Syslog.app import app_udp_syslog_logger_2 # pylint: disable=import-outside-toplevel
return testing.TestClient(app_udp_syslog_logger_2)
@pytest.fixture
def log_directory() -> str:
"""Return the log directory"""
return _LOG_DIRECTORY
def pytest_configure() -> None:
"""Clear the log directory after tests are complete"""
# start TCP syslog servers
tcp_thread = threading.Thread(name='tcp_server', target=tcp_server.serve_forever, daemon=True)
tcp_thread.start()
# start UDP syslog servers
udp_thread = threading.Thread(name='udp_server', target=udp_server.serve_forever, daemon=True)
udp_thread.start()
def pytest_unconfigure(config) -> None: # pylint: disable=unused-argument
"""Clear the log directory after tests are complete"""
if os.path.isdir(_LOG_DIRECTORY):
for log_file in os.listdir(_LOG_DIRECTORY):
file_path = os.path.join(_LOG_DIRECTORY, log_file)
if os.path.isfile(file_path):
os.unlink(file_path)
os.rmdir(_LOG_DIRECTORY)
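# Hypothetical usage sketch (the route is an assumption, not defined in this file):
# test modules request the fixtures above by name, e.g.
#
#     def test_audit(client_db_1):
#         response = client_db_1.simulate_get('/middleware')
#         assert response.status_code == 200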
| 29.336207
| 98
| 0.740817
|
e9ced684f486fa28f952dbdea3c27e36c4ecf5e4
| 2,310
|
py
|
Python
|
tests/remoteexecution/remotecache.py
|
samkenxstream/buildstream
|
2164ac3ad2854eea30f85af6af2bc8a0b8754f3f
|
[
"Apache-2.0"
] | null | null | null |
tests/remoteexecution/remotecache.py
|
samkenxstream/buildstream
|
2164ac3ad2854eea30f85af6af2bc8a0b8754f3f
|
[
"Apache-2.0"
] | null | null | null |
tests/remoteexecution/remotecache.py
|
samkenxstream/buildstream
|
2164ac3ad2854eea30f85af6af2bc8a0b8754f3f
|
[
"Apache-2.0"
] | null | null | null |
# Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import copy
import os
import pytest
from buildstream.exceptions import ErrorDomain
from buildstream._testing import cli_remote_execution as cli # pylint: disable=unused-import
from buildstream._testing.integration import assert_contains
pytestmark = pytest.mark.remoteexecution
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project")
# Test building an executable with remote-execution and remote-cache enabled
@pytest.mark.datafiles(DATA_DIR)
def test_remote_autotools_build(cli, datafiles, remote_services):
project = str(datafiles)
checkout = os.path.join(cli.directory, "checkout")
element_name = "autotools/amhello.bst"
services = cli.ensure_services()
assert set(services) == set(["action-cache", "execution", "storage"])
# Enable remote cache and remove explicit remote execution CAS configuration.
config_without_remote_cache = copy.deepcopy(cli.config)
cli.configure({"cache": {"storage-service": {"url": remote_services.storage_service}}})
del cli.config["remote-execution"]["storage-service"]
config_with_remote_cache = cli.config
# Build element with remote execution.
result = cli.run(project=project, args=["build", element_name])
result.assert_success()
# Attempt checkout from local cache by temporarily disabling remote cache.
# This should fail as the build result shouldn't have been downloaded to the local cache.
cli.config = config_without_remote_cache
result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout])
result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt")
cli.config = config_with_remote_cache
# Attempt checkout again with remote cache.
result = cli.run(project=project, args=["artifact", "checkout", element_name, "--directory", checkout])
result.assert_success()
assert_contains(
checkout,
[
"/usr",
"/usr/lib",
"/usr/bin",
"/usr/share",
"/usr/bin/hello",
"/usr/share/doc",
"/usr/share/doc/amhello",
"/usr/share/doc/amhello/README",
],
)
| 36.666667
| 107
| 0.707359
|
9b1daac0e039f4267204b40e358883476e26e960
| 5,015
|
py
|
Python
|
app/business/transfer.py
|
justincc/RDSDS-Server
|
ed59110a9e56d19944c87464f682ce49111ad1e4
|
[
"Apache-2.0"
] | null | null | null |
app/business/transfer.py
|
justincc/RDSDS-Server
|
ed59110a9e56d19944c87464f682ce49111ad1e4
|
[
"Apache-2.0"
] | 1
|
2020-08-11T10:48:16.000Z
|
2020-08-11T10:48:16.000Z
|
app/business/transfer.py
|
justincc/RDSDS-Server
|
ed59110a9e56d19944c87464f682ce49111ad1e4
|
[
"Apache-2.0"
] | 2
|
2020-10-13T14:22:59.000Z
|
2020-11-13T16:50:54.000Z
|
from fastapi import APIRouter
from fastapi.encoders import jsonable_encoder
from starlette.requests import Request
from starlette.responses import JSONResponse
from app.models.transfer import TransferBase, TransferType, TransferResponse
from app.models.objects import Error
from app.business import globus
from app.crud.objects import get_object_access_methods, get_contents
async def create_transfer(transferBase: TransferBase, request: Request):
"""This function creates a transfer activity for an object"""
transfer_type = transferBase.transfer_type
# Code for globus
if transfer_type == TransferType.GLOBUS:
tokens = await globus.verify_globus_code(request)
if not tokens:
return JSONResponse(status_code=403, content={
"status_code": 403,
"msg": "The requester is not authorized to perform this action, Please login through /globus/login"
})
else:
transfer_client = await globus.get_transfer_client(request)
if not transferBase.source:
if transferBase.object_id:
transferBase = await get_globus_source_from_object(transferBase)
isFolder = await check_if_bundle(transferBase.object_id)
return await globus.create_transfer_globus(transferBase, transfer_client, isFolder)
async def check_if_bundle(object_id: str):
"""This function checks if an object is a bundle or not"""
object_contents = await get_contents(object_id)
if len(object_contents) == 0 :
return False
else:
return True
async def get_globus_source_from_object(transferBase: TransferBase):
"""This function checks if an object have a globus source to transfer from"""
object_access_methods = await get_object_access_methods(transferBase.object_id)
for am in object_access_methods:
if (am['type'] == TransferType.GLOBUS):
#source_endpoint = source.split(':')[1].replace('/','')
#source_path = source.split(':')[2]
transferBase.source = am['access_url']
return transferBase
async def get_transfer_list(request: Request):
"""This function checks for transfer list for an authenticated user"""
transfer_status_list = []
# Code for globus
tokens = await globus.verify_globus_code(request)
if tokens:
globus_item_count = 10
if 'globus_item_count' in request.query_params:
globus_item_count = request.path_params['globus_item_count']
transfer_client = await globus.get_transfer_client(request)
transfer_response = await globus.get_transfer_globus_list(transfer_client, globus_item_count)
transfer_status_list.append(transfer_response)
else:
error_response = {'globus' : 'No authorization available'}
transfer_status_list.append(error_response)
# TODO Other type of transfers
transfer_status_json = jsonable_encoder(transfer_status_list)
return JSONResponse(content=transfer_status_json, status_code=200)
async def get_transfer(transfer_id: str, request: Request):
"""This function checks for status for a transfer ID"""
if transfer_id.startswith('globus'):
tokens = await globus.verify_globus_code(request)
if not tokens:
return JSONResponse(status_code=403, content={
"status_code": 403,
"msg": "The requester is not authorized to perform this action, Please login through /globus/login"
})
else:
globus_transfer_id = transfer_id.replace('globus-','')
transfer_client = await globus.get_transfer_client(request)
transfer_response = await globus.get_transfer_globus(globus_transfer_id, transfer_client)
transfer_response_json = jsonable_encoder(transfer_response)
return JSONResponse(content=transfer_response_json, status_code=transfer_response['status'])
else:
return None
async def delete_transfer(transfer_id: str, request: Request):
"""This function deletes/cancels a transfer ID"""
if transfer_id.startswith('globus'):
tokens = await globus.verify_globus_code(request)
if not tokens:
return JSONResponse(status_code=403, content={
"status_code": 403,
"msg": "The requester is not authorized to perform this action, Please login through /globus/login"
})
else:
globus_transfer_id = transfer_id.replace('globus-','')
transfer_client = await globus.get_transfer_client(request)
transfer_response = await globus.delete_transfer_globus(globus_transfer_id, transfer_client)
transfer_response_json = jsonable_encoder(transfer_response)
return JSONResponse(content=transfer_response_json, status_code=transfer_response['status'])
else:
return None
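# Illustrative sketch (assumption, not part of this module): these coroutines are
# typically exposed through a FastAPI router, for example:
#
#     router = APIRouter()
#
#     @router.post("/transfer", response_model=TransferResponse)
#     async def post_transfer(transferBase: TransferBase, request: Request):
#         return await create_transfer(transferBase, request)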
| 47.761905
| 116
| 0.689332
|
91c649ccc1c9491021a73489985d22fa34fa5f3f
| 6,158
|
py
|
Python
|
simulation/env_46.py
|
liaojh1998/cross-modal-concept2robot
|
2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e
|
[
"MIT"
] | 4
|
2021-08-04T08:14:36.000Z
|
2022-03-14T05:59:46.000Z
|
simulation/env_46.py
|
liaojh1998/cross-modal-concept2robot
|
2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e
|
[
"MIT"
] | null | null | null |
simulation/env_46.py
|
liaojh1998/cross-modal-concept2robot
|
2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e
|
[
"MIT"
] | 2
|
2021-08-28T13:19:31.000Z
|
2021-09-17T17:48:41.000Z
|
#!/usr/bin/env python3
import time
import math
from datetime import datetime
from time import sleep
import numpy as np
import random
import cv2
import os
import argparse
import torch
from scipy.spatial.transform import Rotation as R
import sys
sys.path.append('./')
from env import Engine
from utils_env import get_view,safe_path,cut_frame,point2traj,get_gripper_pos,backup_code
################ Baseline Reward
import signal
import importlib
import torch
import torch.nn as nn
import sh
import re
import torch.nn.functional as F
np.set_printoptions(precision=4,suppress=True,linewidth=300)
class Engine46(Engine):
def __init__(self, worker_id, opti, p_id, taskId=5, maxSteps=15, n_dmps=3, cReward=True):
super(Engine46,self).__init__(opti, wid=worker_id, p_id=p_id, maxSteps=maxSteps, taskId=taskId, n_dmps=n_dmps, cReward=cReward,robot_model=None)
self.opti = opti
self._wid = worker_id
self.robot.gripperMaxForce = 200.0
self.robot.armMaxForce = 200.0
self.robot.jd = [0.01] * 14
self.p.setPhysicsEngineParameter(useSplitImpulse=True,splitImpulsePenetrationThreshold=0.01)
self.load_model()
self.p.setPhysicsEngineParameter(enableConeFriction=1)
self.p.setPhysicsEngineParameter(contactBreakingThreshold=0.001)
self.p.setPhysicsEngineParameter(allowedCcdPenetration=0.0)
self.p.setPhysicsEngineParameter(numSolverIterations=20)
self.p.setPhysicsEngineParameter(numSubSteps=10)
self.p.setPhysicsEngineParameter(constraintSolverType=self.p.CONSTRAINT_SOLVER_LCP_DANTZIG,globalCFM=0.000001)
self.p.setPhysicsEngineParameter(enableFileCaching=0)
self.p.setTimeStep(1 / 30.0)
self.p.setGravity(0,0,-9.81)
self.count = 0
self.fw1 = open("p1.txt","w")
self.fw2 = open("p2.txt","w")
def init_obj(self):
self.obj_id = self.p.loadURDF(fileName=os.path.join(self.urdf_dir,"obj_libs/drawers/d4/d4.urdf"),useFixedBase=True)
self.p.changeVisualShape (self.obj_id, -1, rgbaColor=[1.,0.,0.,1])
self.p.changeVisualShape (self.obj_id, 0, rgbaColor=[0,0,1,1.0])
self.p.changeVisualShape (self.obj_id, 1, rgbaColor=[0,0,1,1.0])
self.p.changeVisualShape (self.obj_id, 2, rgbaColor=[0,0,1,1.0])
self.p.changeVisualShape (self.obj_id, 3, rgbaColor=[0,0,1,1.0])
self.p.changeVisualShape (self.obj_id, 4, rgbaColor=[0,0,1,1.0])
self.p.resetJointState(self.obj_id,0,0.05)
numJoint = self.p.getNumJoints(self.obj_id)
for jointIndex in range(numJoint):
jointInfo = self.p.getJointInfo(self.obj_id,jointIndex)
print(jointInfo)
def reset_obj(self):
self.obj_x = 0.38
self.obj_y = 0.05
self.obj_z = 0.35
self.obj1_ori = self.p.getQuaternionFromEuler ([math.pi/2.0,0 ,-math.pi/2.0 + 0.1])
transl = np.random.uniform(-0.1,0.1,size=(2,))
self.obj_pos_new = np.array([self.obj_x+transl[0],self.obj_y+transl[1],self.obj_z])
r = R.from_quat(self.obj1_ori)
HTrans = np.zeros((4,4))
HTrans[:3,:3] = r.as_dcm()
rotation_degree = np.random.uniform(-0.5,0.5)
addRot = R.from_rotvec(rotation_degree * np.array([0,0,1]))
addHTrans = np.zeros((4,4))
addHTrans[:3,:3] = addRot.as_dcm()
NewHTrans = addHTrans.dot(HTrans)
self.obj1_ori_new = R.from_dcm(NewHTrans[:3,:3]).as_quat()
self.p.resetBasePositionAndOrientation(self.obj_id,self.obj_pos_new,self.obj1_ori_new)
init_d = np.random.uniform(0,0.04)
self.p.resetJointState(self.obj_id,0,init_d)
obj_friction_ceof = 0.3
self.p.changeDynamics(self.obj_id, -1, lateralFriction=0.3)
self.p.changeDynamics(self.obj_id, -1, rollingFriction=100.0)
self.p.changeDynamics(self.obj_id, -1, spinningFriction=100.0)
self.p.changeDynamics(self.obj_id, -1, linearDamping=40.0)
self.p.changeDynamics(self.obj_id, -1, angularDamping=40.0)
self.p.changeDynamics(self.obj_id, -1, contactStiffness=10000.0, contactDamping=1)
table_friction_ceof = 0.4
self.p.changeDynamics(self.table_id, -1, lateralFriction=table_friction_ceof)
self.p.changeDynamics(self.table_id, -1, rollingFriction=table_friction_ceof)
self.p.changeDynamics(self.table_id, -1, spinningFriction=table_friction_ceof)
self.p.changeDynamics(self.table_id, -1, contactStiffness=10000.0, contactDamping=0.01)
def init_motion(self):
self.data_q = np.load (os.path.join(self.robot_recordings_dir,"47-1/q.npy"))
self.data_gripper = np.load (self.configs_dir + '/init/gripper.npy')
self.initial_pos = (-1.3026999182595653, -1.210032113999055, 0.79519250956187, -2.118622450107143, 0.8971789146016195, 1.0616185345092588, -0.34515004476469724)
self.robot.gripperControl(0)
self.robot.setJointValue(self.initial_pos,220)
def init_grasp(self):
self.robot.gripperControl(0)
self.robot.setJointValue(self.initial_pos,220)
self.null_q = self.initial_pos
obj_x, obj_y, obj_z = self.obj_pos_new
pos = [obj_x+0.03,obj_y+0.3,obj_z+0.3]
orn = self.p.getQuaternionFromEuler([math.pi,0,0])
for i in range(19):
self.robot.positionControl(pos,orn,null_pose=self.null_q,gripperPos=220)
pos = [obj_x+0.0,obj_y+0.0,obj_z+0.3]
orn = self.p.getQuaternionFromEuler([math.pi,0,0])
for i in range(109):
self.robot.positionControl(pos,orn,null_pose=self.null_q,gripperPos=220)
pos = [obj_x-0.05,obj_y+0.0,obj_z+0.22]
orn = self.p.getQuaternionFromEuler([math.pi,0,0])
for i in range(19):
self.robot.positionControl(pos,orn,null_pose=self.null_q,gripperPos=220)
self.fix_orn = self.p.getLinkState(self.robotId, 7)[1]
self.start_pos = self.p.getLinkState (self.robotId, 7)[0]
def get_success(self,suc=None):
jointInfo = self.p.getJointState(self.obj_id,0)
if jointInfo[0] > 0.1:
return True
else:
return False
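    # Note (added for clarity, not in the original file): jointInfo[0] is the position
    # of the drawer's prismatic joint, so the episode counts as a success once the
    # drawer has been pulled out past 0.1 (joint units).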
| 38.974684
| 168
| 0.676194
|
784f0e2030f337f291d829957902c2bde6918091
| 9,087
|
py
|
Python
|
config/settings/common.py
|
aachi/kickass
|
5668991440fbd759cde6e0e18956fa7706eb075a
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/common.py
|
aachi/kickass
|
5668991440fbd759cde6e0e18956fa7706eb075a
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/common.py
|
aachi/kickass
|
5668991440fbd759cde6e0e18956fa7706eb075a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Django settings for kickass project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('kickass')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'kickass.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'kickass.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""aachi""", 'asifmanzoorawan@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///kickass"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Your common stuff: Below this line define 3rd party library settings
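# Illustrative sketch (assumption, not part of this file): environment-specific
# modules usually extend these settings, e.g. in config/settings/local.py:
#
#     from .common import *  # noqa
#     DEBUG = env.bool('DJANGO_DEBUG', default=True)
#     EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'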
| 35.22093
| 98
| 0.608452
|
b178d6caeef110570ec2cbf8c6e2ef29c11582d5
| 407
|
py
|
Python
|
djangoauthtrial/wsgi.py
|
sankalpmukim/django-auth-trial
|
40d2a0eabb91acf666fffdb9c6456228d55c0882
|
[
"MIT"
] | null | null | null |
djangoauthtrial/wsgi.py
|
sankalpmukim/django-auth-trial
|
40d2a0eabb91acf666fffdb9c6456228d55c0882
|
[
"MIT"
] | null | null | null |
djangoauthtrial/wsgi.py
|
sankalpmukim/django-auth-trial
|
40d2a0eabb91acf666fffdb9c6456228d55c0882
|
[
"MIT"
] | null | null | null |
"""
WSGI config for djangoauthtrial project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoauthtrial.settings')
application = get_wsgi_application()
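# Illustrative note (assumption): in production this callable is typically served by
# a WSGI server, e.g. `gunicorn djangoauthtrial.wsgi:application`.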
| 23.941176
| 78
| 0.793612
|
72d4e2a24dabe187754e76e665c6623d6bd0b741
| 266
|
py
|
Python
|
Codechef/October Challenge 2019/Operations on a Matrix/test_ops_on_matrix.py
|
Goyatuzo/HackerRank
|
ad836faa7e8ecfe751a72027e1312ae3ee19f4ca
|
[
"MIT"
] | null | null | null |
Codechef/October Challenge 2019/Operations on a Matrix/test_ops_on_matrix.py
|
Goyatuzo/HackerRank
|
ad836faa7e8ecfe751a72027e1312ae3ee19f4ca
|
[
"MIT"
] | 4
|
2016-03-19T05:13:29.000Z
|
2021-05-10T16:42:42.000Z
|
Codechef/October Challenge 2019/Operations on a Matrix/test_ops_on_matrix.py
|
Goyatuzo/Challenges
|
ad836faa7e8ecfe751a72027e1312ae3ee19f4ca
|
[
"MIT"
] | null | null | null |
import unittest
from ops_on_matrix import ops_on_matrix
class TestPhonePrices(unittest.TestCase):
def test_example(self):
res = ops_on_matrix(2, 2, [[1, 1], [1, 2], [2, 1]])
self.assertEqual(2, res)
if __name__ == '__main__':
unittest.main()
| 22.166667
| 59
| 0.669173
|
c9a75997adf61ab0102fb908095a473d831246b1
| 484
|
py
|
Python
|
src/tokens/migrations/0003_token_category.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
src/tokens/migrations/0003_token_category.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
src/tokens/migrations/0003_token_category.py
|
pwelzel/bornhack-website
|
af794e6a2fba06e09626259c7768feb30ff394be
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.0.4 on 2018-08-19 15:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tokens', '0002_tokenfind'),
]
operations = [
migrations.AddField(
model_name='token',
name='category',
field=models.TextField(default='', help_text='The category/hint for this token (physical, website, whatever)'),
preserve_default=False,
),
]
| 24.2
| 123
| 0.609504
|
8406c805ac7c97388df1ca683b3ee42b61123c7b
| 15,987
|
py
|
Python
|
jasy/script/output/Compressor.py
|
sebastian-software/jasy
|
9740ed33f0836ab2dd3e00ab4fae4049f9908072
|
[
"MIT"
] | 2
|
2015-05-27T19:30:49.000Z
|
2015-12-10T16:55:14.000Z
|
jasy/script/output/Compressor.py
|
sebastian-software/jasy
|
9740ed33f0836ab2dd3e00ab4fae4049f9908072
|
[
"MIT"
] | 2
|
2015-03-16T09:15:58.000Z
|
2015-04-07T19:05:47.000Z
|
jasy/script/output/Compressor.py
|
sebastian-software/jasy
|
9740ed33f0836ab2dd3e00ab4fae4049f9908072
|
[
"MIT"
] | 2
|
2017-07-18T20:08:05.000Z
|
2021-01-04T10:46:14.000Z
|
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import re
import sys
import json
from jasy.script.tokenize.Lang import keywords
from jasy.script.parse.Lang import expressions, futureReserved
high_unicode = re.compile(r"\\u[2-9A-Fa-f][0-9A-Fa-f]{3}")
ascii_encoder = json.JSONEncoder(ensure_ascii=True)
unicode_encoder = json.JSONEncoder(ensure_ascii=False)
#
# Class
#
class Compressor:
__semicolonSymbol = ";"
__commaSymbol = ","
def __init__(self, format=None):
if format:
if format.has("semicolon"):
self.__semicolonSymbol = ";\n"
if format.has("comma"):
self.__commaSymbol = ",\n"
self.__forcedSemicolon = False
#
# Main
#
def compress(self, node):
type = node.type
result = None
if type in self.__simple:
result = type
elif type in self.__prefixes:
if getattr(node, "postfix", False):
result = self.compress(node[0]) + self.__prefixes[node.type]
else:
result = self.__prefixes[node.type] + self.compress(node[0])
elif type in self.__dividers:
first = self.compress(node[0])
second = self.compress(node[1])
divider = self.__dividers[node.type]
# Fast path
if node.type not in ("plus", "minus"):
result = "%s%s%s" % (first, divider, second)
# Special code for dealing with situations like x + ++y and y-- - x
else:
result = first
if first.endswith(divider):
result += " "
result += divider
if second.startswith(divider):
result += " "
result += second
else:
try:
result = getattr(self, "type_%s" % type)(node)
except AttributeError:
raise Exception("Script compressor does not support type '%s' from line %s in file %s" % (type, node.line, node.getFileName()))
if getattr(node, "parenthesized", None):
return "(%s)" % result
else:
return result
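    # Example (added for clarity, not in the original source): for a binary "plus"
    # node whose right operand is a unary plus, the special path above emits
    # "x+ +y" rather than "x++y", which would tokenize as an increment.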
#
# Helpers
#
def __statements(self, node):
result = []
for child in node:
result.append(self.compress(child))
return "".join(result)
def __handleForcedSemicolon(self, node):
if node.type == "semicolon" and not hasattr(node, "expression"):
self.__forcedSemicolon = True
def __addSemicolon(self, result):
if not result.endswith(self.__semicolonSymbol):
if self.__forcedSemicolon:
self.__forcedSemicolon = False
return result + self.__semicolonSymbol
else:
return result
def __removeSemicolon(self, result):
if self.__forcedSemicolon:
self.__forcedSemicolon = False
return result
if result.endswith(self.__semicolonSymbol):
return result[:-len(self.__semicolonSymbol)]
else:
return result
#
# Data
#
__simple_property = re.compile(r"^[a-zA-Z_$][a-zA-Z0-9_$]*$")
__number_property = re.compile(r"^[0-9]+$")
__simple = ["true", "false", "null", "this", "debugger"]
__dividers = {
"plus" : '+',
"minus" : '-',
"mul" : '*',
"div" : '/',
"mod" : '%',
"dot" : '.',
"or" : "||",
"and" : "&&",
"strict_eq" : '===',
"eq" : '==',
"strict_ne" : '!==',
"ne" : '!=',
"lsh" : '<<',
"le" : '<=',
"lt" : '<',
"ursh" : '>>>',
"rsh" : '>>',
"ge" : '>=',
"gt" : '>',
"bitwise_or" : '|',
"bitwise_xor" : '^',
"bitwise_and" : '&'
}
__prefixes = {
"increment" : "++",
"decrement" : "--",
"bitwise_not" : '~',
"not" : "!",
"unary_plus" : "+",
"unary_minus" : "-",
"delete" : "delete ",
"new" : "new ",
"typeof" : "typeof ",
"void" : "void "
}
#
# Script Scope
#
def type_script(self, node):
return self.__statements(node)
#
# Expressions
#
def type_comma(self, node):
return self.__commaSymbol.join(map(self.compress, node))
def type_object_init(self, node):
return "{%s}" % self.__commaSymbol.join(map(self.compress, node))
def type_property_init(self, node):
key = self.compress(node[0])
value = self.compress(node[1])
if type(key) in (int, float):
pass
elif self.__number_property.match(key):
pass
# Protect keywords and special characters
elif key in keywords or key in futureReserved or not self.__simple_property.match(key):
key = self.type_string(node[0])
return "%s:%s" % (key, value)
def type_array_init(self, node):
def helper(child):
return self.compress(child) if child is not None else ""
return "[%s]" % ",".join(map(helper, node))
def type_array_comp(self, node):
return "[%s %s]" % (self.compress(node.expression), self.compress(node.tail))
def type_string(self, node):
# Omit writing real high unicode character which are not supported well by browsers
ascii = ascii_encoder.encode(node.value)
if high_unicode.search(ascii):
return ascii
else:
return unicode_encoder.encode(node.value)
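    # Note (added for clarity, not in the original source): strings containing
    # characters at or above U+2000 are kept in their \uXXXX-escaped ASCII form,
    # since some browsers handle raw high unicode literals poorly; everything
    # else is written out unescaped.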
def type_number(self, node):
value = node.value
# Special handling for protected float/exponential
if isinstance(value, str):
# Convert zero-prefix
if value.startswith("0.") and len(value) > 2:
value = value[1:]
# Convert zero postfix
elif value.endswith(".0"):
value = value[:-2]
elif int(value) == value and node.parent.type != "dot":
value = int(value)
return "%s" % value
def type_regexp(self, node):
return node.value
def type_identifier(self, node):
return node.value
def type_list(self, node):
return ",".join(map(self.compress, node))
def type_index(self, node):
return "%s[%s]" % (self.compress(node[0]), self.compress(node[1]))
def type_declaration(self, node):
names = getattr(node, "names", None)
if names:
result = self.compress(names)
else:
result = node.name
initializer = getattr(node, "initializer", None)
if initializer:
result += "=%s" % self.compress(node.initializer)
return result
def type_assign(self, node):
assignOp = getattr(node, "assignOp", None)
operator = "=" if not assignOp else self.__dividers[assignOp] + "="
return self.compress(node[0]) + operator + self.compress(node[1])
def type_call(self, node):
return "%s(%s)" % (self.compress(node[0]), self.compress(node[1]))
def type_new_with_args(self, node):
result = "new %s" % self.compress(node[0])
# Compress new Object(); => new Object;
if len(node[1]) > 0:
result += "(%s)" % self.compress(node[1])
else:
parent = getattr(node, "parent", None)
            if parent and parent.type == "dot":
result += "()"
return result
def type_exception(self, node):
return node.value
def type_generator(self, node):
"""Generator Expression."""
result = self.compress(getattr(node, "expression"))
tail = getattr(node, "tail", None)
if tail:
result += " %s" % self.compress(tail)
return result
def type_comp_tail(self, node):
"""Comprehensions Tails."""
result = self.compress(getattr(node, "for"))
guard = getattr(node, "guard", None)
if guard:
result += "if(%s)" % self.compress(guard)
return result
def type_in(self, node):
first = self.compress(node[0])
second = self.compress(node[1])
if first.endswith("'") or first.endswith('"'):
pattern = "%sin %s"
else:
pattern = "%s in %s"
return pattern % (first, second)
def type_instanceof(self, node):
first = self.compress(node[0])
second = self.compress(node[1])
return "%s instanceof %s" % (first, second)
#
# Statements :: Core
#
def type_block(self, node):
return "{%s}" % self.__removeSemicolon(self.__statements(node))
def type_let_block(self, node):
begin = "let(%s)" % ",".join(map(self.compress, node.variables))
if hasattr(node, "block"):
end = self.compress(node.block)
elif hasattr(node, "expression"):
end = self.compress(node.expression)
return begin + end
def type_const(self, node):
return self.__addSemicolon("const %s" % self.type_list(node))
def type_var(self, node):
return self.__addSemicolon("var %s" % self.type_list(node))
def type_let(self, node):
return self.__addSemicolon("let %s" % self.type_list(node))
def type_semicolon(self, node):
expression = getattr(node, "expression", None)
return self.__addSemicolon(self.compress(expression) if expression else "")
def type_label(self, node):
return self.__addSemicolon("%s:%s" % (node.label, self.compress(node.statement)))
def type_break(self, node):
return self.__addSemicolon("break" if not hasattr(node, "label") else "break %s" % node.label)
def type_continue(self, node):
return self.__addSemicolon("continue" if not hasattr(node, "label") else "continue %s" % node.label)
#
# Statements :: Functions
#
def type_function(self, node):
if node.type == "setter":
result = "set"
elif node.type == "getter":
result = "get"
else:
result = "function"
name = getattr(node, "name", None)
if name:
result += " %s" % name
params = getattr(node, "params", None)
result += "(%s)" % self.compress(params) if params else "()"
# keep expression closure format (may be micro-optimized for other code, too)
if getattr(node, "expressionClosure", False):
result += self.compress(node.body)
else:
result += "{%s}" % self.__removeSemicolon(self.compress(node.body))
return result
def type_getter(self, node):
return self.type_function(node)
def type_setter(self, node):
return self.type_function(node)
def type_return(self, node):
result = "return"
if hasattr(node, "value"):
valueCode = self.compress(node.value)
# Micro optimization: Don't need a space when a block/map/array/group/strings are returned
if not valueCode.startswith(("(", "[", "{", "'", '"', "!", "-", "/")):
result += " "
result += valueCode
return self.__addSemicolon(result)
#
# Statements :: Exception Handling
#
def type_throw(self, node):
return self.__addSemicolon("throw %s" % self.compress(node.exception))
def type_try(self, node):
result = "try%s" % self.compress(node.tryBlock)
for catch in node:
if catch.type == "catch":
if hasattr(catch, "guard"):
result += "catch(%s if %s)%s" % (self.compress(catch.exception), self.compress(catch.guard), self.compress(catch.block))
else:
result += "catch(%s)%s" % (self.compress(catch.exception), self.compress(catch.block))
if hasattr(node, "finallyBlock"):
result += "finally%s" % self.compress(node.finallyBlock)
return result
#
# Statements :: Loops
#
def type_while(self, node):
result = "while(%s)%s" % (self.compress(node.condition), self.compress(node.body))
self.__handleForcedSemicolon(node.body)
return result
def type_do(self, node):
# block unwrapping don't help to reduce size on this loop type
# but if it happens (don't like to modify a global function to fix a local issue), we
# need to fix the body and re-add braces around the statement
body = self.compress(node.body)
if not body.startswith("{"):
body = "{%s}" % body
return self.__addSemicolon("do%swhile(%s)" % (body, self.compress(node.condition)))
def type_for_in(self, node):
# Optional variable declarations
varDecl = getattr(node, "varDecl", None)
# Body is optional - at least in comprehensions tails
body = getattr(node, "body", None)
if body:
body = self.compress(body)
else:
body = ""
result = "for"
if node.isEach:
result += " each"
result += "(%s in %s)%s" % (self.__removeSemicolon(self.compress(node.iterator)), self.compress(node.object), body)
if body:
self.__handleForcedSemicolon(node.body)
return result
def type_for(self, node):
setup = getattr(node, "setup", None)
condition = getattr(node, "condition", None)
update = getattr(node, "update", None)
result = "for("
result += self.__addSemicolon(self.compress(setup) if setup else "")
result += self.__addSemicolon(self.compress(condition) if condition else "")
result += self.compress(update) if update else ""
result += ")%s" % self.compress(node.body)
self.__handleForcedSemicolon(node.body)
return result
#
# Statements :: Conditionals
#
def type_hook(self, node):
"""aka ternary operator."""
condition = node.condition
thenPart = node.thenPart
elsePart = node.elsePart
if condition.type == "not":
[thenPart, elsePart] = [elsePart, thenPart]
condition = condition[0]
return "%s?%s:%s" % (self.compress(condition), self.compress(thenPart), self.compress(elsePart))
def type_if(self, node):
result = "if(%s)%s" % (self.compress(node.condition), self.compress(node.thenPart))
elsePart = getattr(node, "elsePart", None)
if elsePart:
result += "else"
elseCode = self.compress(elsePart)
# Micro optimization: Don't need a space when the child is a block
# At this time the brace could not be part of a map declaration (would be a syntax error)
if not elseCode.startswith(("{", "(", ";")):
result += " "
result += elseCode
self.__handleForcedSemicolon(elsePart)
return result
def type_switch(self, node):
result = "switch(%s){" % self.compress(node.discriminant)
for case in node:
if case.type == "case":
labelCode = self.compress(case.label)
if labelCode.startswith('"'):
result += "case%s:" % labelCode
else:
result += "case %s:" % labelCode
elif case.type == "default":
result += "default:"
else:
continue
for statement in case.statements:
temp = self.compress(statement)
if len(temp) > 0:
result += self.__addSemicolon(temp)
return "%s}" % self.__removeSemicolon(result)
| 28.396092
| 143
| 0.544818
|
9cbb38bdf233e5d3a9a6b473ab62287cd3208cc8
| 488
|
py
|
Python
|
packages/python/plotly/plotly/validators/layout/legend/_orientation.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/legend/_orientation.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/layout/legend/_orientation.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="orientation", parent_name="layout.legend", **kwargs
):
super(OrientationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "legend"),
values=kwargs.pop("values", ["v", "h"]),
**kwargs,
)
| 32.533333
| 78
| 0.641393
|
0e49f0f0b2f8c827bafe5c6efbc01b4f33ec7669
| 1,369
|
py
|
Python
|
.github/workflows/check-links/crawler.py
|
gabriel-milan/mais
|
d3f2dd70cd7574b021bde569ea31dcebcd5bb313
|
[
"MIT"
] | null | null | null |
.github/workflows/check-links/crawler.py
|
gabriel-milan/mais
|
d3f2dd70cd7574b021bde569ea31dcebcd5bb313
|
[
"MIT"
] | null | null | null |
.github/workflows/check-links/crawler.py
|
gabriel-milan/mais
|
d3f2dd70cd7574b021bde569ea31dcebcd5bb313
|
[
"MIT"
] | null | null | null |
from scrapy.item import Field, Item
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider, Rule
class LinkStatus(Item):
url = Field()
status = Field()
referer = Field()
link_text = Field()
class LinkSpider(CrawlSpider):
name = "link-checker"
report_if = set(range(400, 600))
handle_httpstatus_list = list(report_if)
start_urls = ["http://basedosdados.github.io/mais"]
rules = [
Rule(
            LinkExtractor(allow=r"basedosdados\.github\.io.*"),
callback="parse",
follow=True,
),
Rule(LinkExtractor(), callback="parse", follow=False),
]
def parse(self, response):
if response.status in self.report_if:
item = LinkStatus()
item["url"] = response.url
item["status"] = response.status
item["link_text"] = response.meta["link_text"].strip()
item["referer"] = response.request.headers.get(
"Referer", self.start_urls[0]
)
yield item
yield None
# References
# https://gist.github.com/mdamien/7b71ef06f49de1189fb75f8fed91ae82
# https://matthewhoelter.com/2018/11/27/finding-broken-links-on-website.html
# https://dev.to/pjcalvo/broken-links-checker-with-python-and-scrapy-webcrawler-1gom
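# Minimal sketch of running LinkSpider programmatically (illustrative addition, not part
# of the original workflow file): it uses Scrapy's documented CrawlerProcess API; the
# LOG_LEVEL value is an arbitrary choice for the example.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
    process.crawl(LinkSpider)  # schedule the spider defined above
    process.start()  # start the reactor and block until the crawl finishes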
| 29.12766
| 84
| 0.636961
|
e06852fcb342842702b5ff66f224a0ee96fd17d5
| 10,085
|
py
|
Python
|
src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/lab_virtual_machine.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/lab_virtual_machine.py
|
saurabsa/azure-cli-old
|
f77477a98c9aa9cb55daf5b0d2f410d1455a9225
|
[
"MIT"
] | 2
|
2021-03-25T21:38:56.000Z
|
2021-11-15T17:46:45.000Z
|
src/command_modules/azure-cli-lab/azure/cli/command_modules/lab/sdk/devtestlabs/models/lab_virtual_machine.py
|
Visual-Studio-China/azure-cli-int
|
48c7c7f371a0ecc4ebfd4dcfdc72764beddf5c31
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# coding: utf-8
# pylint: skip-file
from msrest.serialization import Model
class LabVirtualMachine(Model):
"""A virtual machine.
:param notes: The notes of the virtual machine.
:type notes: str
:param owner_object_id: The object identifier of the owner of the virtual
machine.
:type owner_object_id: str
:param created_by_user_id: The object identifier of the creator of the
virtual machine.
:type created_by_user_id: str
:param created_by_user: The email address of creator of the virtual
machine.
:type created_by_user: str
:param created_date: The creation date of the virtual machine.
:type created_date: datetime
:param compute_id: The resource identifier (Microsoft.Compute) of the
virtual machine.
:type compute_id: str
:param custom_image_id: The custom image identifier of the virtual
machine.
:type custom_image_id: str
:param os_type: The OS type of the virtual machine.
:type os_type: str
:param size: The size of the virtual machine.
:type size: str
:param user_name: The user name of the virtual machine.
:type user_name: str
:param password: The password of the virtual machine administrator.
:type password: str
:param ssh_key: The SSH key of the virtual machine administrator.
:type ssh_key: str
:param is_authentication_with_ssh_key: Indicates whether this virtual
machine uses an SSH key for authentication.
:type is_authentication_with_ssh_key: bool
:param fqdn: The fully-qualified domain name of the virtual machine.
:type fqdn: str
:param lab_subnet_name: The lab subnet name of the virtual machine.
:type lab_subnet_name: str
:param lab_virtual_network_id: The lab virtual network identifier of the
virtual machine.
:type lab_virtual_network_id: str
:param disallow_public_ip_address: Indicates whether the virtual machine
is to be created without a public IP address.
:type disallow_public_ip_address: bool
:param artifacts: The artifacts to be installed on the virtual machine.
:type artifacts: list of :class:`ArtifactInstallProperties
<azure.mgmt.devtestlabs.models.ArtifactInstallProperties>`
:param artifact_deployment_status: The artifact deployment status for the
virtual machine.
:type artifact_deployment_status:
:class:`ArtifactDeploymentStatusProperties
<azure.mgmt.devtestlabs.models.ArtifactDeploymentStatusProperties>`
:param gallery_image_reference: The Microsoft Azure Marketplace image
reference of the virtual machine.
:type gallery_image_reference: :class:`GalleryImageReference
<azure.mgmt.devtestlabs.models.GalleryImageReference>`
:param compute_vm: The compute virtual machine properties.
:type compute_vm: :class:`ComputeVmProperties
<azure.mgmt.devtestlabs.models.ComputeVmProperties>`
:param network_interface: The network interface properties.
:type network_interface: :class:`NetworkInterfaceProperties
<azure.mgmt.devtestlabs.models.NetworkInterfaceProperties>`
:param applicable_schedule: The applicable schedule for the virtual
machine.
:type applicable_schedule: :class:`ApplicableSchedule
<azure.mgmt.devtestlabs.models.ApplicableSchedule>`
:param expiration_date: The expiration date for VM.
:type expiration_date: str
:param allow_claim: Indicates whether another user can take ownership of
the virtual machine
:type allow_claim: bool
:param storage_type: Storage type to use for virtual machine (i.e.
Standard, Premium).
:type storage_type: str
:param virtual_machine_creation_source: Tells source of creation of lab
virtual machine. Output property only. Possible values include:
'FromCustomImage', 'FromGalleryImage'
:type virtual_machine_creation_source: str or
:class:`VirtualMachineCreationSource
<azure.mgmt.devtestlabs.models.VirtualMachineCreationSource>`
:param environment_id: The resource ID of the environment that contains
this virtual machine, if any.
:type environment_id: str
:param provisioning_state: The provisioning status of the resource.
:type provisioning_state: str
:param unique_identifier: The unique immutable identifier of a resource
(Guid).
:type unique_identifier: str
:param id: The identifier of the resource.
:type id: str
:param name: The name of the resource.
:type name: str
:param type: The type of the resource.
:type type: str
:param location: The location of the resource.
:type location: str
:param tags: The tags of the resource.
:type tags: dict
"""
_attribute_map = {
'notes': {'key': 'properties.notes', 'type': 'str'},
'owner_object_id': {'key': 'properties.ownerObjectId', 'type': 'str'},
'created_by_user_id': {'key': 'properties.createdByUserId', 'type': 'str'},
'created_by_user': {'key': 'properties.createdByUser', 'type': 'str'},
'created_date': {'key': 'properties.createdDate', 'type': 'iso-8601'},
'compute_id': {'key': 'properties.computeId', 'type': 'str'},
'custom_image_id': {'key': 'properties.customImageId', 'type': 'str'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'size': {'key': 'properties.size', 'type': 'str'},
'user_name': {'key': 'properties.userName', 'type': 'str'},
'password': {'key': 'properties.password', 'type': 'str'},
'ssh_key': {'key': 'properties.sshKey', 'type': 'str'},
'is_authentication_with_ssh_key': {'key': 'properties.isAuthenticationWithSshKey', 'type': 'bool'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'lab_subnet_name': {'key': 'properties.labSubnetName', 'type': 'str'},
'lab_virtual_network_id': {'key': 'properties.labVirtualNetworkId', 'type': 'str'},
'disallow_public_ip_address': {'key': 'properties.disallowPublicIpAddress', 'type': 'bool'},
'artifacts': {'key': 'properties.artifacts', 'type': '[ArtifactInstallProperties]'},
'artifact_deployment_status': {'key': 'properties.artifactDeploymentStatus', 'type': 'ArtifactDeploymentStatusProperties'},
'gallery_image_reference': {'key': 'properties.galleryImageReference', 'type': 'GalleryImageReference'},
'compute_vm': {'key': 'properties.computeVm', 'type': 'ComputeVmProperties'},
'network_interface': {'key': 'properties.networkInterface', 'type': 'NetworkInterfaceProperties'},
'applicable_schedule': {'key': 'properties.applicableSchedule', 'type': 'ApplicableSchedule'},
'expiration_date': {'key': 'properties.expirationDate', 'type': 'str'},
'allow_claim': {'key': 'properties.allowClaim', 'type': 'bool'},
'storage_type': {'key': 'properties.storageType', 'type': 'str'},
'virtual_machine_creation_source': {'key': 'properties.virtualMachineCreationSource', 'type': 'str'},
'environment_id': {'key': 'properties.environmentId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, notes=None, owner_object_id=None, created_by_user_id=None, created_by_user=None, created_date=None, compute_id=None, custom_image_id=None, os_type=None, size=None, user_name=None, password=None, ssh_key=None, is_authentication_with_ssh_key=None, fqdn=None, lab_subnet_name=None, lab_virtual_network_id=None, disallow_public_ip_address=None, artifacts=None, artifact_deployment_status=None, gallery_image_reference=None, compute_vm=None, network_interface=None, applicable_schedule=None, expiration_date=None, allow_claim=None, storage_type=None, virtual_machine_creation_source=None, environment_id=None, provisioning_state=None, unique_identifier=None, id=None, name=None, type=None, location=None, tags=None):
self.notes = notes
self.owner_object_id = owner_object_id
self.created_by_user_id = created_by_user_id
self.created_by_user = created_by_user
self.created_date = created_date
self.compute_id = compute_id
self.custom_image_id = custom_image_id
self.os_type = os_type
self.size = size
self.user_name = user_name
self.password = password
self.ssh_key = ssh_key
self.is_authentication_with_ssh_key = is_authentication_with_ssh_key
self.fqdn = fqdn
self.lab_subnet_name = lab_subnet_name
self.lab_virtual_network_id = lab_virtual_network_id
self.disallow_public_ip_address = disallow_public_ip_address
self.artifacts = artifacts
self.artifact_deployment_status = artifact_deployment_status
self.gallery_image_reference = gallery_image_reference
self.compute_vm = compute_vm
self.network_interface = network_interface
self.applicable_schedule = applicable_schedule
self.expiration_date = expiration_date
self.allow_claim = allow_claim
self.storage_type = storage_type
self.virtual_machine_creation_source = virtual_machine_creation_source
self.environment_id = environment_id
self.provisioning_state = provisioning_state
self.unique_identifier = unique_identifier
self.id = id
self.name = name
self.type = type
self.location = location
self.tags = tags
| 54.22043
| 734
| 0.694497
|
498dbbaaee18f85664b1a62549c3bb8ef6617e0c
| 2,885
|
py
|
Python
|
lib/models/MobileNetCoco14_5.py
|
yukihiko/human-pose-estimation.pytorch
|
187f60ac2088f91b9a48d9e5f4c2b1053a9f32bf
|
[
"MIT"
] | 2
|
2019-04-30T12:31:32.000Z
|
2019-11-25T16:39:07.000Z
|
lib/models/MobileNetCoco14_5.py
|
yukihiko/human-pose-estimation.pytorch
|
187f60ac2088f91b9a48d9e5f4c2b1053a9f32bf
|
[
"MIT"
] | null | null | null |
lib/models/MobileNetCoco14_5.py
|
yukihiko/human-pose-estimation.pytorch
|
187f60ac2088f91b9a48d9e5f4c2b1053a9f32bf
|
[
"MIT"
] | 4
|
2019-12-05T06:26:57.000Z
|
2021-12-16T05:17:39.000Z
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import torch.nn.functional as F
import sys
class MobileNetCoco14_5(nn.Module):
def __init__(self):
super(MobileNetCoco14_5, self).__init__()
self.col = 14
self.Nj = 17
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
def conv_last(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
)
self.model = nn.Sequential(
conv_bn( 3, 32, 2),
conv_dw( 32, 64, 1),
conv_dw( 64, 128, 2),
conv_dw(128, 128, 1),
conv_dw(128, 256, 2),
)
self.model1_1 = conv_dw(256, 256, 1)
self.model1_1_2 = conv_dw(256, 256, 1)
self.model1_2 = conv_dw(256, 512, 2)
self.model1_3 = conv_dw(512, 512, 1)
self.model1_4 = conv_dw(512, 512, 1)
self.model1_5 = conv_dw(512, 512, 1)
self.model1_6 = conv_dw(512, 512, 1)
self.model1_7 = conv_dw(512, 512, 1)
self.model1_7_2 = conv_dw(512, 512, 1)
self.model1_8 = conv_dw(512, 1024, 1)
self.model1_9 = conv_dw(1024, 1024, 1)
self.model2_1 = conv_dw(1024, 1024, 1)
#20181201
self.model2_2 = conv_dw(1024, 1024, 1)
self.heatmap = conv_last(1024, self.Nj, 1)
self.offset = conv_last(1024, self.Nj*2, 1)
def forward(self, x):
x1 = self.model(x)
x11 = self.model1_1(x1) + x1
x11_2 = self.model1_1_2(x11) + x11
x12 = self.model1_2(x11_2)
x13 = self.model1_3(x12) + x12
x14 = self.model1_4(x13) + x13
x15 = self.model1_5(x14) + x14
x16 = self.model1_6(x15) + x15
x17 = self.model1_7(x16) + x16 + x13
x17_2 = self.model1_7_2(x17) + x17 + x12
x18 = self.model1_8(x17_2)
x19 = self.model1_9(x18) + x18
x21 = self.model2_1(x19) + x19
x22 = self.model2_2(x21) + x21 + x18
h = self.heatmap(x22)
h = F.sigmoid(h)
o = self.offset(x22)
return o, h
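# Minimal smoke-test sketch (illustrative, not part of the original file). With a
# 224x224 input, the three stride-2 layers in self.model plus the stride-2 block
# model1_2 downsample by 16x, so the heads emit 14x14 maps: 34 offset channels (Nj*2)
# and 17 heatmap channels (Nj), matching self.col and self.Nj above.
if __name__ == '__main__':
    import torch
    net = MobileNetCoco14_5()
    offsets, heatmaps = net(torch.randn(1, 3, 224, 224))
    print(offsets.shape)   # torch.Size([1, 34, 14, 14])
    print(heatmaps.shape)  # torch.Size([1, 17, 14, 14])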
| 31.358696
| 75
| 0.498787
|
69e8e89dd29f505643e8bddc8e961636722b3acb
| 2,875
|
py
|
Python
|
14_orf/solution2_str_partition.py
|
LongNguyen1984/biofx_python
|
b8d45dc38d968674c6b641051b73f8ed1503b1e4
|
[
"MIT"
] | 1
|
2021-04-21T07:15:27.000Z
|
2021-04-21T07:15:27.000Z
|
14_orf/solution2_str_partition.py
|
LongNguyen1984/biofx_python
|
b8d45dc38d968674c6b641051b73f8ed1503b1e4
|
[
"MIT"
] | null | null | null |
14_orf/solution2_str_partition.py
|
LongNguyen1984/biofx_python
|
b8d45dc38d968674c6b641051b73f8ed1503b1e4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" Open Reading Frames """
import argparse
import sys
from typing import List, NamedTuple, TextIO
from Bio import Seq, SeqIO
class Args(NamedTuple):
""" Command-line arguments """
file: TextIO
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Open Reading Frames',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
help='Input FASTA file',
metavar='FILE',
type=argparse.FileType('rt'))
args = parser.parse_args()
return Args(args.file)
# --------------------------------------------------
def main() -> None:
""" Make a jazz noise here """
args = get_args()
if seqs := [str(rec.seq) for rec in SeqIO.parse(args.file, 'fasta')]:
rna = seqs[0].replace('T', 'U')
orfs = set()
for seq in [rna, Seq.reverse_complement(rna)]:
for i in range(3):
if prot := Seq.translate(truncate(seq[i:], 3), to_stop=False):
for orf in find_orfs(prot):
orfs.add(orf)
print('\n'.join(sorted(orfs)))
else:
sys.exit(f'"{args.file.name}" contains no sequences.')
# --------------------------------------------------
def find_orfs(aa: str) -> List[str]:
""" Find ORFs in AA sequence """
# 2: Use str.partition()
orfs = []
while True:
first, middle, rest = aa.partition('*')
if middle == '':
break
last = 0
while True:
start = first.find('M', last)
if start == -1:
break
orfs.append(first[start:])
last = start + 1
aa = rest
return orfs
# --------------------------------------------------
def test_find_orfs() -> None:
""" Test find_orfs """
assert find_orfs('') == []
assert find_orfs('M') == []
assert find_orfs('*') == []
assert find_orfs('M*') == ['M']
assert find_orfs('MAMAPR*') == ['MAMAPR', 'MAPR']
assert find_orfs('MAMAPR*M') == ['MAMAPR', 'MAPR']
assert find_orfs('MAMAPR*MP*') == ['MAMAPR', 'MAPR', 'MP']
# --------------------------------------------------
def truncate(seq: str, k: int) -> str:
""" Truncate a sequence to even division by k """
length = len(seq)
end = length - (length % k)
return seq[:end]
# --------------------------------------------------
def test_truncate() -> None:
""" Test truncate """
seq = '0123456789'
assert truncate(seq, 3) == '012345678'
assert truncate(seq[1:], 3) == '123456789'
assert truncate(seq[2:], 3) == '234567'
# --------------------------------------------------
if __name__ == '__main__':
main()
| 25.900901
| 78
| 0.472348
|
4f1404accf429af8352ca762f5a5dc845d9bdc98
| 1,365
|
py
|
Python
|
metaflow/plugins/resources_decorator.py
|
celsiustx/metaflow
|
53b72aac978c429ced680ebbd222c1056425ad9c
|
[
"Apache-2.0"
] | 1
|
2022-01-07T22:32:27.000Z
|
2022-01-07T22:32:27.000Z
|
metaflow/plugins/resources_decorator.py
|
celsiustx/metaflow
|
53b72aac978c429ced680ebbd222c1056425ad9c
|
[
"Apache-2.0"
] | null | null | null |
metaflow/plugins/resources_decorator.py
|
celsiustx/metaflow
|
53b72aac978c429ced680ebbd222c1056425ad9c
|
[
"Apache-2.0"
] | null | null | null |
from metaflow.decorators import StepDecorator
class ResourcesDecorator(StepDecorator):
"""
Step decorator to specify the resources needed when executing this step.
This decorator passes this information along to container orchestrator
(AWS Batch, Kubernetes, etc.) when requesting resources to execute this
step.
This decorator is ignored if the execution of the step happens locally.
To use, annotate your step as follows:
```
@resources(cpu=32)
@step
def my_step(self):
...
```
Parameters
----------
cpu : int
Number of CPUs required for this step. Defaults to 1
gpu : int
Number of GPUs required for this step. Defaults to 0
memory : int
Memory size (in MB) required for this step. Defaults to 4096
shared_memory : int
The value for the size (in MiB) of the /dev/shm volume for this step.
        This parameter maps to the --shm-size option of docker run.
"""
name = 'resources'
defaults = {
'cpu': '1',
'gpu': '0',
'memory': '4096',
'shared_memory': None
}
def __eq__(self, other):
        '''Useful for tests: compare decorators'''
if isinstance(other, ResourcesDecorator):
return self.attributes == other.attributes
else:
return False
| 29.042553
| 77
| 0.620513
|
f6289ea02abf57a1f1805530e7d77c100730aa15
| 14,329
|
py
|
Python
|
esphomeyaml/components/mqtt.py
|
dotlambda/esphomeyaml
|
4f5389998fee8e61520c48a4a4e2ddb960649f01
|
[
"MIT"
] | 2
|
2020-12-03T17:38:24.000Z
|
2021-03-10T04:11:44.000Z
|
esphomeyaml/components/mqtt.py
|
dreed47/esphomeyaml
|
9890a51c5136c356c2cc7f14a3ab8fe547bbb72a
|
[
"MIT"
] | null | null | null |
esphomeyaml/components/mqtt.py
|
dreed47/esphomeyaml
|
9890a51c5136c356c2cc7f14a3ab8fe547bbb72a
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import re
import voluptuous as vol
from esphomeyaml import automation
from esphomeyaml.automation import ACTION_REGISTRY
from esphomeyaml.components import logger
import esphomeyaml.config_validation as cv
from esphomeyaml.const import CONF_AVAILABILITY, CONF_BIRTH_MESSAGE, CONF_BROKER, CONF_CLIENT_ID, \
CONF_COMMAND_TOPIC, CONF_DISCOVERY, CONF_DISCOVERY_PREFIX, CONF_DISCOVERY_RETAIN, \
CONF_ESPHOMEYAML, CONF_ID, CONF_INTERNAL, CONF_KEEPALIVE, CONF_LEVEL, CONF_LOG_TOPIC, \
CONF_MQTT, CONF_NAME, CONF_ON_JSON_MESSAGE, CONF_ON_MESSAGE, CONF_PASSWORD, CONF_PAYLOAD, \
CONF_PAYLOAD_AVAILABLE, CONF_PAYLOAD_NOT_AVAILABLE, CONF_PORT, CONF_QOS, CONF_REBOOT_TIMEOUT, \
CONF_RETAIN, CONF_SHUTDOWN_MESSAGE, CONF_SSL_FINGERPRINTS, CONF_STATE_TOPIC, CONF_TOPIC, \
CONF_TOPIC_PREFIX, CONF_TRIGGER_ID, CONF_USERNAME, CONF_WILL_MESSAGE
from esphomeyaml.core import EsphomeyamlError
from esphomeyaml.cpp_generator import ArrayInitializer, Pvariable, RawExpression, \
StructInitializer, TemplateArguments, add, process_lambda, templatable
from esphomeyaml.cpp_types import Action, App, Component, JsonObjectConstRef, JsonObjectRef, \
Trigger, bool_, esphomelib_ns, optional, std_string, uint8, void
def validate_message_just_topic(value):
value = cv.publish_topic(value)
return MQTT_MESSAGE_BASE({CONF_TOPIC: value})
MQTT_MESSAGE_BASE = vol.Schema({
vol.Required(CONF_TOPIC): cv.publish_topic,
vol.Optional(CONF_QOS, default=0): cv.mqtt_qos,
vol.Optional(CONF_RETAIN, default=True): cv.boolean,
})
MQTT_MESSAGE_TEMPLATE_SCHEMA = vol.Any(None, MQTT_MESSAGE_BASE, validate_message_just_topic)
MQTT_MESSAGE_SCHEMA = vol.Any(None, MQTT_MESSAGE_BASE.extend({
vol.Required(CONF_PAYLOAD): cv.mqtt_payload,
}))
mqtt_ns = esphomelib_ns.namespace('mqtt')
MQTTMessage = mqtt_ns.struct('MQTTMessage')
MQTTClientComponent = mqtt_ns.class_('MQTTClientComponent', Component)
MQTTPublishAction = mqtt_ns.class_('MQTTPublishAction', Action)
MQTTPublishJsonAction = mqtt_ns.class_('MQTTPublishJsonAction', Action)
MQTTMessageTrigger = mqtt_ns.class_('MQTTMessageTrigger', Trigger.template(std_string))
MQTTJsonMessageTrigger = mqtt_ns.class_('MQTTJsonMessageTrigger',
Trigger.template(JsonObjectConstRef))
MQTTComponent = mqtt_ns.class_('MQTTComponent', Component)
def validate_config(value):
if CONF_PORT not in value:
parts = value[CONF_BROKER].split(u':')
if len(parts) == 2:
value[CONF_BROKER] = parts[0]
value[CONF_PORT] = cv.port(parts[1])
else:
value[CONF_PORT] = 1883
return value
def validate_fingerprint(value):
value = cv.string(value)
if re.match(r'^[0-9a-f]{40}$', value) is None:
raise vol.Invalid(u"fingerprint must be valid SHA1 hash")
return value
CONFIG_SCHEMA = vol.All(vol.Schema({
cv.GenerateID(): cv.declare_variable_id(MQTTClientComponent),
vol.Required(CONF_BROKER): cv.string_strict,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=''): cv.string,
vol.Optional(CONF_PASSWORD, default=''): cv.string,
vol.Optional(CONF_CLIENT_ID): vol.All(cv.string, vol.Length(max=23)),
vol.Optional(CONF_DISCOVERY): vol.Any(cv.boolean, cv.one_of("CLEAN", upper=True)),
vol.Optional(CONF_DISCOVERY_RETAIN): cv.boolean,
vol.Optional(CONF_DISCOVERY_PREFIX): cv.publish_topic,
vol.Optional(CONF_BIRTH_MESSAGE): MQTT_MESSAGE_SCHEMA,
vol.Optional(CONF_WILL_MESSAGE): MQTT_MESSAGE_SCHEMA,
vol.Optional(CONF_SHUTDOWN_MESSAGE): MQTT_MESSAGE_SCHEMA,
vol.Optional(CONF_TOPIC_PREFIX): cv.publish_topic,
vol.Optional(CONF_LOG_TOPIC): vol.Any(None, MQTT_MESSAGE_BASE.extend({
vol.Optional(CONF_LEVEL): logger.is_log_level,
}), validate_message_just_topic),
vol.Optional(CONF_SSL_FINGERPRINTS): vol.All(cv.only_on_esp8266,
cv.ensure_list(validate_fingerprint)),
vol.Optional(CONF_KEEPALIVE): cv.positive_time_period_seconds,
vol.Optional(CONF_REBOOT_TIMEOUT): cv.positive_time_period_milliseconds,
vol.Optional(CONF_ON_MESSAGE): automation.validate_automation({
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(MQTTMessageTrigger),
vol.Required(CONF_TOPIC): cv.subscribe_topic,
vol.Optional(CONF_QOS): cv.mqtt_qos,
vol.Optional(CONF_PAYLOAD): cv.string_strict,
}),
vol.Optional(CONF_ON_JSON_MESSAGE): automation.validate_automation({
cv.GenerateID(CONF_TRIGGER_ID): cv.declare_variable_id(MQTTJsonMessageTrigger),
vol.Required(CONF_TOPIC): cv.subscribe_topic,
vol.Optional(CONF_QOS, default=0): cv.mqtt_qos,
}),
}), validate_config)
def exp_mqtt_message(config):
if config is None:
return optional(TemplateArguments(MQTTMessage))
exp = StructInitializer(
MQTTMessage,
('topic', config[CONF_TOPIC]),
('payload', config.get(CONF_PAYLOAD, "")),
('qos', config[CONF_QOS]),
('retain', config[CONF_RETAIN])
)
return exp
def to_code(config):
rhs = App.init_mqtt(config[CONF_BROKER], config[CONF_PORT],
config[CONF_USERNAME], config[CONF_PASSWORD])
mqtt = Pvariable(config[CONF_ID], rhs)
discovery = config.get(CONF_DISCOVERY, True)
discovery_retain = config.get(CONF_DISCOVERY_RETAIN, True)
discovery_prefix = config.get(CONF_DISCOVERY_PREFIX, 'homeassistant')
if not discovery:
add(mqtt.disable_discovery())
elif discovery == "CLEAN":
add(mqtt.set_discovery_info(discovery_prefix, discovery_retain, True))
elif CONF_DISCOVERY_RETAIN in config or CONF_DISCOVERY_PREFIX in config:
add(mqtt.set_discovery_info(discovery_prefix, discovery_retain))
if CONF_TOPIC_PREFIX in config:
add(mqtt.set_topic_prefix(config[CONF_TOPIC_PREFIX]))
if CONF_BIRTH_MESSAGE in config:
birth_message = config[CONF_BIRTH_MESSAGE]
if not birth_message:
add(mqtt.disable_birth_message())
else:
add(mqtt.set_birth_message(exp_mqtt_message(birth_message)))
if CONF_WILL_MESSAGE in config:
will_message = config[CONF_WILL_MESSAGE]
if not will_message:
add(mqtt.disable_last_will())
else:
add(mqtt.set_last_will(exp_mqtt_message(will_message)))
if CONF_SHUTDOWN_MESSAGE in config:
shutdown_message = config[CONF_SHUTDOWN_MESSAGE]
if not shutdown_message:
add(mqtt.disable_shutdown_message())
else:
add(mqtt.set_shutdown_message(exp_mqtt_message(shutdown_message)))
if CONF_CLIENT_ID in config:
add(mqtt.set_client_id(config[CONF_CLIENT_ID]))
if CONF_LOG_TOPIC in config:
log_topic = config[CONF_LOG_TOPIC]
if not log_topic:
add(mqtt.disable_log_message())
else:
add(mqtt.set_log_message_template(exp_mqtt_message(log_topic)))
            if CONF_LEVEL in log_topic:
                add(mqtt.set_log_level(logger.LOG_LEVELS[log_topic[CONF_LEVEL]]))
if CONF_SSL_FINGERPRINTS in config:
for fingerprint in config[CONF_SSL_FINGERPRINTS]:
arr = [RawExpression("0x{}".format(fingerprint[i:i + 2])) for i in range(0, 40, 2)]
add(mqtt.add_ssl_fingerprint(ArrayInitializer(*arr, multiline=False)))
if CONF_KEEPALIVE in config:
add(mqtt.set_keep_alive(config[CONF_KEEPALIVE]))
if CONF_REBOOT_TIMEOUT in config:
add(mqtt.set_reboot_timeout(config[CONF_REBOOT_TIMEOUT]))
for conf in config.get(CONF_ON_MESSAGE, []):
rhs = App.register_component(mqtt.make_message_trigger(conf[CONF_TOPIC]))
trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
if CONF_QOS in conf:
add(trigger.set_qos(conf[CONF_QOS]))
if CONF_PAYLOAD in conf:
add(trigger.set_payload(conf[CONF_PAYLOAD]))
automation.build_automation(trigger, std_string, conf)
for conf in config.get(CONF_ON_JSON_MESSAGE, []):
rhs = mqtt.make_json_message_trigger(conf[CONF_TOPIC], conf[CONF_QOS])
trigger = Pvariable(conf[CONF_TRIGGER_ID], rhs)
automation.build_automation(trigger, JsonObjectConstRef, conf)
CONF_MQTT_PUBLISH = 'mqtt.publish'
MQTT_PUBLISH_ACTION_SCHEMA = vol.Schema({
vol.Required(CONF_TOPIC): cv.templatable(cv.publish_topic),
vol.Required(CONF_PAYLOAD): cv.templatable(cv.mqtt_payload),
vol.Optional(CONF_QOS): cv.templatable(cv.mqtt_qos),
vol.Optional(CONF_RETAIN): cv.templatable(cv.boolean),
})
@ACTION_REGISTRY.register(CONF_MQTT_PUBLISH, MQTT_PUBLISH_ACTION_SCHEMA)
def mqtt_publish_action_to_code(config, action_id, arg_type, template_arg):
rhs = App.Pget_mqtt_client().Pmake_publish_action(template_arg)
type = MQTTPublishAction.template(template_arg)
action = Pvariable(action_id, rhs, type=type)
for template_ in templatable(config[CONF_TOPIC], arg_type, std_string):
yield None
add(action.set_topic(template_))
for template_ in templatable(config[CONF_PAYLOAD], arg_type, std_string):
yield None
add(action.set_payload(template_))
if CONF_QOS in config:
for template_ in templatable(config[CONF_QOS], arg_type, uint8):
            yield None
add(action.set_qos(template_))
if CONF_RETAIN in config:
for template_ in templatable(config[CONF_RETAIN], arg_type, bool_):
yield None
add(action.set_retain(template_))
yield action
CONF_MQTT_PUBLISH_JSON = 'mqtt.publish_json'
MQTT_PUBLISH_JSON_ACTION_SCHEMA = vol.Schema({
vol.Required(CONF_TOPIC): cv.templatable(cv.publish_topic),
vol.Required(CONF_PAYLOAD): cv.lambda_,
vol.Optional(CONF_QOS): cv.mqtt_qos,
vol.Optional(CONF_RETAIN): cv.boolean,
})
@ACTION_REGISTRY.register(CONF_MQTT_PUBLISH_JSON, MQTT_PUBLISH_JSON_ACTION_SCHEMA)
def mqtt_publish_json_action_to_code(config, action_id, arg_type, template_arg):
rhs = App.Pget_mqtt_client().Pmake_publish_json_action(template_arg)
type = MQTTPublishJsonAction.template(template_arg)
action = Pvariable(action_id, rhs, type=type)
for template_ in templatable(config[CONF_TOPIC], arg_type, std_string):
yield None
add(action.set_topic(template_))
for lambda_ in process_lambda(config[CONF_PAYLOAD], [(arg_type, 'x'), (JsonObjectRef, 'root')],
return_type=void):
yield None
add(action.set_payload(lambda_))
if CONF_QOS in config:
add(action.set_qos(config[CONF_QOS]))
if CONF_RETAIN in config:
add(action.set_retain(config[CONF_RETAIN]))
yield action
def required_build_flags(config):
if CONF_SSL_FINGERPRINTS in config:
return '-DASYNC_TCP_SSL_ENABLED=1'
return None
def get_default_topic_for(data, component_type, name, suffix):
whitelist = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
sanitized_name = ''.join(x for x in name.lower().replace(' ', '_') if x in whitelist)
return '{}/{}/{}/{}'.format(data.topic_prefix, component_type,
sanitized_name, suffix)
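# Illustrative example (values assumed): with data.topic_prefix == "livingroom",
# get_default_topic_for(data, 'light', 'Desk Lamp!', 'state') returns
# 'livingroom/light/desk_lamp/state' -- the whitelist filter drops the '!'.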
def build_hass_config(data, component_type, config, include_state=True, include_command=True):
if config.get(CONF_INTERNAL, False):
return None
ret = OrderedDict()
ret['platform'] = 'mqtt'
ret['name'] = config[CONF_NAME]
if include_state:
default = get_default_topic_for(data, component_type, config[CONF_NAME], 'state')
ret['state_topic'] = config.get(CONF_STATE_TOPIC, default)
if include_command:
default = get_default_topic_for(data, component_type, config[CONF_NAME], 'command')
        ret['command_topic'] = config.get(CONF_COMMAND_TOPIC, default)
avail = config.get(CONF_AVAILABILITY, data.availability)
if avail:
ret['availability_topic'] = avail[CONF_TOPIC]
payload_available = avail[CONF_PAYLOAD_AVAILABLE]
if payload_available != 'online':
ret['payload_available'] = payload_available
payload_not_available = avail[CONF_PAYLOAD_NOT_AVAILABLE]
if payload_not_available != 'offline':
ret['payload_not_available'] = payload_not_available
return ret
class GenerateHassConfigData(object):
def __init__(self, config):
if 'mqtt' not in config:
raise EsphomeyamlError("Cannot generate Home Assistant MQTT config if MQTT is not "
"used!")
mqtt = config[CONF_MQTT]
self.topic_prefix = mqtt.get(CONF_TOPIC_PREFIX, config[CONF_ESPHOMEYAML][CONF_NAME])
birth_message = mqtt.get(CONF_BIRTH_MESSAGE)
if CONF_BIRTH_MESSAGE not in mqtt:
birth_message = {
CONF_TOPIC: self.topic_prefix + '/status',
CONF_PAYLOAD: 'online',
}
will_message = mqtt.get(CONF_WILL_MESSAGE)
if CONF_WILL_MESSAGE not in mqtt:
will_message = {
CONF_TOPIC: self.topic_prefix + '/status',
CONF_PAYLOAD: 'offline'
}
if not birth_message or not will_message:
self.availability = None
elif birth_message[CONF_TOPIC] != will_message[CONF_TOPIC]:
self.availability = None
else:
self.availability = {
CONF_TOPIC: birth_message[CONF_TOPIC],
CONF_PAYLOAD_AVAILABLE: birth_message[CONF_PAYLOAD],
CONF_PAYLOAD_NOT_AVAILABLE: will_message[CONF_PAYLOAD],
}
def setup_mqtt_component(obj, config):
if CONF_RETAIN in config:
add(obj.set_retain(config[CONF_RETAIN]))
if not config.get(CONF_DISCOVERY, True):
add(obj.disable_discovery())
if CONF_STATE_TOPIC in config:
add(obj.set_custom_state_topic(config[CONF_STATE_TOPIC]))
if CONF_COMMAND_TOPIC in config:
add(obj.set_custom_command_topic(config[CONF_COMMAND_TOPIC]))
if CONF_AVAILABILITY in config:
availability = config[CONF_AVAILABILITY]
if not availability:
add(obj.disable_availability())
else:
add(obj.set_availability(availability[CONF_TOPIC], availability[CONF_PAYLOAD_AVAILABLE],
availability[CONF_PAYLOAD_NOT_AVAILABLE]))
| 42.144118
| 100
| 0.708842
|
ac49500f41a47dc691542f61a5f1b2145a57336b
| 1,798
|
py
|
Python
|
xero_python/accounting/models/expense_claims.py
|
parasharrk/xero-python
|
e8416f3bd893520a343af014f5bb65acbf1f4f13
|
[
"MIT"
] | null | null | null |
xero_python/accounting/models/expense_claims.py
|
parasharrk/xero-python
|
e8416f3bd893520a343af014f5bb65acbf1f4f13
|
[
"MIT"
] | null | null | null |
xero_python/accounting/models/expense_claims.py
|
parasharrk/xero-python
|
e8416f3bd893520a343af014f5bb65acbf1f4f13
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 2.4.0
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class ExpenseClaims(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"expense_claims": "list[ExpenseClaim]"}
attribute_map = {"expense_claims": "ExpenseClaims"}
def __init__(self, expense_claims=None): # noqa: E501
"""ExpenseClaims - a model defined in OpenAPI""" # noqa: E501
self._expense_claims = None
self.discriminator = None
if expense_claims is not None:
self.expense_claims = expense_claims
@property
def expense_claims(self):
"""Gets the expense_claims of this ExpenseClaims. # noqa: E501
:return: The expense_claims of this ExpenseClaims. # noqa: E501
:rtype: list[ExpenseClaim]
"""
return self._expense_claims
@expense_claims.setter
def expense_claims(self, expense_claims):
"""Sets the expense_claims of this ExpenseClaims.
:param expense_claims: The expense_claims of this ExpenseClaims. # noqa: E501
:type: list[ExpenseClaim]
"""
self._expense_claims = expense_claims
| 27.242424
| 124
| 0.650167
|
3700c7b43efdcd17931ea3a087e71418f8a69e7d
| 3,280
|
py
|
Python
|
elegantrl/agents/AgentDoubleDQN.py
|
ihopethiswillfi/ElegantRL-1
|
b81052a0bc6802443eb0f653c69158396b613b00
|
[
"Apache-2.0"
] | null | null | null |
elegantrl/agents/AgentDoubleDQN.py
|
ihopethiswillfi/ElegantRL-1
|
b81052a0bc6802443eb0f653c69158396b613b00
|
[
"Apache-2.0"
] | null | null | null |
elegantrl/agents/AgentDoubleDQN.py
|
ihopethiswillfi/ElegantRL-1
|
b81052a0bc6802443eb0f653c69158396b613b00
|
[
"Apache-2.0"
] | null | null | null |
import torch
from elegantrl.agents.net import QNetTwin, QNetTwinDuel
from elegantrl.agents.AgentDQN import AgentDQN
class AgentDoubleDQN(AgentDQN):
"""
Bases: ``AgentDQN``
    Double Deep Q-Network algorithm. “Deep Reinforcement Learning with Double Q-learning”. H. van Hasselt et al., 2015.
:param net_dim[int]: the dimension of networks (the width of neural networks)
:param state_dim[int]: the dimension of state (the number of state vector)
:param action_dim[int]: the dimension of action (the number of discrete action)
:param learning_rate[float]: learning rate of optimizer
:param if_per_or_gae[bool]: PER (off-policy) or GAE (on-policy) for sparse reward
:param env_num[int]: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
:param agent_id[int]: if the visible_gpu is '1,9,3,4', agent_id=1 means (1,9,4,3)[agent_id] == 9
"""
def __init__(self, net_dim, state_dim, action_dim, gpu_id=0, args=None):
self.act_class = getattr(self, "act_class", QNetTwin)
super().__init__(net_dim, state_dim, action_dim, gpu_id, args)
def get_obj_critic_raw(self, buffer, batch_size):
"""
Calculate the loss of the network and predict Q values with **uniform sampling**.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:return: the loss of the network and Q values.
"""
with torch.no_grad():
reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(
dim=1, keepdim=True
)[0]
q_label = reward + mask * next_q
q1, q2 = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)]
obj_critic = self.criterion(q1, q_label) + self.criterion(q2, q_label)
return obj_critic, q1
def get_obj_critic_per(self, buffer, batch_size):
"""
Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:return: the loss of the network and Q values.
"""
with torch.no_grad():
reward, mask, action, state, next_s, is_weights = buffer.sample_batch(
batch_size
)
next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(
dim=1, keepdim=True
)[0]
q_label = reward + mask * next_q
q1, q2 = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)]
td_error = self.criterion(q1, q_label) + self.criterion(q2, q_label)
obj_critic = (td_error * is_weights).mean()
buffer.td_error_update(td_error.detach())
return obj_critic, q1
class AgentD3QN(AgentDoubleDQN): # D3QN: DuelingDoubleDQN
def __init__(self, net_dim, state_dim, action_dim, gpu_id=0, args=None):
self.act_class = getattr(self, "act_class", QNetTwinDuel)
super().__init__(net_dim, state_dim, action_dim, gpu_id, args)
| 44.931507
| 118
| 0.660976
|
b350b0f3d038718146bf8480f50e822a50002c19
| 100
|
py
|
Python
|
sparcc_fast/__init__.py
|
shafferm/fast_sparCC
|
8e753ed1250be0185faffccdfcdbd4f5da44a756
|
[
"BSD-3-Clause"
] | 6
|
2018-06-10T14:55:04.000Z
|
2022-03-31T07:23:44.000Z
|
sparcc_fast/__init__.py
|
shafferm/fast_sparCC
|
8e753ed1250be0185faffccdfcdbd4f5da44a756
|
[
"BSD-3-Clause"
] | 1
|
2020-12-17T12:21:32.000Z
|
2020-12-17T12:21:32.000Z
|
sparcc_fast/__init__.py
|
shafferm/fast_sparCC
|
8e753ed1250be0185faffccdfcdbd4f5da44a756
|
[
"BSD-3-Clause"
] | 3
|
2019-07-02T10:04:59.000Z
|
2020-10-07T21:06:02.000Z
|
__author__ = 'shafferm'
from .run_sparcc import sparcc_correlation, sparcc_correlation_w_bootstraps
| 25
| 74
| 0.87
|
190b5ed7e91bcb95be3cbd2de8bd7bebde40fc9b
| 2,801
|
py
|
Python
|
huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/node_item.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/node_item.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/node_item.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class NodeItem:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'uid': 'str'
}
attribute_map = {
'uid': 'uid'
}
def __init__(self, uid=None):
"""NodeItem - a model defined in huaweicloud sdk"""
self._uid = None
self.discriminator = None
if uid is not None:
self.uid = uid
@property
def uid(self):
"""Gets the uid of this NodeItem.
        Node ID
:return: The uid of this NodeItem.
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this NodeItem.
        Node ID
:param uid: The uid of this NodeItem.
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NodeItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
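# Minimal usage sketch (illustrative, not part of the generated SDK file); the uid value
# is made up for the example.
if __name__ == "__main__":
    node = NodeItem(uid="node-0123")
    print(node.to_dict())                     # {'uid': 'node-0123'}
    print(node == NodeItem(uid="node-0123"))  # True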
| 24.356522
| 79
| 0.514816
|
67b8f6fe443662366125d3992ea9f5e6b987b48c
| 5,084
|
py
|
Python
|
Thesis@3.9.1/Lib/site-packages/setuptools/glob.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | 1
|
2020-07-14T08:40:59.000Z
|
2020-07-14T08:40:59.000Z
|
Thesis@3.9.1/Lib/site-packages/setuptools/glob.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | 1
|
2021-06-01T23:32:38.000Z
|
2021-06-01T23:32:38.000Z
|
Thesis@3.9.1/Lib/site-packages/setuptools/glob.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
"""
Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
Changes include:
* `yield from` and PEP3102 `*` removed.
* Hidden files are not ignored.
"""
import os
import re
import fnmatch
__all__ = ["glob", "iglob", "escape"]
def glob(pathname, recursive=False):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
return list(iglob(pathname, recursive=recursive))
def iglob(pathname, recursive=False):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
it = _iglob(pathname, recursive)
if recursive and _isrecursive(pathname):
s = next(it) # skip empty string
assert not s
return it
def _iglob(pathname, recursive):
dirname, basename = os.path.split(pathname)
if not has_magic(pathname):
if basename:
if os.path.lexists(pathname):
yield pathname
else:
# Patterns ending with a slash should match only directories
if os.path.isdir(dirname):
yield pathname
return
if not dirname:
if recursive and _isrecursive(basename):
for x in glob2(dirname, basename):
yield x
else:
for x in glob1(dirname, basename):
yield x
return
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname and has_magic(dirname):
dirs = _iglob(dirname, recursive)
else:
dirs = [dirname]
if has_magic(basename):
if recursive and _isrecursive(basename):
glob_in_dir = glob2
else:
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
if not dirname:
if isinstance(pattern, bytes):
dirname = os.curdir.encode("ASCII")
else:
dirname = os.curdir
try:
names = os.listdir(dirname)
except OSError:
return []
return fnmatch.filter(names, pattern)
def glob0(dirname, basename):
if not basename:
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
# This helper function recursively yields relative pathnames inside a literal
# directory.
def glob2(dirname, pattern):
assert _isrecursive(pattern)
yield pattern[:0]
for x in _rlistdir(dirname):
yield x
# Recursively yields relative pathnames inside a literal directory.
def _rlistdir(dirname):
if not dirname:
if isinstance(dirname, bytes):
dirname = os.curdir.encode("ASCII")
else:
dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return
for x in names:
yield x
path = os.path.join(dirname, x) if dirname else x
for y in _rlistdir(path):
yield os.path.join(x, y)
magic_check = re.compile("([*?[])")
magic_check_bytes = re.compile(b"([*?[])")
def has_magic(s):
if isinstance(s, bytes):
match = magic_check_bytes.search(s)
else:
match = magic_check.search(s)
return match is not None
def _isrecursive(pattern):
if isinstance(pattern, bytes):
return pattern == b"**"
else:
return pattern == "**"
def escape(pathname):
"""Escape all special characters.
"""
# Escaping is done by wrapping any of "*?[" between square brackets.
# Metacharacters do not work in the drive part and shouldn't be escaped.
drive, pathname = os.path.splitdrive(pathname)
if isinstance(pathname, bytes):
pathname = magic_check_bytes.sub(br"[\1]", pathname)
else:
pathname = magic_check.sub(r"[\1]", pathname)
return drive + pathname
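# Minimal usage sketch (illustrative, not part of the vendored module): the file names
# are made up; it exercises the recursive '**' pattern and escape() described above.
if __name__ == "__main__":
    print(glob("**/*.py", recursive=True))  # every .py file under the current directory
    print(escape("report[draft].txt"))      # 'report[[]draft].txt'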
| 29.051429
| 78
| 0.636703
|
31c3be3901f8245b2ac0146316cf13b964525a6a
| 6,018
|
py
|
Python
|
modules/feedback/tests/unit/test_feedback_score_fields.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
modules/feedback/tests/unit/test_feedback_score_fields.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | 11
|
2019-11-12T23:26:45.000Z
|
2021-06-10T17:37:23.000Z
|
modules/feedback/tests/unit/test_feedback_score_fields.py
|
heolin123/funcrowd
|
20167783de208394c09ed0429a5f02ec6dd79c42
|
[
"MIT"
] | null | null | null |
import pytest
from modules.feedback.models.score_fields.regression_reference_score import RegressionReferenceScore
from tasks.models import Task
from modules.feedback.models.score_fields import (
VotingScore, ReferenceScore,
NERReferenceScore)
@pytest.mark.django_db
def test_reference_score(task_with_items, users):
user1, user2, user3 = users
task = Task.objects.first()
item = task.items.first()
annotation_field = item.template.annotations_fields.first()
scorer = ReferenceScore(annotation_field.name)
item = task.items.get(order=0)
scores = {user1: 1, user2: 1, user3: 0}
for annotation in item.annotations.exclude(user=None):
assert scorer.score(annotation) == scores[annotation.user]
item = task.items.get(order=1)
scores = {user1: 1, user2: 1, user3: 1}
for annotation in item.annotations.exclude(user=None):
assert scorer.score(annotation) == scores[annotation.user]
item = task.items.get(order=2)
scores = {user1: 0, user2: 1, user3: 1}
for annotation in item.annotations.exclude(user=None):
assert scorer.score(annotation) == scores[annotation.user]
item = task.items.get(order=3)
for annotation in item.annotations.exclude(user=None):
assert scorer.score(annotation) is None
@pytest.mark.django_db
def test_voting_score(task_with_items, users):
user1, user2, user3 = users
task = Task.objects.first()
item = task.items.first()
annotation_field = item.template.annotations_fields.first()
scorer = VotingScore(annotation_field.name)
item = task.items.get(order=0)
scores = {user1: 0.67, user2: 0.67, user3: 0.33}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
item = task.items.get(order=1)
scores = {user1: 1, user2: 1, user3: 1}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
item = task.items.get(order=2)
scores = {user1: 0.33, user2: 0.33, user3: 0.33}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
item = task.items.get(order=3)
scores = {user1: 0.67, user2: 0.67, user3: 0.33}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
@pytest.mark.django_db
def test_vote_ranking(task_with_items_data_source, users):
user1, user2, user3 = users
task = Task.objects.first()
item = task.items.first()
annotation_field = item.template.annotations_fields.first()
scorer = VotingScore(annotation_field.name)
item = task.items.get(order=0)
scores = {user1: 0.33, user2: 0.33, user3: 0.33}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
@pytest.mark.django_db
def test_voting_score_list(task_with_items_multiple_choice, users):
user1, user2, user3 = users
task = Task.objects.first()
item = task.items.first()
annotation_field = item.template.annotations_fields.first()
scorer = VotingScore(annotation_field.name)
item = task.items.get(order=0)
scores = {user1: 0.67, user2: 0.67, user3: 0.67}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == round(scores[annotation.user], 2)
item = task.items.get(order=1)
scores = {user1: 1, user2: 1, user3: 1}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == round(scores[annotation.user], 2)
item = task.items.get(order=2)
scores = {user1: 0.5, user2: 0.67, user3: 0.33}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == round(scores[annotation.user], 2)
item = task.items.get(order=3)
scores = {user1: 0.67, user2: 0.67, user3: 0.33}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == round(scores[annotation.user], 2)
@pytest.mark.django_db
def test_regression_reference_score(task_with_regression_items, users):
user1, user2, user3 = users
task = Task.objects.first()
item = task.items.first()
annotation_field = item.template.annotations_fields.first()
scorer = RegressionReferenceScore(annotation_field.name)
item = task.items.get(order=0)
scores = {user1: 0.75, user2: 1.0, user3: 0.9}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
item = task.items.get(order=1)
scores = {user1: 0, user2: 0, user3: 1}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
item = task.items.get(order=2)
scores = {user1: 0, user2: 1, user3: 1}
for annotation in item.annotations.exclude(user=None):
assert round(scorer.score(annotation), 2) == scores[annotation.user]
item = task.items.get(order=3)
for annotation in item.annotations.exclude(user=None):
assert scorer.score(annotation) is None
@pytest.mark.django_db
def test_ner_reference_score(task_with_ner_items, users):
user1, _, _ = users
task = Task.objects.first()
item = task.items.first()
annotation_field = item.template.annotations_fields.first()
scorer = NERReferenceScore(annotation_field.name)
item = task.items.get(order=0)
annotation = item.annotations.get(user=user1)
assert scorer.score(annotation) == 1.0
item = task.items.get(order=1)
annotation = item.annotations.get(user=user1)
assert scorer.score(annotation) == 0.5
item = task.items.get(order=2)
annotation = item.annotations.get(user=user1)
assert scorer.score(annotation) == 0.0
| 34.193182
| 100
| 0.698903
|
effa16bd99ab71b273d82d0014908be2e033a74e
| 1,126
|
py
|
Python
|
google/cloud/bigquery_storage_v1/gapic/enums.py
|
vam-google/python-bigquery-storage
|
e988834a2e7397a7a543ac95ae52a573c1699d21
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery_storage_v1/gapic/enums.py
|
vam-google/python-bigquery-storage
|
e988834a2e7397a7a543ac95ae52a573c1699d21
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery_storage_v1/gapic/enums.py
|
vam-google/python-bigquery-storage
|
e988834a2e7397a7a543ac95ae52a573c1699d21
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class DataFormat(enum.IntEnum):
"""
Data format for input or output data.
Attributes:
DATA_FORMAT_UNSPECIFIED (int)
      AVRO (int): Avro is a standard open source row-based file format.
See https://avro.apache.org/ for more details.
ARROW (int): Arrow is a standard open source column-based message format.
See https://arrow.apache.org/ for more details.
"""
DATA_FORMAT_UNSPECIFIED = 0
AVRO = 1
ARROW = 2
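# Usage sketch (illustrative, not part of the generated module): DataFormat is an
# IntEnum, so members compare equal to their integer wire values and can be
# looked up either by value or by name.
#
#   requested = DataFormat.ARROW      # == 2
#   DataFormat(1) is DataFormat.AVRO  # True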
| 30.432432
| 79
| 0.709591
|
b1864d1a335a0ef371f66c61d51de0231f3c1ec0
| 7,003
|
py
|
Python
|
bin/helpers/bactopia-prepare.py
|
oschwengers/bactopia
|
b18169d0395f6db3887c5d3c6a1bd75f63755a34
|
[
"MIT"
] | 1
|
2020-09-19T11:17:40.000Z
|
2020-09-19T11:17:40.000Z
|
bin/helpers/bactopia-prepare.py
|
oschwengers/bactopia
|
b18169d0395f6db3887c5d3c6a1bd75f63755a34
|
[
"MIT"
] | null | null | null |
bin/helpers/bactopia-prepare.py
|
oschwengers/bactopia
|
b18169d0395f6db3887c5d3c6a1bd75f63755a34
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""
usage: bactopia prepare [-h] [-f STR] [-a STR] [--fastq_seperator STR]
[--fastq_pattern STR] [--assembly_pattern STR]
[--long_reads] [--version]
STR
bactopia prepare - Read a directory and prepare a FOFN of
FASTQs/FASTAs
positional arguments:
STR Directory where FASTQ files are stored
optional arguments:
-h, --help show this help message and exit
-f STR, --fastq_ext STR
Extension of the FASTQs. Default: .fastq.gz
-a STR, --assembly_ext STR
Extension of the FASTA assemblies. Default: .fna.gz
--fastq_seperator STR
Split FASTQ name on the last occurrence of the
separator. Default: _
--fastq_pattern STR Glob pattern to match FASTQs. Default: *.fastq.gz
--assembly_pattern STR
Glob pattern to match assembly FASTAs. Default:
*.fna.gz
--long_reads Single-end reads should be treated as long reads
--version show program's version number and exit
"""
VERSION = "1.4.0"
PROGRAM = "bactopia prepare"
if __name__ == '__main__':
import argparse as ap
from collections import defaultdict
import glob
import os
import sys
parser = ap.ArgumentParser(
prog='bactopia prepare',
conflict_handler='resolve',
description=(
f'{PROGRAM} (v{VERSION}) - Read a directory and prepare a FOFN of FASTQs/FASTAs'
)
)
parser.add_argument('path', metavar="STR", type=str,
help='Directory where FASTQ files are stored')
parser.add_argument(
'-f', '--fastq_ext', metavar='STR', type=str,
default=".fastq.gz",
help='Extension of the FASTQs. Default: .fastq.gz'
)
parser.add_argument(
'-a', '--assembly_ext', metavar='STR', type=str,
default=".fna.gz",
help='Extension of the FASTA assemblies. Default: .fna.gz'
)
parser.add_argument(
'--fastq_seperator', metavar='STR', type=str,
default="_",
help='Split FASTQ name on the last occurrence of the separator. Default: _'
)
parser.add_argument(
'--fastq_pattern', metavar='STR', type=str,
default="*.fastq.gz",
help='Glob pattern to match FASTQs. Default: *.fastq.gz'
)
parser.add_argument(
'--assembly_pattern', metavar='STR', type=str,
default="*.fna.gz",
help='Glob pattern to match assembly FASTAs. Default: *.fna.gz'
)
parser.add_argument(
'--long_reads', action='store_true',
help='Single-end reads should be treated as long reads'
)
parser.add_argument('--version', action='version',
version=f'{PROGRAM} {VERSION}')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
# https://docs.oracle.com/javase/tutorial/essential/io/fileOps.html#glob
abspath = os.path.abspath(args.path)
SAMPLES = {}
# Match FASTQS
for fastq in glob.glob(f'{abspath}/*{args.fastq_pattern}'):
fastq_name = os.path.basename(fastq).replace(args.fastq_ext, "")
# Split the fastq file name on separator
# Example MY_FASTQ_R1.rsplit('_', 1) becomes ['MY_FASTQ', 'R1'] (PE)
# Example MY_FASTQ.rsplit('_', 1) becomes ['MY_FASTQ'] (SE)
split_vals = fastq_name.rsplit(args.fastq_seperator, 1)
sample_name = split_vals[0]
if sample_name not in SAMPLES:
SAMPLES[sample_name] = {'pe': [], 'se': [], 'assembly': []}
if len(split_vals) == 1:
# single-end
SAMPLES[sample_name]['se'].append(fastq)
else:
# paired-end
SAMPLES[sample_name]['pe'].append(fastq)
# Match assemblies
for assembly in glob.glob(f'{abspath}/*{args.assembly_pattern}'):
sample_name = os.path.basename(assembly).replace(args.assembly_ext, "")
        # Use the assembly file name (minus its extension) as the sample name
if sample_name not in SAMPLES:
SAMPLES[sample_name] = {'pe': [], 'se': [], 'assembly': []}
SAMPLES[sample_name]['assembly'].append(assembly)
FOFN = []
for sample, vals in sorted(SAMPLES.items()):
pe_reads = vals['pe']
se_reads = vals['se']
assembly = vals['assembly']
errors = []
is_single_end = False
# Validate everything
if len(assembly) > 1:
# Can't have multiple assemblies for the same sample
            errors.append(f'ERROR: "{sample}" cannot have more than one assembly FASTA, please check.')
elif len(assembly) == 1 and (len(pe_reads) or len(se_reads)):
# Can't have an assembly and reads for a sample
errors.append(f'ERROR: "{sample}" cannot have assembly and sequence reads, please check.')
if len(pe_reads) == 1:
# PE reads must be a pair
errors.append(f'ERROR: "{sample}" must have two paired-end FASTQ, please check.')
elif len(pe_reads) > 2:
# PE reads must be a pair
errors.append(f'ERROR: "{sample}" cannot have more than two paired-end FASTQ, please check.')
if args.long_reads:
if not len(pe_reads) and len(se_reads):
# Long reads must also have short PE reads
print(f'WARNING: "{sample}" does not have paired-end reads, treating as single-end short reads, please verify.', file=sys.stderr)
is_single_end = True
else:
if len(se_reads) > 1:
# Can't have multiple SE reads
                errors.append(f'ERROR: "{sample}" has more than one single-end FASTQ, please check.')
elif len(pe_reads) and len(se_reads):
# Can't have SE and PE reads unless long reads
errors.append(f'ERROR: "{sample}" has paired and single-end FASTQs, please check.')
if errors:
print('\n'.join(errors), file=sys.stderr)
else:
runtype = ''
r1 = ''
r2 = ''
extra = ''
if assembly:
runtype = 'assembly'
extra = assembly[0]
if pe_reads:
runtype = 'paired-end'
r1, r2 = sorted(pe_reads)
if se_reads:
if args.long_reads and not is_single_end:
runtype = 'hybrid'
extra = se_reads[0]
else:
runtype = 'single-end'
r1 = se_reads[0]
FOFN.append([sample, runtype, r1, r2, extra])
if FOFN:
print('sample\truntype\tr1\tr2\textra')
for line in FOFN:
print('\t'.join(line))
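# Worked example of the rules above (hypothetical file names, default extension
# ".fastq.gz" and separator "_"): sample_R1.fastq.gz and sample_R2.fastq.gz strip
# to sample_R1 / sample_R2, rsplit to ['sample', 'R1'] / ['sample', 'R2'], and are
# grouped into one paired-end row:
#   sample<TAB>paired-end<TAB>.../sample_R1.fastq.gz<TAB>.../sample_R2.fastq.gz<TAB>
# A lone assembly such as lone.fna.gz (with no reads) instead produces:
#   lone<TAB>assembly<TAB><TAB><TAB>.../lone.fna.gz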
| 37.25
| 145
| 0.567185
|
ad0b67e438bd7acafabe45336f183d89bbc6d20f
| 372
|
py
|
Python
|
sample/create_table/sample_create_table_from_dataframe.py
|
jph00/SimpleSQLite
|
aef4a3e416e4c578c2017cfbdc11d3afa67fcdc3
|
[
"MIT"
] | 4
|
2018-03-25T19:47:28.000Z
|
2020-07-09T14:10:52.000Z
|
sample/create_table/sample_create_table_from_dataframe.py
|
jph00/SimpleSQLite
|
aef4a3e416e4c578c2017cfbdc11d3afa67fcdc3
|
[
"MIT"
] | null | null | null |
sample/create_table/sample_create_table_from_dataframe.py
|
jph00/SimpleSQLite
|
aef4a3e416e4c578c2017cfbdc11d3afa67fcdc3
|
[
"MIT"
] | 1
|
2019-03-08T11:45:53.000Z
|
2019-03-08T11:45:53.000Z
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
from simplesqlite import SimpleSQLite
import pandas
con = SimpleSQLite("pandas_df.sqlite")
con.create_table_from_dataframe(pandas.DataFrame(
[
[0, 0.1, "a"],
[1, 1.1, "bb"],
[2, 2.2, "ccc"],
],
columns=['id', 'value', 'name']
), table_name="pandas_df")
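# Optional read-back sketch (standard library only; file and table names are the
# ones created above, intended to be run after this sample has finished):
#
#   import sqlite3
#   with sqlite3.connect("pandas_df.sqlite") as check:
#       print(check.execute("SELECT * FROM pandas_df").fetchall())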
| 18.6
| 49
| 0.629032
|
166a2ec41332f3236358ddd7f4e738b0a7979e98
| 648
|
py
|
Python
|
mayan/apps/metadata/widgets.py
|
O2Graphics/Mayan-EDMS
|
e11e6f47240f3c536764be66828dbe6428dceb41
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/metadata/widgets.py
|
O2Graphics/Mayan-EDMS
|
e11e6f47240f3c536764be66828dbe6428dceb41
|
[
"Apache-2.0"
] | 5
|
2021-03-19T22:56:45.000Z
|
2022-03-12T00:08:43.000Z
|
mayan/apps/metadata/widgets.py
|
halsten/mayan-edms
|
10372daede6e6dea0bea67eb98767e3be6fbf86f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.utils.html import format_html_join
def get_metadata_string(document):
"""
    Return a formatted representation of a document's metadata values
"""
return format_html_join(
'\n', '<div class="metadata-display" style="word-break: break-all; overflow-wrap: break-word;"><b>{}: </b><span data-metadata-type="{}" data-pk="{}">{}</span></div>',
(
(
document_metadata.metadata_type, document_metadata.metadata_type_id, document_metadata.id, document_metadata.value
) for document_metadata in document.metadata.all()
)
)
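# Illustrative output for a single metadata entry (the values are hypothetical and
# the style attribute is elided as "..."; real values come from the
# DocumentMetadata rows iterated above):
#   <div class="metadata-display" ...><b>Invoice date: </b>
#   <span data-metadata-type="3" data-pk="17">2015-01-12</span></div>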
| 36
| 174
| 0.665123
|
bd88c4ead660018084624b5cf1c336eed98e14ce
| 1,312
|
py
|
Python
|
python/qibuild/actions/list_binaries.py
|
PrashantKumar-sudo/qibuild
|
a16ce425cf25127ceff29507feeeeca37af23351
|
[
"BSD-3-Clause"
] | null | null | null |
python/qibuild/actions/list_binaries.py
|
PrashantKumar-sudo/qibuild
|
a16ce425cf25127ceff29507feeeeca37af23351
|
[
"BSD-3-Clause"
] | null | null | null |
python/qibuild/actions/list_binaries.py
|
PrashantKumar-sudo/qibuild
|
a16ce425cf25127ceff29507feeeeca37af23351
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2019 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
"""
List every binary in the given worktree.
Mainly useful to auto-complete ``qibuild run``.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import qibuild.parsers
from qisys import ui
def configure_parser(parser):
""" Configure parser for this action. """
qibuild.parsers.cmake_build_parser(parser)
def do(args):
""" Main entry point. """
build_worktree = qibuild.parsers.get_build_worktree(args)
sdk_dirs = [x.sdk_directory for x in build_worktree.build_projects]
bin_dirs = [os.path.join(x, "bin") for x in sdk_dirs]
res = list()
for bin_dir in bin_dirs:
if os.path.exists(bin_dir):
binaries = os.listdir(bin_dir)
else:
binaries = list()
if os.name == 'nt':
binaries = [x for x in binaries if x.endswith(".exe")]
binaries = [x.replace("_d.exe", "") for x in binaries]
binaries = [x.replace(".exe", "") for x in binaries]
res.extend(binaries)
for binary in sorted(res):
ui.info(binary)
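# Note on the Windows branch above: debug and release artifacts are reported under
# the same name, e.g. both foo_d.exe and foo.exe are listed as "foo".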
| 31.238095
| 84
| 0.662348
|
07f7f0d68a4949396eec3705fc09f9bea89cffe4
| 1,073
|
py
|
Python
|
setup.py
|
stsievert/adadamp
|
58baaec11510fa5918cb5808012ff251ee2ed2bb
|
[
"BSD-3-Clause"
] | 3
|
2020-07-17T16:11:46.000Z
|
2021-02-20T01:36:39.000Z
|
setup.py
|
stsievert/adadamp
|
58baaec11510fa5918cb5808012ff251ee2ed2bb
|
[
"BSD-3-Clause"
] | 13
|
2020-04-24T19:43:04.000Z
|
2021-04-14T18:13:27.000Z
|
setup.py
|
stsievert/adadamp
|
58baaec11510fa5918cb5808012ff251ee2ed2bb
|
[
"BSD-3-Clause"
] | 2
|
2020-03-24T22:13:43.000Z
|
2021-05-04T23:10:17.000Z
|
from os.path import exists
from setuptools import setup, find_packages
# install_requires = [
# "torch >= 0.14.0",
# "toolz >= 0.8.2",
# "scikit-learn >= 0.18.0",
# "scipy >= 0.13.0",
# "numpy >= 1.8.0",
# ]
def _get_version() -> str:
with open("adadamp/__init__.py", "r") as f:
raw = f.readlines()
medium_rare = [l for l in raw if "__version__" in l]
assert len(medium_rare) == 1
medium = medium_rare[0].split(" = ")[1].strip()
well_done = medium[1:-1]
return well_done
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = ["numpy", "pandas"]
setup(
name="adadamp",
version=_get_version(),
url="https://stsievert.com/adadamp",
maintainer="Scott Sievert",
maintainer_email="dev@stsievert.com",
install_requires=install_requires,
description="Batch size estimation for machine learning training",
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(exclude=["docs", "tests", "tests.*", "docs.*"]),
)
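# Worked example of _get_version() above (hypothetical file contents): a line
# '__version__ = "0.1.2"\n' splits on " = " into ['__version__', '"0.1.2"\n'];
# strip() leaves '"0.1.2"' and the [1:-1] slice drops the quotes, yielding 0.1.2.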
| 26.170732
| 75
| 0.648649
|
429cdf74106122ad0fc0fbaa241242a082615f3d
| 4,504
|
py
|
Python
|
cuml_bench/df_clsf.py
|
cbigit/scikit-learn_bench
|
c94ca9489cbdccb20545d40b89a2a25de1fe26d2
|
[
"Apache-2.0"
] | null | null | null |
cuml_bench/df_clsf.py
|
cbigit/scikit-learn_bench
|
c94ca9489cbdccb20545d40b89a2a25de1fe26d2
|
[
"Apache-2.0"
] | null | null | null |
cuml_bench/df_clsf.py
|
cbigit/scikit-learn_bench
|
c94ca9489cbdccb20545d40b89a2a25de1fe26d2
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import argparse
from typing import Any
import bench
import cuml
from cuml.ensemble import RandomForestClassifier
parser = argparse.ArgumentParser(description='cuml random forest '
'classification benchmark')
parser.add_argument('--criterion', type=str, default='gini',
choices=('gini', 'entropy'),
help='The function to measure the quality of a split')
parser.add_argument('--split-algorithm', type=str, default='hist',
choices=('hist', 'global_quantile'),
help='The algorithm to determine how '
'nodes are split in the tree')
parser.add_argument('--num-trees', type=int, default=100,
help='Number of trees in the forest')
parser.add_argument('--max-features', type=bench.float_or_int, default=None,
help='Upper bound on features used at each split')
parser.add_argument('--max-depth', type=int, default=None,
help='Upper bound on depth of constructed trees')
parser.add_argument('--min-samples-split', type=bench.float_or_int, default=2,
help='Minimum samples number for node splitting')
parser.add_argument('--max-leaf-nodes', type=int, default=-1,
help='Maximum leaf nodes per tree')
parser.add_argument('--min-impurity-decrease', type=float, default=0.,
help='Needed impurity decrease for node splitting')
parser.add_argument('--no-bootstrap', dest='bootstrap', default=True,
                    action='store_false', help="Disable bootstrapping")
params = bench.parse_args(parser)
# Load and convert data
X_train, X_test, y_train, y_test = bench.load_data(params, int_label=True)
if params.criterion == 'gini':
params.criterion = 0
else:
params.criterion = 1
if params.split_algorithm == 'hist':
params.split_algorithm = 0
else:
params.split_algorithm = 1
params.n_classes = y_train[y_train.columns[0]].nunique()
clf: Any
def fit(X, y):
global clf
clf = RandomForestClassifier(split_criterion=params.criterion,
split_algo=params.split_algorithm,
n_estimators=params.num_trees,
max_depth=params.max_depth,
max_features=params.max_features,
min_rows_per_node=params.min_samples_split,
max_leaves=params.max_leaf_nodes,
min_impurity_decrease=params.min_impurity_decrease,
bootstrap=params.bootstrap)
return clf.fit(X, y)
def predict(X):
global clf
prediction_args = {'predict_model': 'GPU'}
if int(cuml.__version__.split('.')[1]) <= 14:
prediction_args.update({'num_classes': params.n_classes})
return clf.predict(X, **prediction_args)
fit_time, _ = bench.measure_function_time(fit, X_train, y_train, params=params)
y_pred = predict(X_train)
train_acc = 100 * bench.accuracy_score(y_pred, y_train)
predict_time, y_pred = bench.measure_function_time(predict, X_test, params=params)
test_acc = 100 * bench.accuracy_score(y_pred, y_test)
bench.print_output(library='cuml', algorithm='decision_forest_classification',
stages=['training', 'prediction'],
params=params, functions=['df_clsf.fit', 'df_clsf.predict'],
times=[fit_time, predict_time], accuracy_type='accuracy[%]',
accuracies=[train_acc, test_acc], data=[X_train, X_test],
alg_instance=clf)
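# Example of the flag mapping above: running with --criterion entropy and
# --split-algorithm global_quantile sets params.criterion = 1 and
# params.split_algorithm = 1, which are then passed to RandomForestClassifier
# as split_criterion and split_algo inside fit().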
| 43.728155
| 85
| 0.609014
|
de8e3dd4c5bf1a61b4188e134889d06e3d63c788
| 2,305
|
py
|
Python
|
playground/graph_cost.py
|
Moecker/gpx
|
e7c5c18395ac980baf8d5b95742b3a52438bc0c3
|
[
"MIT"
] | null | null | null |
playground/graph_cost.py
|
Moecker/gpx
|
e7c5c18395ac980baf8d5b95742b3a52438bc0c3
|
[
"MIT"
] | null | null | null |
playground/graph_cost.py
|
Moecker/gpx
|
e7c5c18395ac980baf8d5b95742b3a52438bc0c3
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from functools import partial
import logging
import pprint
import graph_simple
class CostGraph(graph_simple.Graph):
def __init__(self, _):
self._graph = defaultdict(partial(defaultdict, int))
def __repr__(self):
return "{}({})".format(self.__class__.__name__, dict(self._graph))
def __str__(self):
return "{}({})".format(self.__class__.__name__, dict(self._graph))
def add(self, node1, node2, cost=1):
self._graph[node1].update({node2: cost})
self._graph[node2].update({node1: cost})
def dijkstra(self, start, end):
graph = self._graph
D = {} # Final distances dict
P = {} # Predecessor dict
max_weight = 0
for node in graph.keys():
D[node] = -1 # Vertices are unreachable
P[node] = ""
D[start] = 0 # The start vertex needs no move
unseen_nodes = list(graph.keys()) # All nodes are unseen
while len(unseen_nodes) > 0:
shortest = None
node = ""
for temp_node in unseen_nodes:
                if shortest is None:
shortest = D[temp_node]
node = temp_node
elif D[temp_node] < shortest:
shortest = D[temp_node]
node = temp_node
unseen_nodes.remove(node)
for child_node, child_value in graph[node].items():
weight = D[node] + child_value
if D[child_node] < D[node] + child_value:
D[child_node] = D[node] + child_value
P[child_node] = node
max_weight = weight
path = []
node = end
while not (node == start):
if path.count(node) == 0:
                path.insert(0, node)  # Insert the current node into the path
node = P[node] # The current node becomes its predecessor
else:
break
path.insert(0, start) # Finally, insert the start vertex
logging.info(f"Dijksra found a path of length {path} with weight {max_weight}.")
logging.info(f"Dijksra path {pprint.pformat(path)}.")
logging.info(f"Dijksra graph {pprint.pformat(D)}.")
return path
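# Minimal usage sketch (node names and costs are illustrative; assumes the
# graph_simple module is importable, since CostGraph subclasses graph_simple.Graph
# but ignores the constructor argument, as seen above):
#
#   g = CostGraph(None)
#   g.add("A", "B", cost=2)
#   g.add("B", "C", cost=3)
#   path = g.dijkstra("A", "C")  # e.g. ['A', 'B', 'C']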
| 35.461538
| 88
| 0.554881
|
77b3c0f07476e27862164feef2b62c5a25650752
| 13,388
|
py
|
Python
|
maistra/vendor/com_googlesource_chromium_v8/wee8/third_party/depot_tools/tests/gerrit_util_test.py
|
maistra-bot/proxy
|
05a551df62d90e96c24afc649f2755983d020b5b
|
[
"Apache-2.0"
] | 1
|
2021-11-22T05:43:42.000Z
|
2021-11-22T05:43:42.000Z
|
maistra/vendor/com_googlesource_chromium_v8/wee8/third_party/depot_tools/tests/gerrit_util_test.py
|
maistra-bot/proxy
|
05a551df62d90e96c24afc649f2755983d020b5b
|
[
"Apache-2.0"
] | null | null | null |
maistra/vendor/com_googlesource_chromium_v8/wee8/third_party/depot_tools/tests/gerrit_util_test.py
|
maistra-bot/proxy
|
05a551df62d90e96c24afc649f2755983d020b5b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env vpython3
# coding=utf-8
# Copyright (c) 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import unicode_literals
import base64
import json
import os
import sys
import unittest
if sys.version_info.major == 2:
from cStringIO import StringIO
import mock
else:
from io import StringIO
from unittest import mock
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gerrit_util
import gclient_utils
import metrics
import metrics_utils
import subprocess2
class CookiesAuthenticatorTest(unittest.TestCase):
_GITCOOKIES = '\n'.join([
'\t'.join([
'chromium.googlesource.com',
'FALSE',
'/',
'TRUE',
'2147483647',
'o',
'git-user.chromium.org=1/chromium-secret',
]),
'\t'.join([
'chromium-review.googlesource.com',
'FALSE',
'/',
'TRUE',
'2147483647',
'o',
'git-user.chromium.org=1/chromium-secret',
]),
'\t'.join([
'.example.com',
'FALSE',
'/',
'TRUE',
'2147483647',
'o',
'example-bearer-token',
]),
'\t'.join([
'another-path.example.com',
'FALSE',
'/foo',
'TRUE',
'2147483647',
'o',
'git-example.com=1/another-path-secret',
]),
'\t'.join([
'another-key.example.com',
'FALSE',
'/',
'TRUE',
'2147483647',
'not-o',
'git-example.com=1/another-key-secret',
]),
'#' + '\t'.join([
'chromium-review.googlesource.com',
'FALSE',
'/',
'TRUE',
'2147483647',
'o',
'git-invalid-user.chromium.org=1/invalid-chromium-secret',
]),
'Some unrelated line\t that should not be here',
])
def setUp(self):
mock.patch('gclient_utils.FileRead', return_value=self._GITCOOKIES).start()
mock.patch('os.getenv', return_value={}).start()
mock.patch('os.environ', {'HOME': '$HOME'}).start()
mock.patch('os.path.exists', return_value=True).start()
mock.patch(
'subprocess2.check_output',
side_effect=[
subprocess2.CalledProcessError(1, ['cmd'], 'cwd', 'out', 'err')],
).start()
self.addCleanup(mock.patch.stopall)
self.maxDiff = None
def testGetNewPasswordUrl(self):
auth = gerrit_util.CookiesAuthenticator()
self.assertEqual(
'https://chromium-review.googlesource.com/new-password',
auth.get_new_password_url('chromium.googlesource.com'))
self.assertEqual(
'https://chrome-internal-review.googlesource.com/new-password',
auth.get_new_password_url('chrome-internal-review.googlesource.com'))
def testGetNewPasswordMessage(self):
auth = gerrit_util.CookiesAuthenticator()
self.assertIn(
'https://chromium-review.googlesource.com/new-password',
auth.get_new_password_message('chromium-review.googlesource.com'))
self.assertIn(
'https://chrome-internal-review.googlesource.com/new-password',
auth.get_new_password_message('chrome-internal.googlesource.com'))
def testGetGitcookiesPath(self):
self.assertEqual(
os.path.join('$HOME', '.gitcookies'),
gerrit_util.CookiesAuthenticator().get_gitcookies_path())
subprocess2.check_output.side_effect = ['http.cookiefile']
self.assertEqual(
'http.cookiefile',
gerrit_util.CookiesAuthenticator().get_gitcookies_path())
subprocess2.check_output.assert_called_with(
['git', 'config', '--path', 'http.cookiefile'])
os.getenv.return_value = 'git-cookies-path'
self.assertEqual(
'git-cookies-path',
gerrit_util.CookiesAuthenticator().get_gitcookies_path())
os.getenv.assert_called_with('GIT_COOKIES_PATH')
def testGitcookies(self):
auth = gerrit_util.CookiesAuthenticator()
self.assertEqual(auth.gitcookies, {
'chromium.googlesource.com':
('git-user.chromium.org', '1/chromium-secret'),
'chromium-review.googlesource.com':
('git-user.chromium.org', '1/chromium-secret'),
'.example.com':
('', 'example-bearer-token'),
})
def testGetAuthHeader(self):
expected_chromium_header = (
'Basic Z2l0LXVzZXIuY2hyb21pdW0ub3JnOjEvY2hyb21pdW0tc2VjcmV0')
auth = gerrit_util.CookiesAuthenticator()
self.assertEqual(
expected_chromium_header,
auth.get_auth_header('chromium.googlesource.com'))
self.assertEqual(
expected_chromium_header,
auth.get_auth_header('chromium-review.googlesource.com'))
self.assertEqual(
'Bearer example-bearer-token',
auth.get_auth_header('some-review.example.com'))
def testGetAuthEmail(self):
auth = gerrit_util.CookiesAuthenticator()
self.assertEqual(
'user@chromium.org',
auth.get_auth_email('chromium.googlesource.com'))
self.assertEqual(
'user@chromium.org',
auth.get_auth_email('chromium-review.googlesource.com'))
self.assertIsNone(auth.get_auth_email('some-review.example.com'))
class GerritUtilTest(unittest.TestCase):
def setUp(self):
super(GerritUtilTest, self).setUp()
mock.patch('gerrit_util.LOGGER').start()
mock.patch('gerrit_util.time_sleep').start()
mock.patch('metrics.collector').start()
mock.patch(
'metrics_utils.extract_http_metrics',
return_value='http_metrics').start()
self.addCleanup(mock.patch.stopall)
def testQueryString(self):
self.assertEqual('', gerrit_util._QueryString([]))
self.assertEqual(
'first%20param%2B', gerrit_util._QueryString([], 'first param+'))
self.assertEqual(
'key:val+foo:bar',
gerrit_util._QueryString([('key', 'val'), ('foo', 'bar')]))
self.assertEqual(
'first%20param%2B+key:val+foo:bar',
gerrit_util._QueryString(
[('key', 'val'), ('foo', 'bar')], 'first param+'))
@mock.patch('gerrit_util.Authenticator')
def testCreateHttpConn_Basic(self, mockAuth):
mockAuth.get().get_auth_header.return_value = None
conn = gerrit_util.CreateHttpConn('host.example.com', 'foo/bar')
self.assertEqual('host.example.com', conn.req_host)
self.assertEqual({
'uri': 'https://host.example.com/foo/bar',
'method': 'GET',
'headers': {},
'body': None,
}, conn.req_params)
@mock.patch('gerrit_util.Authenticator')
def testCreateHttpConn_Authenticated(self, mockAuth):
mockAuth.get().get_auth_header.return_value = 'Bearer token'
conn = gerrit_util.CreateHttpConn(
'host.example.com', 'foo/bar', headers={'header': 'value'})
self.assertEqual('host.example.com', conn.req_host)
self.assertEqual({
'uri': 'https://host.example.com/a/foo/bar',
'method': 'GET',
'headers': {'Authorization': 'Bearer token', 'header': 'value'},
'body': None,
}, conn.req_params)
@mock.patch('gerrit_util.Authenticator')
def testCreateHttpConn_Body(self, mockAuth):
mockAuth.get().get_auth_header.return_value = None
conn = gerrit_util.CreateHttpConn(
'host.example.com', 'foo/bar', body={'l': [1, 2, 3], 'd': {'k': 'v'}})
self.assertEqual('host.example.com', conn.req_host)
self.assertEqual({
'uri': 'https://host.example.com/foo/bar',
'method': 'GET',
'headers': {'Content-Type': 'application/json'},
'body': '{"d": {"k": "v"}, "l": [1, 2, 3]}',
}, conn.req_params)
def testReadHttpResponse_200(self):
conn = mock.Mock()
conn.req_params = {'uri': 'uri', 'method': 'method'}
conn.request.return_value = (mock.Mock(status=200), b'content\xe2\x9c\x94')
content = gerrit_util.ReadHttpResponse(conn)
self.assertEqual('content✔', content.getvalue())
metrics.collector.add_repeated.assert_called_once_with(
'http_requests', 'http_metrics')
def testReadHttpResponse_AuthenticationIssue(self):
for status in (302, 401, 403):
response = mock.Mock(status=status)
response.get.return_value = None
conn = mock.Mock(req_params={'uri': 'uri', 'method': 'method'})
conn.request.return_value = (response, b'')
with mock.patch('sys.stdout', StringIO()):
with self.assertRaises(gerrit_util.GerritError) as cm:
gerrit_util.ReadHttpResponse(conn)
self.assertEqual(status, cm.exception.http_status)
self.assertIn(
'Your Gerrit credentials might be misconfigured',
sys.stdout.getvalue())
def testReadHttpResponse_ClientError(self):
conn = mock.Mock(req_params={'uri': 'uri', 'method': 'method'})
conn.request.return_value = (mock.Mock(status=404), b'')
with self.assertRaises(gerrit_util.GerritError) as cm:
gerrit_util.ReadHttpResponse(conn)
self.assertEqual(404, cm.exception.http_status)
def testReadHttpResponse_ServerError(self):
conn = mock.Mock(req_params={'uri': 'uri', 'method': 'method'})
conn.request.return_value = (mock.Mock(status=500), b'')
with self.assertRaises(gerrit_util.GerritError) as cm:
gerrit_util.ReadHttpResponse(conn)
self.assertEqual(500, cm.exception.http_status)
self.assertEqual(gerrit_util.TRY_LIMIT, len(conn.request.mock_calls))
self.assertEqual(
[mock.call(1.5), mock.call(3)], gerrit_util.time_sleep.mock_calls)
def testReadHttpResponse_ServerErrorAndSuccess(self):
conn = mock.Mock(req_params={'uri': 'uri', 'method': 'method'})
conn.request.side_effect = [
(mock.Mock(status=500), b''),
(mock.Mock(status=200), b'content\xe2\x9c\x94'),
]
self.assertEqual('content✔', gerrit_util.ReadHttpResponse(conn).getvalue())
self.assertEqual(2, len(conn.request.mock_calls))
gerrit_util.time_sleep.assert_called_once_with(1.5)
def testReadHttpResponse_Expected404(self):
conn = mock.Mock()
conn.req_params = {'uri': 'uri', 'method': 'method'}
conn.request.return_value = (mock.Mock(status=404), b'content\xe2\x9c\x94')
content = gerrit_util.ReadHttpResponse(conn, (404,))
self.assertEqual('', content.getvalue())
@mock.patch('gerrit_util.ReadHttpResponse')
def testReadHttpJsonResponse_NotJSON(self, mockReadHttpResponse):
mockReadHttpResponse.return_value = StringIO('not json')
with self.assertRaises(gerrit_util.GerritError) as cm:
gerrit_util.ReadHttpJsonResponse(None)
self.assertEqual(cm.exception.http_status, 200)
self.assertEqual(
cm.exception.message, '(200) Unexpected json output: not json')
@mock.patch('gerrit_util.ReadHttpResponse')
def testReadHttpJsonResponse_EmptyValue(self, mockReadHttpResponse):
mockReadHttpResponse.return_value = StringIO(')]}\'')
self.assertIsNone(gerrit_util.ReadHttpJsonResponse(None))
@mock.patch('gerrit_util.ReadHttpResponse')
def testReadHttpJsonResponse_JSON(self, mockReadHttpResponse):
expected_value = {'foo': 'bar', 'baz': [1, '2', 3]}
mockReadHttpResponse.return_value = StringIO(
')]}\'\n' + json.dumps(expected_value))
self.assertEqual(expected_value, gerrit_util.ReadHttpJsonResponse(None))
@mock.patch('gerrit_util.CreateHttpConn')
@mock.patch('gerrit_util.ReadHttpJsonResponse')
def testQueryChanges(self, mockJsonResponse, mockCreateHttpConn):
gerrit_util.QueryChanges(
'host', [('key', 'val'), ('foo', 'bar')], 'first param', limit=500,
o_params=['PARAM_A', 'PARAM_B'], start='start')
mockCreateHttpConn.assert_called_once_with(
'host',
('changes/?q=first%20param+key:val+foo:bar'
'&start=start'
'&n=500'
'&o=PARAM_A'
'&o=PARAM_B'))
def testQueryChanges_NoParams(self):
self.assertRaises(RuntimeError, gerrit_util.QueryChanges, 'host', [])
@mock.patch('gerrit_util.QueryChanges')
def testGenerateAllChanges(self, mockQueryChanges):
mockQueryChanges.side_effect = [
# First results page
[
{'_number': '4'},
{'_number': '3'},
{'_number': '2', '_more_changes': True},
],
# Second results page, there are new changes, so second page includes
# some results from the first page.
[
{'_number': '2'},
{'_number': '1'},
],
# GenerateAllChanges queries again from the start to get any new
# changes (5 in this case).
[
{'_number': '5'},
{'_number': '4'},
{'_number': '3', '_more_changes': True},
],
]
changes = list(gerrit_util.GenerateAllChanges('host', 'params'))
self.assertEqual(
[
{'_number': '4'},
{'_number': '3'},
{'_number': '2', '_more_changes': True},
{'_number': '1'},
{'_number': '5'},
],
changes)
self.assertEqual(
[
mock.call('host', 'params', None, 500, None, 0),
mock.call('host', 'params', None, 500, None, 3),
mock.call('host', 'params', None, 500, None, 0),
],
mockQueryChanges.mock_calls)
if __name__ == '__main__':
unittest.main()
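# For reference (derived from the gitcookies fixture above): the expected Basic
# header asserted in testGetAuthHeader is just the base64 of the "user:secret"
# pair from the cookie, e.g.
#   base64.b64encode(b'git-user.chromium.org:1/chromium-secret')
#   == b'Z2l0LXVzZXIuY2hyb21pdW0ub3JnOjEvY2hyb21pdW0tc2VjcmV0'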
| 34.416452
| 79
| 0.636316
|
036064d54911b0f9522f9a327ee821658cff29ec
| 3,028
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/choropleth/_selected.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/choropleth/_selected.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/choropleth/_selected.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "choropleth"
_path_str = "choropleth.selected"
_valid_props = {"marker"}
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
opacity
Sets the marker opacity of selected points.
Returns
-------
plotly.graph_objs.choropleth.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.choropleth.selected.Marker
` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choropleth.Selected`
marker
:class:`plotly.graph_objects.choropleth.selected.Marker
` instance or dict with compatible properties
Returns
-------
Selected
"""
super(Selected, self).__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.choropleth.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.Selected`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
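# Minimal usage sketch (the opacity value is illustrative; the constructor
# arguments are the ones documented above):
#
#   import plotly.graph_objects as go
#   trace = go.Choropleth(
#       locations=["FR"], z=[1],
#       selected=go.choropleth.Selected(marker=dict(opacity=0.5)),
#   )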
| 28.299065
| 82
| 0.537318
|
25142c1c900b9655bff83bd3ddd75629f4f5ee35
| 296
|
py
|
Python
|
odoo/custom/src/private/l10n_th_ecosoft/__init__.py
|
ecosoft-odoo/ecosoft
|
28ea9169c7b6b7e28351db8a8de594f5ee7ee869
|
[
"BSL-1.0"
] | 1
|
2021-02-13T12:43:38.000Z
|
2021-02-13T12:43:38.000Z
|
odoo/custom/src/private/l10n_th_ecosoft/__init__.py
|
ecosoft-odoo/ecosoft
|
28ea9169c7b6b7e28351db8a8de594f5ee7ee869
|
[
"BSL-1.0"
] | 1
|
2021-01-13T07:25:32.000Z
|
2021-01-13T07:25:32.000Z
|
odoo/custom/src/private/l10n_th_ecosoft/__init__.py
|
ecosoft-odoo/ecosoft
|
28ea9169c7b6b7e28351db8a8de594f5ee7ee869
|
[
"BSL-1.0"
] | null | null | null |
# Part of Odoo. See LICENSE file for full copyright and licensing details.
def _preserve_tag_on_taxes(cr, registry):
from odoo.addons.account.models.chart_template import (
preserve_existing_tags_on_taxes,
)
preserve_existing_tags_on_taxes(cr, registry, "l10n_th_ecosoft")
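# Usage note (an assumption based on the (cr, registry) signature above): this
# helper is meant to be referenced as a post_init_hook in the module manifest so
# that existing tax tags are preserved when l10n_th_ecosoft is installed.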
| 29.6
| 74
| 0.77027
|
4a474e993dc686e11b36821bf378fe31b1a44389
| 1,342
|
py
|
Python
|
Python/dic.py
|
lzz5235/Code-Segment
|
e10a172972ea75151f77929dfe105729600c854e
|
[
"MIT"
] | 4
|
2017-04-28T09:27:28.000Z
|
2018-03-26T09:20:06.000Z
|
Python/dic.py
|
lzz5235/Code-Segment
|
e10a172972ea75151f77929dfe105729600c854e
|
[
"MIT"
] | null | null | null |
Python/dic.py
|
lzz5235/Code-Segment
|
e10a172972ea75151f77929dfe105729600c854e
|
[
"MIT"
] | 2
|
2020-07-04T02:46:21.000Z
|
2021-09-13T11:29:14.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 12 15:05:41 2015
@author: lzz
"""
def load_dic(filename):
f = open(filename)
word_dic = set()
max_length = 1
for line in f:
word = unicode(line.strip(),'utf-8')
word_dic.add(word)
if len(word) > max_length:
max_length = len(word)
return max_length,word_dic
def fmm_word_seg(sent,max_len,word_dict):
begin = 0
words = []
sent = unicode(sent, 'utf-8')
while begin < len(sent):
for end in range(min(begin + max_len,len(sent)),begin,-1):
if sent[begin:end] in word_dict:
words.append(sent[begin:end])
break
begin = end
return words
def Bmm_word_seg(sent,max_len,word_dict):
end = len(sent)
begin = 0
words = []
sent = unicode(sent, 'utf-8')
while end >begin:
for start in range(begin,max(begin+max_len,len(sent)),1):
if sent[start:end] in word_dict:
words.append(sent[start:end])
break
end = start
return words
max_len,word_dict = load_dic('lexicon.dic')
sent = raw_input()
words = Bmm_word_seg(sent,max_len,word_dict)
for i in range(len(words)):
print words[i]
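# Worked example of fmm_word_seg above (hypothetical dictionary): with a word
# dictionary containing 研究, 研究生, 生命 and 命 (maximum word length 3), the
# sentence 研究生命 is segmented greedily from the left as [研究生, 命], because
# the longest dictionary match starting at each position is taken first. Note
# that Bmm_word_seg collects words from the right, so its result list is in
# reverse sentence order.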
| 22.366667
| 67
| 0.537258
|
39f8ca5753299fdb8503160d0d0d2e7f20ed7882
| 20,303
|
py
|
Python
|
psqlextra/manager/manager.py
|
SoprisApps/django-postgres-extra
|
e0eb6b2ec5b0ef72555aa8ecc187fda1277c0ba5
|
[
"MIT"
] | null | null | null |
psqlextra/manager/manager.py
|
SoprisApps/django-postgres-extra
|
e0eb6b2ec5b0ef72555aa8ecc187fda1277c0ba5
|
[
"MIT"
] | 1
|
2018-12-11T15:12:56.000Z
|
2018-12-11T15:12:56.000Z
|
psqlextra/manager/manager.py
|
SoprisApps/django-postgres-extra
|
e0eb6b2ec5b0ef72555aa8ecc187fda1277c0ba5
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Union, Tuple
import django
from django.conf import settings
from django.db import models, transaction
from django.db.models.sql import UpdateQuery
from django.db.models.sql.constants import CURSOR
from django.db.models.fields import NOT_PROVIDED
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from psqlextra import signals
from psqlextra.compiler import (PostgresReturningUpdateCompiler,
PostgresInsertCompiler)
from psqlextra.query import PostgresQuery, PostgresInsertQuery, ConflictAction
class PostgresQuerySet(models.QuerySet):
"""Adds support for PostgreSQL specifics."""
def __init__(self, model=None, query=None, using=None, hints=None):
"""Initializes a new instance of :see:PostgresQuerySet."""
super().__init__(model, query, using, hints)
self.query = query or PostgresQuery(self.model)
self.conflict_target = None
self.conflict_action = None
self.index_predicate = None
def annotate(self, **annotations):
"""Custom version of the standard annotate function
that allows using field names as annotated fields.
Normally, the annotate function doesn't allow you
to use the name of an existing field on the model
as the alias name. This version of the function does
allow that.
"""
fields = {
field.name: field
for field in self.model._meta.get_fields()
}
# temporarily rename the fields that have the same
# name as a field name, we'll rename them back after
# the function in the base class ran
new_annotations = {}
renames = {}
for name, value in annotations.items():
if name in fields:
new_name = '%s_new' % name
new_annotations[new_name] = value
renames[new_name] = name
else:
new_annotations[name] = value
# run the base class's annotate function
result = super().annotate(**new_annotations)
# rename the annotations back to as specified
result.rename_annotations(**renames)
return result
def rename_annotations(self, **annotations):
"""Renames the aliases for the specified annotations:
.annotate(myfield=F('somestuf__myfield'))
.rename_annotations(myfield='field')
Arguments:
annotations:
The annotations to rename. Mapping the
old name to the new name.
"""
self.query.rename_annotations(annotations)
return self
def join(self, **conditions):
"""Adds extra conditions to existing joins.
WARNING: This is an extremely experimental feature.
DO NOT USE unless you know what you're doing.
"""
self.query.add_join_conditions(conditions)
return self
def update(self, **fields):
"""Updates all rows that match the filter."""
# build up the query to execute
self._for_write = True
if django.VERSION >= (2, 0):
query = self.query.chain(UpdateQuery)
else:
query = self.query.clone(UpdateQuery)
query._annotations = None
query.add_update_values(fields)
        # build the compiler for the query
connection = django.db.connections[self.db]
compiler = PostgresReturningUpdateCompiler(query, connection, self.db)
# execute the query
with transaction.atomic(using=self.db, savepoint=False):
rows = compiler.execute_sql(CURSOR)
self._result_cache = None
# send out a signal for each row
for row in rows:
signals.update.send(self.model, pk=row[0])
# the original update(..) returns the amount of rows
# affected, let's do the same
return len(rows)
def on_conflict(self, fields: List[Union[str, Tuple[str]]], action, index_predicate: str=None):
"""Sets the action to take when conflicts arise when attempting
to insert/create a new row.
Arguments:
fields:
The fields the conflicts can occur in.
action:
The action to take when the conflict occurs.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
"""
self.conflict_target = fields
self.conflict_action = action
self.index_predicate = index_predicate
return self
def bulk_insert(self, rows, return_model=False):
"""Creates multiple new records in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
rows:
An array of dictionaries, where each dictionary
describes the fields to insert.
return_model (default: False):
If model instances should be returned rather than
just dicts.
Returns:
A list of either the dicts of the rows inserted, including the pk or
the models of the rows inserted with defaults for any fields not specified
"""
if self.conflict_target or self.conflict_action:
compiler = self._build_insert_compiler(rows)
objs = compiler.execute_sql(return_id=True)
if return_model:
return [self.model(**dict(r, **k)) for r, k in zip(rows, objs)]
else:
return [dict(r, **k) for r, k in zip(rows, objs)]
# no special action required, use the standard Django bulk_create(..)
return super().bulk_create([self.model(**fields) for fields in rows])
def insert(self, **fields):
"""Creates a new record in the database.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
fields:
The fields of the row to create.
Returns:
The primary key of the record that was created.
"""
if self.conflict_target or self.conflict_action:
compiler = self._build_insert_compiler([fields])
rows = compiler.execute_sql(return_id=True)
pk_field_name = self.model._meta.pk.name
return rows[0][pk_field_name]
# no special action required, use the standard Django create(..)
return super().create(**fields).pk
def insert_and_get(self, **fields):
"""Creates a new record in the database and then gets
the entire row.
This allows specifying custom conflict behavior using .on_conflict().
If no special behavior was specified, this uses the normal Django create(..)
Arguments:
fields:
The fields of the row to create.
Returns:
The model instance representing the row that was created.
"""
if not self.conflict_target and not self.conflict_action:
# no special action required, use the standard Django create(..)
return super().create(**fields)
compiler = self._build_insert_compiler([fields])
rows = compiler.execute_sql(return_id=False)
columns = rows[0]
# get a list of columns that are officially part of the model and preserve the fact that the attribute name
# might be different than the database column name
model_columns = {}
for field in self.model._meta.local_concrete_fields:
model_columns[field.column] = field.attname
# strip out any columns/fields returned by the db that
# are not present in the model
model_init_fields = {}
for column_name, column_value in columns.items():
try:
model_init_fields[model_columns[column_name]] = column_value
except KeyError:
pass
return self.model(**model_init_fields)
def upsert(self, conflict_target: List, fields: Dict, index_predicate: str=None) -> int:
"""Creates a new record or updates the existing one
with the specified data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
Returns:
The primary key of the row that was created/updated.
"""
self.on_conflict(conflict_target, ConflictAction.UPDATE, index_predicate)
return self.insert(**fields)
def upsert_and_get(self, conflict_target: List, fields: Dict, index_predicate: str=None):
"""Creates a new record or updates the existing one
with the specified data and then gets the row.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
Returns:
The model instance representing the row
that was created/updated.
"""
self.on_conflict(conflict_target, ConflictAction.UPDATE, index_predicate)
return self.insert_and_get(**fields)
def bulk_upsert(self, conflict_target: List, rows: List[Dict], index_predicate: str=None):
"""Creates a set of new records or updates the existing
ones with the specified data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
rows:
Rows to upsert.
index_predicate:
The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking
conflicts)
"""
        if not rows:
return
self.on_conflict(conflict_target, ConflictAction.UPDATE, index_predicate)
return self.bulk_insert(rows)
def _build_insert_compiler(self, rows: List[Dict]):
"""Builds the SQL compiler for a insert query.
Arguments:
rows:
A list of dictionaries, where each entry
describes a record to insert.
Returns:
The SQL compiler for the insert.
"""
# create model objects, we also have to detect cases
# such as:
        # [dict(first_name='swen'), dict(first_name='swen', last_name='kooij')]
# we need to be certain that each row specifies the exact same
# amount of fields/columns
objs = []
field_count = len(rows[0])
for index, row in enumerate(rows):
if field_count != len(row):
raise SuspiciousOperation((
'In bulk upserts, you cannot have rows with different field '
'configurations. Row {0} has a different field config than '
'the first row.'
).format(index))
objs.append(self.model(**row))
# indicate this query is going to perform write
self._for_write = True
# get the fields to be used during update/insert
insert_fields, update_fields = self._get_upsert_fields(rows[0])
# build a normal insert query
query = PostgresInsertQuery(self.model)
query.conflict_action = self.conflict_action
query.conflict_target = self.conflict_target
query.index_predicate = self.index_predicate
query.values(objs, insert_fields, update_fields)
# use the postgresql insert query compiler to transform the insert
        # into a special PostgreSQL insert
connection = django.db.connections[self.db]
compiler = PostgresInsertCompiler(query, connection, self.db)
return compiler
def _is_magical_field(self, model_instance, field, is_insert: bool):
"""Verifies whether this field is gonna modify something
on its own.
"Magical" means that a field modifies the field value
during the pre_save.
Arguments:
model_instance:
The model instance the field is defined on.
field:
                The field to check for whether it makes
                modifications of its own.
            is_insert:
                Whether to pretend this is an insert.
Returns:
True when this field modifies something.
"""
        # does this field modify something upon insert?
old_value = getattr(model_instance, field.name, None)
field.pre_save(model_instance, is_insert)
new_value = getattr(model_instance, field.name, None)
return old_value != new_value
def _get_upsert_fields(self, kwargs):
"""Gets the fields to use in an upsert.
        This is some nice magic. We'll split the fields into
a group of "insert fields" and "update fields":
INSERT INTO bla ("val1", "val2") ON CONFLICT DO UPDATE SET val1 = EXCLUDED.val1
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
insert_fields update_fields
Often, fields appear in both lists. But, for example,
a :see:DateTime field with `auto_now_add=True` set, will
only appear in "insert_fields", since it won't be set
on existing rows.
        Other than that, the user specifies a list of fields
        in the upsert() call. That might not be all fields. The
user could decide to leave out optional fields. If we
end up doing an update, we don't want to overwrite
those non-specified fields.
We cannot just take the list of fields the user
specifies, because as mentioned, some fields
make modifications to the model on their own.
We'll have to detect which fields make modifications
and include them in the list of insert/update fields.
"""
model_instance = self.model(**kwargs)
insert_fields = []
update_fields = []
for field in model_instance._meta.local_concrete_fields:
has_default = field.default != NOT_PROVIDED
if (field.name in kwargs or field.column in kwargs):
insert_fields.append(field)
update_fields.append(field)
continue
elif has_default:
insert_fields.append(field)
continue
# special handling for 'pk' which always refers to
            # the primary key, so if the user specifies `pk`
# instead of a concrete field, we have to handle that
if field.primary_key is True and 'pk' in kwargs:
insert_fields.append(field)
update_fields.append(field)
continue
if self._is_magical_field(model_instance, field, is_insert=True):
insert_fields.append(field)
if self._is_magical_field(model_instance, field, is_insert=False):
update_fields.append(field)
return insert_fields, update_fields
class PostgresManager(models.Manager):
"""Adds support for PostgreSQL specifics."""
use_in_migrations = True
def __init__(self, *args, **kwargs):
"""Initializes a new instance of :see:PostgresManager."""
super(PostgresManager, self).__init__(*args, **kwargs)
# make sure our back-end is set and refuse to proceed
# if it's not set
db_backend = settings.DATABASES['default']['ENGINE']
if 'psqlextra' not in db_backend:
raise ImproperlyConfigured((
'\'%s\' is not the \'psqlextra.backend\'. '
'django-postgres-extra cannot function without '
'the \'psqlextra.backend\'. Set DATABASES.ENGINE.'
) % db_backend)
# hook into django signals to then trigger our own
django.db.models.signals.post_save.connect(
self._on_model_save, sender=self.model, weak=False)
django.db.models.signals.pre_delete.connect(
self._on_model_delete, sender=self.model, weak=False)
self._signals_connected = True
def __del__(self):
"""Disconnects signals."""
if self._signals_connected is False:
return
# django.db.models.signals.post_save.disconnect(
# self._on_model_save, sender=self.model)
# django.db.models.signals.pre_delete.disconnect(
# self._on_model_delete, sender=self.model)
def get_queryset(self):
"""Gets the query set to be used on this manager."""
return PostgresQuerySet(self.model, using=self._db)
def on_conflict(self, fields: List[Union[str, Tuple[str]]], action, index_predicate: str=None):
"""Sets the action to take when conflicts arise when attempting
to insert/create a new row.
Arguments:
fields:
The fields the conflicts can occur in.
action:
The action to take when the conflict occurs.
index_predicate:
The index predicate to satisfy an arbiter partial index.
"""
return self.get_queryset().on_conflict(fields, action, index_predicate)
def upsert(self, conflict_target: List, fields: Dict, index_predicate: str=None) -> int:
"""Creates a new record or updates the existing one
with the specified data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index.
Returns:
The primary key of the row that was created/updated.
"""
return self.get_queryset().upsert(conflict_target, fields, index_predicate)
def upsert_and_get(self, conflict_target: List, fields: Dict, index_predicate: str=None):
"""Creates a new record or updates the existing one
with the specified data and then gets the row.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
fields:
Fields to insert/update.
index_predicate:
The index predicate to satisfy an arbiter partial index.
Returns:
The model instance representing the row
that was created/updated.
"""
return self.get_queryset().upsert_and_get(conflict_target, fields, index_predicate)
def bulk_upsert(self, conflict_target: List, rows: List[Dict], index_predicate: str=None):
"""Creates a set of new records or updates the existing
ones with the specified data.
Arguments:
conflict_target:
Fields to pass into the ON CONFLICT clause.
index_predicate:
The index predicate to satisfy an arbiter partial index.
rows:
Rows to upsert.
"""
return self.get_queryset().bulk_upsert(conflict_target, rows, index_predicate)
@staticmethod
def _on_model_save(sender, **kwargs):
"""When a model gets created or updated."""
created, instance = kwargs['created'], kwargs['instance']
if created:
signals.create.send(sender, pk=instance.pk)
else:
signals.update.send(sender, pk=instance.pk)
@staticmethod
def _on_model_delete(sender, **kwargs):
"""When a model gets deleted."""
instance = kwargs['instance']
signals.delete.send(sender, pk=instance.pk)
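# Minimal usage sketch (model and field names are hypothetical; assumes a model
# that declares `objects = PostgresManager()` and runs on the psqlextra backend
# required above; ConflictAction is the enum imported from psqlextra.query at the
# top of this module):
#
#   MyModel.objects.upsert(
#       conflict_target=['slug'],
#       fields=dict(slug='intro', title='Introduction'),
#   )
#   obj = MyModel.objects.upsert_and_get(
#       conflict_target=['slug'],
#       fields=dict(slug='intro', title='Introduction, revised'),
#   )
#   MyModel.objects.on_conflict(['slug'], ConflictAction.NOTHING).insert(slug='intro')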
| 34.88488
| 116
| 0.614835
|
6b8790b90f60acf66d180f13807d91cbe4359269
| 8,032
|
py
|
Python
|
vimfiles/bundle/vim-python/submodules/pylama/pylama/config.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 463
|
2015-01-15T08:17:42.000Z
|
2022-03-28T15:10:20.000Z
|
vimfiles/bundle/vim-python/submodules/pylama/pylama/config.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 52
|
2015-01-06T02:43:59.000Z
|
2022-03-14T11:15:21.000Z
|
vimfiles/bundle/vim-python/submodules/pylama/pylama/config.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
"""Parse arguments from command line and configuration files."""
import fnmatch
import os
import sys
import re
import logging
from argparse import ArgumentParser
from . import __version__
from .libs.inirama import Namespace
from .lint.extensions import LINTERS
#: Default checkers
DEFAULT_LINTERS = 'pycodestyle', 'pyflakes', 'mccabe'
CURDIR = os.getcwd()
CONFIG_FILES = 'pylama.ini', 'setup.cfg', 'tox.ini', 'pytest.ini'
#: The skip pattern
SKIP_PATTERN = re.compile(r'# *noqa\b', re.I).search
# Parse modelines
MODELINE_RE = re.compile(
r'^\s*#\s+(?:pylama:)\s*((?:[\w_]*=[^:\n\s]+:?)+)',
re.I | re.M)
# Setup a logger
LOGGER = logging.getLogger('pylama')
LOGGER.propagate = False
STREAM = logging.StreamHandler(sys.stdout)
LOGGER.addHandler(STREAM)
class _Default(object): # pylint: disable=too-few-public-methods
def __init__(self, value=None):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<_Default [%s]>" % self.value
def split_csp_str(val):
""" Split comma separated string into unique values, keeping their order.
:returns: list of splitted values
"""
seen = set()
values = val if isinstance(val, (list, tuple)) else val.strip().split(',')
return [x for x in values if x and not (x in seen or seen.add(x))]
def parse_linters(linters):
""" Initialize choosen linters.
:returns: list of inited linters
"""
result = list()
for name in split_csp_str(linters):
linter = LINTERS.get(name)
if linter:
result.append((name, linter))
else:
logging.warning("Linter `%s` not found.", name)
return result
def get_default_config_file(rootdir=None):
"""Search for configuration file."""
if rootdir is None:
return DEFAULT_CONFIG_FILE
for path in CONFIG_FILES:
path = os.path.join(rootdir, path)
if os.path.isfile(path) and os.access(path, os.R_OK):
return path
DEFAULT_CONFIG_FILE = get_default_config_file(CURDIR)
PARSER = ArgumentParser(description="Code audit tool for python.")
PARSER.add_argument(
"paths", nargs='*', default=_Default([CURDIR]),
help="Paths to files or directories for code check.")
PARSER.add_argument(
"--verbose", "-v", action='store_true', help="Verbose mode.")
PARSER.add_argument('--version', action='version',
version='%(prog)s ' + __version__)
PARSER.add_argument(
"--format", "-f", default=_Default('pycodestyle'),
choices=['pep8', 'pycodestyle', 'pylint', 'parsable'],
help="Choose errors format (pycodestyle, pylint, parsable).")
PARSER.add_argument(
"--select", "-s", default=_Default(''), type=split_csp_str,
help="Select errors and warnings. (comma-separated list)")
PARSER.add_argument(
"--sort", default=_Default(''), type=split_csp_str,
help="Sort result by error types. Ex. E,W,D")
PARSER.add_argument(
"--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)),
type=parse_linters, help=(
"Select linters. (comma-separated). Choices are %s."
% ','.join(s for s in LINTERS)
))
PARSER.add_argument(
"--ignore", "-i", default=_Default(''), type=split_csp_str,
help="Ignore errors and warnings. (comma-separated)")
PARSER.add_argument(
"--skip", default=_Default(''),
type=lambda s: [re.compile(fnmatch.translate(p))
for p in s.split(',') if p],
help="Skip files by masks (comma-separated, Ex. */messages.py)")
PARSER.add_argument("--report", "-r", help="Send report to file [REPORT]")
PARSER.add_argument(
"--hook", action="store_true", help="Install Git (Mercurial) hook.")
PARSER.add_argument(
"--concurrent", "--async", action="store_true",
help="Enable async mode. Useful for checking a lot of files. "
"Unsupported with pylint.")
PARSER.add_argument(
"--options", "-o", default=DEFAULT_CONFIG_FILE, metavar='FILE',
help="Specify configuration file. "
"Looks for {}, or {} in the current directory (default: {}).".format(
", ".join(CONFIG_FILES[:-1]), CONFIG_FILES[-1],
DEFAULT_CONFIG_FILE))
PARSER.add_argument(
"--force", "-F", action='store_true', default=_Default(False),
help="Force code checking (if linter doesn't allow)")
PARSER.add_argument(
"--abspath", "-a", action='store_true', default=_Default(False),
help="Use absolute paths in output.")
ACTIONS = dict((a.dest, a)
for a in PARSER._actions) # pylint: disable=protected-access
def parse_options(args=None, config=True, rootdir=CURDIR, **overrides): # noqa
""" Parse options from command line and configuration files.
:return argparse.Namespace:
"""
args = args or []
# Parse args from command string
options = PARSER.parse_args(args)
options.file_params = dict()
options.linters_params = dict()
# Compile options from ini
if config:
cfg = get_config(str(options.options), rootdir=rootdir)
for opt, val in cfg.default.items():
            LOGGER.info('Found option %s (%s)', opt, val)
passed_value = getattr(options, opt, _Default())
if isinstance(passed_value, _Default):
if opt == 'paths':
val = val.split()
if opt == 'skip':
val = fix_pathname_sep(val)
setattr(options, opt, _Default(val))
# Parse file related options
for name, opts in cfg.sections.items():
if name == cfg.default_section:
continue
if name.startswith('pylama'):
name = name[7:]
if name in LINTERS:
options.linters_params[name] = dict(opts)
continue
mask = re.compile(fnmatch.translate(fix_pathname_sep(name)))
options.file_params[mask] = dict(opts)
# Override options
_override_options(options, **overrides)
# Postprocess options
for name in options.__dict__:
value = getattr(options, name)
if isinstance(value, _Default):
setattr(options, name, process_value(name, value.value))
if options.concurrent and 'pylint' in options.linters:
LOGGER.warning('Can\'t parse code asynchronously with pylint enabled.')
options.concurrent = False
return options
def _override_options(options, **overrides):
"""Override options."""
for opt, val in overrides.items():
passed_value = getattr(options, opt, _Default())
if opt in ('ignore', 'select') and passed_value:
value = process_value(opt, passed_value.value)
value += process_value(opt, val)
setattr(options, opt, value)
elif isinstance(passed_value, _Default):
setattr(options, opt, process_value(opt, val))
def process_value(name, value):
""" Compile option value. """
action = ACTIONS.get(name)
if not action:
return value
if callable(action.type):
return action.type(value)
if action.const:
return bool(int(value))
return value
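# Illustrative behaviour (sketch, not part of the original module): for the
# store_true option "force", process_value("force", "1") returns True because
# its action has no callable type and a truthy const; for "linters" the callable
# type (parse_linters) is applied to the raw string instead.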
def get_config(ini_path=None, rootdir=None):
""" Load configuration from INI.
:return Namespace:
"""
config = Namespace()
config.default_section = 'pylama'
if not ini_path:
path = get_default_config_file(rootdir)
if path:
config.read(path)
else:
config.read(ini_path)
return config
def setup_logger(options):
"""Do the logger setup with options."""
LOGGER.setLevel(logging.INFO if options.verbose else logging.WARN)
if options.report:
LOGGER.removeHandler(STREAM)
LOGGER.addHandler(logging.FileHandler(options.report, mode='w'))
if options.options:
LOGGER.info('Try to read configuration from: %r', options.options)
def fix_pathname_sep(val):
"""Fix pathnames for Win."""
return val.replace(os.altsep or "\\", os.sep)
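# Illustrative behaviour (added example): on POSIX systems os.altsep is None, so
# fix_pathname_sep("tests\\fixtures") returns "tests/fixtures"; on Windows the
# forward-slash alternative separator is rewritten to backslashes instead.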
# pylama:ignore=W0212,D210,F0001
| 28.99639
| 79
| 0.638944
|
7560d50f305e65cb2cd4d5cb7720cc0e7682f435
| 2,275
|
py
|
Python
|
asignacion3maxmin/app/__init__.py
|
jawmuk/uip-prog3
|
c21f226ea64636a9826dad9b82f1c837b95e6398
|
[
"MIT"
] | null | null | null |
asignacion3maxmin/app/__init__.py
|
jawmuk/uip-prog3
|
c21f226ea64636a9826dad9b82f1c837b95e6398
|
[
"MIT"
] | null | null | null |
asignacion3maxmin/app/__init__.py
|
jawmuk/uip-prog3
|
c21f226ea64636a9826dad9b82f1c837b95e6398
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
This program compares two values and counts how many times each comparison function is called.
'''
maxCalled = 0
minCalled = 0
def max_val(a, b):
    '''Return the larger of the two numbers provided.
    :param a: First number to compare
    :type a: int
    :param b: Second number to compare
    :type b: int
    :return: The larger of the two values
    :rtype: int '''
global maxCalled
maxCalled = maxCalled + 1
if (a > b):
return a
elif (b > a):
return b
else:
return a
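# Illustrative calls (added example, not part of the original exercise):
#   max_val(3, 7) -> 7, max_val(b=2, a=9) -> 9
# and each call increments the module-level maxCalled counter by one.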
def min_val(a, b):
    '''
    Return the smaller of the two numbers provided.
    :param a: First number
    :type a: int
    :param b: Second number
    :type b: int
    :return: The smaller of the two values
    :rtype: int
    '''
global minCalled
minCalled = minCalled + 1
if (a < b):
return a
elif (b < a):
return b
else:
return a
def print_usage(init_msg, max_val=True, min_val=True):
    ''' Report how many times the functions above were called.
    :param init_msg: The initial message to print
    :type init_msg: str
    :param max_val: Whether to report how many times max_val was called
    :type max_val: bool
    :param min_val: Whether to report how many times min_val was called
    :type min_val: bool '''
global maxCalled, minCalled
print(init_msg)
if max_val:
        print('function max_val was called', maxCalled, 'times')
    if min_val:
        print('function min_val was called', minCalled, 'times')
print('Calling function max_val')
print(max_val.__doc__)
max_val(1, 4)
max_val(2, b=1)
max_val(b=4, a=3)
print('Calling function min_val')
print(min_val.__doc__)
min_val(1, 4)
min_val(2, 4)
min_val(4, b=9)
print_usage('The usage of functions min_val and max_val')
| 16.977612
| 121
| 0.592527
|
c01d6c513fd7014cdf01e2c67c5aabb00b04fc65
| 1,650
|
py
|
Python
|
tests/test_factory.py
|
provinzkraut/redlock-plus
|
9773204ea686aa9e7faaf3dedf8fb79347f56347
|
[
"MIT"
] | 2
|
2021-09-21T15:04:06.000Z
|
2022-01-01T16:13:02.000Z
|
tests/test_factory.py
|
provinzkraut/redlock-plus
|
9773204ea686aa9e7faaf3dedf8fb79347f56347
|
[
"MIT"
] | null | null | null |
tests/test_factory.py
|
provinzkraut/redlock-plus
|
9773204ea686aa9e7faaf3dedf8fb79347f56347
|
[
"MIT"
] | 1
|
2021-09-21T15:10:35.000Z
|
2021-09-21T15:10:35.000Z
|
import pytest
from redlock_plus import LockFactory, InsufficientNodesError
import redlock_plus
def test_create(fake_redis_client):
factory = LockFactory(
[fake_redis_client(), fake_redis_client(), fake_redis_client()]
)
lock = factory("test_factory_create", ttl=500, retry_times=5, retry_delay=100)
assert isinstance(lock, redlock_plus.Lock)
assert factory.redis_nodes == lock.redis_nodes
assert lock.ttl == 500
assert lock.retry_times == 5
assert lock.retry_delay == 100
def test_create_rlock_factory(fake_redis_client):
factory = redlock_plus.RLockFactory(
[fake_redis_client(), fake_redis_client(), fake_redis_client()],
)
lock = factory("test_factory_create")
assert isinstance(lock, redlock_plus.RLock)
def test_custom_lock_class(fake_redis_client):
factory = LockFactory(
[fake_redis_client(), fake_redis_client(), fake_redis_client()],
lock_class=redlock_plus.RLock,
)
lock = factory("test_factory_create")
assert isinstance(lock, redlock_plus.RLock)
def test_insufficient_redis_nodes(fake_redis_client):
with pytest.raises(InsufficientNodesError):
LockFactory([fake_redis_client(), fake_redis_client()])
def test_create_from_url(fake_redis_client):
factory = LockFactory(
[{"url": "redis://localhost/0"}, fake_redis_client(), fake_redis_client()]
)
lock = factory(
"test_factory_create_from_url", ttl=500, retry_times=5, retry_delay=100
)
assert factory.redis_nodes == lock.redis_nodes
assert lock.ttl == 500
assert lock.retry_times == 5
assert lock.retry_delay == 100
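# Illustrative usage sketch (assumption: redlock_plus.Lock exposes an
# acquire()/release() pair like the standard-library locks; that interface is
# not exercised by these tests):
#   lock = factory("shared-resource")
#   if lock.acquire():
#       try:
#           ...  # critical section guarded across the Redis nodes
#       finally:
#           lock.release()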
| 28.448276
| 82
| 0.725455
|
29467cc54e6cdf19a2bbba67ccd99db070bb9860
| 6,721
|
py
|
Python
|
main/settings.py
|
gurupratap-matharu/polls
|
5484a1f639b2ccd5e0a676f24cf0e23b6b2d0639
|
[
"MIT"
] | null | null | null |
main/settings.py
|
gurupratap-matharu/polls
|
5484a1f639b2ccd5e0a676f24cf0e23b6b2d0639
|
[
"MIT"
] | 66
|
2020-08-25T02:03:25.000Z
|
2021-09-22T19:33:34.000Z
|
main/settings.py
|
gurupratap-matharu/polls
|
5484a1f639b2ccd5e0a676f24cf0e23b6b2d0639
|
[
"MIT"
] | null | null | null |
"""
Django settings for main project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get('DEBUG', default=0))
ALLOWED_HOSTS = ['stormy-falls-26925.herokuapp.com', 'testserver', 'localhost', '127.0.0.1', '0.0.0.0']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
# Third party
'rest_framework',
'rest_framework.authtoken',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_auth',
'rest_auth.registration',
'crispy_forms',
'django_extensions',
'django_countries',
'taggit',
# Local
'users.apps.UsersConfig',
'pages.apps.PagesConfig',
'polls.apps.PollsConfig',
'classroom.apps.ClassroomConfig',
'api.apps.ApiConfig',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
],
}
# django-allauth config
SITE_ID = 1
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# django-taggit
TAGGIT_CASE_INSENSITIVE = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
AUTH_USER_MODEL = 'users.CustomUser'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DATABASE_NAME'),
'USER': os.environ.get('DATABASE_USER'),
'PASSWORD': os.environ.get('DATABASE_PASSWORD'),
'HOST': os.environ.get('DATABASE_HOST', default='db'),
'PORT': 5432
}
}
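# Illustrative local environment (placeholder values, not part of the project):
# the settings above read SECRET_KEY, DEBUG and the DATABASE_* variables from
# the environment, e.g.
#   export SECRET_KEY=dev-secret DEBUG=1 DATABASE_NAME=polls \
#          DATABASE_USER=postgres DATABASE_PASSWORD=postgres DATABASE_HOST=localhost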
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), ]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = 'pages:home'
ACCOUNT_LOGOUT_REDIRECT = 'pages:home'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = 'admin@sitedomain.com'
RECIPIENT_LIST = ['gurupratap.matharu@gmail.com']
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '[DJANGO] %(levelname)s %(asctime)s %(module)s '
'%(name)s.%(funcName)s:%(lineno)s: %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'*': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
if not DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
SECURE_SSL_REDIRECT = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_SECONDS = 3600
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Heroku
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
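# Illustrative note (added for clarity): dj_database_url.config() reads the
# DATABASE_URL environment variable, e.g.
#   postgres://user:password@host:5432/dbname
# and the resulting dict overrides the matching keys in DATABASES['default'].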
| 26.565217
| 103
| 0.685315
|
7935f6220b27247c03af3e847259d4720629901f
| 948
|
py
|
Python
|
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/separate_package/find_module_no_info_analyze_name_main_3/test.py
|
Vladpetr/NewsPortal
|
cd4127fbc09d9c8f5e65c8ae699856c6d380a320
|
[
"Apache-2.0"
] | null | null | null |
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/separate_package/find_module_no_info_analyze_name_main_3/test.py
|
Vladpetr/NewsPortal
|
cd4127fbc09d9c8f5e65c8ae699856c6d380a320
|
[
"Apache-2.0"
] | 5
|
2021-04-08T22:02:15.000Z
|
2022-02-10T14:53:45.000Z
|
.idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/separate_package/find_module_no_info_analyze_name_main_3/test.py
|
Vladpetr/NewsPortal
|
cd4127fbc09d9c8f5e65c8ae699856c6d380a320
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from hstest.check_result import CheckResult, correct
from hstest.dynamic.dynamic_test import dynamic_test
from hstest.stage_test import StageTest
from hstest.testing.tested_program import TestedProgram
class FindModuleNoInfoAnalyzeImports(StageTest):
@dynamic_test
def test(self):
main = TestedProgram()
result = main.start()
return CheckResult(
result ==
'Main 3\n'
'Main 2\n', '')
class Test(unittest.TestCase):
def test(self):
status, feedback = FindModuleNoInfoAnalyzeImports().run_tests()
self.assertNotEqual(status, 0)
self.assertEqual(
feedback,
'Error in test #1\n\n'
'Cannot decide which file to run out of the following: "main.py", "main4.py"\n'
'They all have "if __name__ == \'__main__\'". Leave one file with this line.')
if __name__ == '__main__':
Test().test()
| 27.882353
| 91
| 0.646624
|
6c6c3c4f205109ad75fe7d1f70c6ef7a4a25b890
| 1,825
|
py
|
Python
|
exercises/en/solution_03_20a.py
|
hfboyce/MCL-DSCI-571-machine-learning
|
25757369491ac547daa94ff1143ca7389d433a6e
|
[
"MIT"
] | 1
|
2020-11-23T03:19:18.000Z
|
2020-11-23T03:19:18.000Z
|
exercises/en/solution_03_20a.py
|
hfboyce/MCL-DSCI-571-machine-learning
|
25757369491ac547daa94ff1143ca7389d433a6e
|
[
"MIT"
] | 13
|
2020-10-02T16:48:24.000Z
|
2020-12-09T18:58:21.000Z
|
exercises/en/solution_03_20a.py
|
hfboyce/MCL-DSCI-571-machine-learning
|
25757369491ac547daa94ff1143ca7389d433a6e
|
[
"MIT"
] | 2
|
2020-10-28T19:43:42.000Z
|
2021-03-30T22:57:47.000Z
|
import pandas as pd
import altair as alt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, cross_validate
# Loading in the data
bball_df = pd.read_csv('data/bball.csv')
bball_df = bball_df[(bball_df['position'] =='G') | (bball_df['position'] =='F')]
# Define X and y
X = bball_df.loc[:, ['height', 'weight', 'salary']]
y = bball_df['position']
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=33)
results_dict = {"depth": [], "mean_train_score": [], "mean_cv_score": []}
# Create a for loop and fill in the blanks
for depth in range(1,20):
model = DecisionTreeClassifier(max_depth=depth)
scores = cross_validate(model, X_train, y_train, cv=10, return_train_score=True)
results_dict["depth"].append(depth)
results_dict["mean_cv_score"].append(scores["test_score"].mean())
results_dict["mean_train_score"].append(scores["train_score"].mean())
# Wrangles the data into a form suitable for plotting
results_df = pd.DataFrame(results_dict).melt(id_vars=['depth'],
value_vars=['mean_train_score',
'mean_cv_score'],
var_name='split',
value_name='score')
# Create a chart that plots depth vs score
chart1 = alt.Chart(results_df).mark_line().encode(
alt.X('depth:Q', axis=alt.Axis(title="Tree Depth")),
alt.Y('score:Q', scale=alt.Scale(domain=[.80, 1.00])),
alt.Color('split:N', scale=alt.Scale(domain=['mean_train_score',
'mean_cv_score'],
range=['teal', 'gold'])))
chart1
| 43.452381
| 84
| 0.598904
|